ext | sha | content
---|---|---
py
|
1a572473fbb97262b2e1cbf2c433dd2ce92bd294
|
import os
import argparse
import pandas as pd
from typing import Dict
from utils import parse_date, load_colnames
from parsers import (
confirmados_diarios_por_estado,
negativos_diarios_por_estado,
pruebas_pendientes_diarias_por_estado,
defunciones_diarias_por_estado,
hospitalizados_diarios_por_estado,
uci_diarios_por_estado
)
func_dict = {
    'covid19_mex_confirmed.csv': confirmados_diarios_por_estado,
    'covid19_mex_negative.csv': negativos_diarios_por_estado,
    'covid19_mex_awaiting.csv': pruebas_pendientes_diarias_por_estado,
    'covid19_mex_deceased.csv': defunciones_diarias_por_estado,
    'covid19_mex_hospitalised.csv': hospitalizados_diarios_por_estado,
    'covid19_mex_icu.csv': uci_diarios_por_estado,
}
def write_files(main_df: pd.DataFrame, colnames: Dict[str, str], data_dir: str):
for key, func in func_dict.items():
df = func(main_df, colnames)
filename = os.path.join(data_dir, key)
df.to_csv(filename)
return None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='parse main dataset (zip file)')
parser.add_argument('input_file', help='file containing dataset')
parser.add_argument('-d', '--date', type=str, default=None,
help="specify the date to use as yyyymmdd")
args = parser.parse_args()
date_filename, date_iso = parse_date(args)
data_dir = os.path.join(os.pardir, 'data')
# main dataset
input_file = args.input_file
assert input_file.endswith(f'{date_filename}.zip')
try:
main_df = pd.read_csv(input_file, compression='zip')
colnames_dict = load_colnames('catalogo_entidades.csv') # names of 32 states
write_files(main_df, colnames_dict, data_dir)
print(f'Successfully parsed datos_abiertos_{date_filename}.zip')
except FileNotFoundError:
print('ERROR: Wrong date or missing file')
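# Illustrative invocation (the script name is hypothetical; the input file follows the
# datos_abiertos_YYYYMMDD.zip naming this script expects):
#
#     python parse_covid_data.py datos_abiertos_20200601.zip --date 20200601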
|
py
|
1a5724f55a76d9e41cabef2240cc6c808d35f8b3
|
'''global configuration file for quickly accessing and tuning parameters
of the simulation'''
class Config():
def __init__(self):
self.timestep = 0.1
self.equilibration = 100 # Set equilibration time
self.observation = 500 # Set time for observation
self.fps = 60
# track properties
self.aggressives = 0.0 # % of aggressive drivers
self.passives = 1.0 # passive drivers
self.lanes = 3
self.speed_limit = 20
self.track_length = 1000
self.buckets = 20.0
self.max_num_cars = 150
self.buffer_length = 5
self.bucket_length = self.track_length / self.buckets
# car properties
self.acceleration = 4.3
self.deceleration = 5.5
self.max_deceleration = 10.0
self.aggressiveness = 1.2
self.passiveness = 0.8
self.safetymultiplier = 1.5
self.car_length = 5.0
self.min_distance = 5.0
self.nice_cars = False
# self.delay_buffer_length = 2
        self.reaction_time = 1.0 # Seconds of reaction time before decelerating
|
py
|
1a572571ccc0b7928bbeef683e42c4a1d4e5b56c
|
from setuptools import setup
def read_md(filename):
return open(filename).read()
def parse_requirements(filename):
reqs = []
with open(filename, 'r') as f:
reqs = f.read().splitlines()
if not reqs:
raise RuntimeError("Unable to read requirements from '%s'" % filename)
return reqs
setup(
name='django_mock_queries',
version='2.1.7',
description='A django library for mocking queryset functions in memory for testing',
long_description=read_md('README.md'),
long_description_content_type='text/markdown',
url='https://github.com/stphivos/django-mock-queries',
author='Phivos Stylianides',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Testing :: Mocking',
'Topic :: Software Development :: Testing :: Unit',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='django orm mocking unit-testing tdd',
packages=['django_mock_queries'],
install_requires=parse_requirements('requirements/core.txt'),
)
|
py
|
1a5726cc8f9a0976db61c1f9c3ffc72165e241da
|
import numpy as np
import os
import cv2
def imread(file_path, c=None):
if c is None:
im = cv2.imread(file_path)
else:
im = cv2.imread(file_path, c)
    if im is None:
        raise ValueError('Cannot read image: ' + file_path)
if im.ndim == 3 and im.shape[2] == 3:
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
return im
def imwrite(file_path, image):
if image.ndim == 3 and image.shape[2] == 3:
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(file_path, image)
def fold_dir(folder):
if not os.path.exists(folder):
os.makedirs(folder)
return folder
def get_mask_BZ(img):
if img.ndim==3:
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray_img = img
    threshold = np.mean(gray_img)/3-5
    #cv2.imshow('gray_img', gray_img)
    #cv2.waitKey()
    #print(threshold)
    _, mask = cv2.threshold(gray_img, max(5,threshold), 1, cv2.THRESH_BINARY)
#cv2.imshow('bz_mask', mask*255)
#cv2.waitKey()
nn_mask = np.zeros((mask.shape[0]+2,mask.shape[1]+2),np.uint8)
new_mask = (1-mask).astype(np.uint8)
# cv::floodFill(Temp, Point(0, 0), Scalar(255));
# _,new_mask,_,_ = cv2.floodFill(new_mask, nn_mask, [(0, 0),(0,new_mask.shape[0])], (0), cv2.FLOODFILL_MASK_ONLY)
_,new_mask,_,_ = cv2.floodFill(new_mask, nn_mask, (0,0), (0), cv2.FLOODFILL_MASK_ONLY)
_,new_mask,_,_ = cv2.floodFill(new_mask, nn_mask, (new_mask.shape[1]-1,new_mask.shape[0]-1), (0), cv2.FLOODFILL_MASK_ONLY)
mask = mask + new_mask
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
mask = cv2.erode(mask, kernel)
mask = cv2.dilate(mask, kernel)
return mask
def _get_center_by_edge(mask):
center=[0,0]
x=mask.sum(axis=1)
center[0]=np.where(x>x.max()*0.95)[0].mean()
x=mask.sum(axis=0)
center[1]=np.where(x>x.max()*0.95)[0].mean()
return center
def _get_radius_by_mask_center(mask,center):
mask=mask.astype(np.uint8)
ksize=max(mask.shape[1]//400*2+1,3)
kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ksize,ksize))
mask=cv2.morphologyEx(mask, cv2.MORPH_GRADIENT, kernel)
# radius=
index=np.where(mask>0)
d_int=np.sqrt((index[0]-center[0])**2+(index[1]-center[1])**2)
    b_count=np.bincount(np.ceil(d_int).astype(int))
radius=np.where(b_count>b_count.max()*0.995)[0].max()
return radius
def _get_circle_by_center_bbox(shape,center,bbox,radius):
center_mask=np.zeros(shape=shape).astype('uint8')
tmp_mask=np.zeros(shape=bbox[2:4])
center_tmp=(int(center[0]),int(center[1]))
center_mask=cv2.circle(center_mask,center_tmp[::-1],int(radius),(1),-1)
# center_mask[bbox[0]:bbox[0]+bbox[2],bbox[1]:bbox[1]+bbox[3]]=tmp_mask
# center_mask[bbox[0]:min(bbox[0]+bbox[2],center_mask.shape[0]),bbox[1]:min(bbox[1]+bbox[3],center_mask.shape[1])]=tmp_mask
return center_mask
def get_mask(img):
if img.ndim ==3:
#raise 'image dim is not 3'
g_img=cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
#cv2.imshow('ImageWindow', g_img)
#cv2.waitKey()
elif img.ndim == 2:
g_img =img.copy()
    else:
        raise ValueError('image dim is not 2 or 3')
h,w = g_img.shape
shape=g_img.shape[0:2]
#g_img = cv2.resize(g_img,(0,0),fx = 0.5,fy = 0.5)
tg_img=cv2.normalize(g_img, None, 0, 255, cv2.NORM_MINMAX)
tmp_mask=get_mask_BZ(tg_img)
center=_get_center_by_edge(tmp_mask)
#bbox=_get_bbox_by_mask(tmp_mask)
#print(center)
#cv2.imshow('ImageWindow', tmp_mask*255)
#cv2.waitKey()
radius=_get_radius_by_mask_center(tmp_mask,center)
#resize back
#center = [center[0]*2,center[1]*2]
#radius = int(radius*2)
center = [center[0], center[1]]
radius = int(radius)
s_h = max(0,int(center[0] - radius))
s_w = max(0, int(center[1] - radius))
bbox = (s_h, s_w, min(h-s_h,2 * radius), min(w-s_w,2 * radius))
tmp_mask=_get_circle_by_center_bbox(shape,center,bbox,radius)
return tmp_mask,bbox,center,radius
def mask_image(img,mask):
img[mask<=0,...]=0
return img
def remove_back_area(img,bbox=None,border=None):
image=img
if border is None:
        border=np.array((bbox[0],bbox[0]+bbox[2],bbox[1],bbox[1]+bbox[3],img.shape[0],img.shape[1]),dtype=int)
image=image[border[0]:border[1],border[2]:border[3],...]
return image,border
def supplemental_black_area(img,border=None):
image=img
if border is None:
h,v=img.shape[0:2]
max_l=max(h,v)
if image.ndim>2:
image=np.zeros(shape=[max_l,max_l,img.shape[2]],dtype=img.dtype)
else:
image=np.zeros(shape=[max_l,max_l],dtype=img.dtype)
border=(int(max_l/2-h/2),int(max_l/2-h/2)+h,int(max_l/2-v/2),int(max_l/2-v/2)+v,max_l)
else:
max_l=border[4]
if image.ndim>2:
image=np.zeros(shape=[max_l,max_l,img.shape[2]],dtype=img.dtype)
else:
image=np.zeros(shape=[max_l,max_l],dtype=img.dtype)
image[border[0]:border[1],border[2]:border[3],...]=img
return image,border
def process_without_gb(img, label,radius_list,centre_list_w, centre_list_h):
    # Preprocess an image together with its matching label image:
    #   img: original image
    #   label: label image, cropped and padded the same way as img
    #   radius_list, centre_list_w, centre_list_h: running lists that collect the
    #       detected circle radius and centre coordinates for each processed image
    # returns:
    #   result_img: preprocessed image
    #   borders: [crop border, padding border] applied to the image
    #   mask: mask for the preprocessed image (plus label and the updated lists)
borders = []
mask, bbox, center, radius = get_mask(img)
#print('center is: ',center)
#print('radius is: ',radius)
r_img = mask_image(img, mask)
r_img, r_border = remove_back_area(r_img,bbox=bbox)
mask, _ = remove_back_area(mask,border=r_border)
label, _ = remove_back_area(label,bbox=bbox)
borders.append(r_border)
r_img,sup_border = supplemental_black_area(r_img)
#print(r_img.shape)
label,sup_border = supplemental_black_area(label)
mask,_ = supplemental_black_area(mask,border=sup_border)
borders.append(sup_border)
radius_list.append(radius)
centre_list_w.append(int(center[0]))
centre_list_h.append(int(center[1]))
return r_img,borders,(mask*255).astype(np.uint8),label, radius_list,centre_list_w, centre_list_h
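# Illustrative only (not part of the original module): a minimal sketch of how the
# pipeline above might be driven for a single image/label pair. The file names are
# hypothetical placeholders.
def _demo_process_single(image_path='example_image.png', label_path='example_label.png'):
    radius_list, centre_w, centre_h = [], [], []
    img = imread(image_path)
    label = imread(label_path, cv2.IMREAD_GRAYSCALE)
    r_img, borders, mask, label, radius_list, centre_w, centre_h = process_without_gb(
        img, label, radius_list, centre_w, centre_h)
    imwrite(os.path.join(fold_dir('processed'), 'example_image.png'), r_img)
    return r_img, mask, borders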
|
py
|
1a5726f13d24c54db2f1b2d407945cac3f35c771
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Alias(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
Alias ARN.
"""
description: pulumi.Output[str]
"""
Description of the alias.
"""
name: pulumi.Output[str]
"""
Name of the alias.
"""
routing_strategy: pulumi.Output[dict]
"""
Specifies the fleet and/or routing type to use for the alias.
* `fleetId` (`str`) - ID of the Gamelift Fleet to point the alias to.
* `message` (`str`) - Message text to be used with the `TERMINAL` routing strategy.
* `type` (`str`) - Type of routing strategy. e.g. `SIMPLE` or `TERMINAL`
"""
tags: pulumi.Output[dict]
"""
Key-value map of resource tags
"""
def __init__(__self__, resource_name, opts=None, description=None, name=None, routing_strategy=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Gamelift Alias resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.gamelift.Alias("example",
description="Example Description",
routing_strategy={
"message": "Example Message",
"type": "TERMINAL",
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description of the alias.
:param pulumi.Input[str] name: Name of the alias.
:param pulumi.Input[dict] routing_strategy: Specifies the fleet and/or routing type to use for the alias.
:param pulumi.Input[dict] tags: Key-value map of resource tags
The **routing_strategy** object supports the following:
* `fleetId` (`pulumi.Input[str]`) - ID of the Gamelift Fleet to point the alias to.
* `message` (`pulumi.Input[str]`) - Message text to be used with the `TERMINAL` routing strategy.
* `type` (`pulumi.Input[str]`) - Type of routing strategy. e.g. `SIMPLE` or `TERMINAL`
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['name'] = name
if routing_strategy is None:
raise TypeError("Missing required property 'routing_strategy'")
__props__['routing_strategy'] = routing_strategy
__props__['tags'] = tags
__props__['arn'] = None
super(Alias, __self__).__init__(
'aws:gamelift/alias:Alias',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, description=None, name=None, routing_strategy=None, tags=None):
"""
Get an existing Alias resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Alias ARN.
:param pulumi.Input[str] description: Description of the alias.
:param pulumi.Input[str] name: Name of the alias.
:param pulumi.Input[dict] routing_strategy: Specifies the fleet and/or routing type to use for the alias.
:param pulumi.Input[dict] tags: Key-value map of resource tags
The **routing_strategy** object supports the following:
* `fleetId` (`pulumi.Input[str]`) - ID of the Gamelift Fleet to point the alias to.
* `message` (`pulumi.Input[str]`) - Message text to be used with the `TERMINAL` routing strategy.
* `type` (`pulumi.Input[str]`) - Type of routing strategy. e.g. `SIMPLE` or `TERMINAL`
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["description"] = description
__props__["name"] = name
__props__["routing_strategy"] = routing_strategy
__props__["tags"] = tags
return Alias(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py
|
1a5727241694f2b96b9aa8a9124d4499d24fc173
|
import tty
import sys
import curses
import datetime
import locale
from decimal import Decimal
import getpass
import electrum
from electrum.util import format_satoshis, set_verbosity
from electrum.bitcoin import is_address, COIN, TYPE_ADDRESS
from electrum.transaction import TxOutput
from electrum.wallet import Wallet
from electrum.storage import WalletStorage
from electrum.network import NetworkParameters
from electrum.interface import deserialize_server
_ = lambda x:x
class ElectrumGui:
def __init__(self, config, daemon, plugins):
self.config = config
self.network = daemon.network
storage = WalletStorage(config.get_wallet_path())
if not storage.file_exists():
print("Wallet not found. try 'electrum create'")
exit()
if storage.is_encrypted():
password = getpass.getpass('Password:', stream=None)
storage.decrypt(password)
self.wallet = Wallet(storage)
self.wallet.start_network(self.network)
self.contacts = self.wallet.contacts
locale.setlocale(locale.LC_ALL, '')
self.encoding = locale.getpreferredencoding()
self.stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
self.stdscr.keypad(1)
self.stdscr.border(0)
self.maxy, self.maxx = self.stdscr.getmaxyx()
self.set_cursor(0)
self.w = curses.newwin(10, 50, 5, 5)
set_verbosity(False)
self.tab = 0
self.pos = 0
self.popup_pos = 0
self.str_recipient = ""
self.str_description = ""
self.str_amount = ""
self.str_fee = ""
self.history = None
if self.network:
self.network.register_callback(self.update, ['wallet_updated', 'network_updated'])
self.tab_names = [_("History"), _("Send"), _("Receive"), _("Addresses"), _("Contacts"), _("Banner")]
self.num_tabs = len(self.tab_names)
def set_cursor(self, x):
try:
curses.curs_set(x)
except Exception:
pass
def restore_or_create(self):
pass
def verify_seed(self):
pass
def get_string(self, y, x):
self.set_cursor(1)
curses.echo()
self.stdscr.addstr( y, x, " "*20, curses.A_REVERSE)
s = self.stdscr.getstr(y,x)
curses.noecho()
self.set_cursor(0)
return s
def update(self, event):
self.update_history()
if self.tab == 0:
self.print_history()
self.refresh()
def print_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
if self.history is None:
self.update_history()
self.print_list(self.history[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))
def update_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
b = 0
self.history = []
for tx_hash, tx_mined_status, value, balance in self.wallet.get_history():
if tx_mined_status.conf:
timestamp = tx_mined_status.timestamp
try:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "------"
else:
time_str = 'unconfirmed'
label = self.wallet.get_label(tx_hash)
if len(label) > 40:
label = label[0:37] + '...'
self.history.append( format_str%( time_str, label, format_satoshis(value, whitespaces=True), format_satoshis(balance, whitespaces=True) ) )
def print_balance(self):
if not self.network:
msg = _("Offline")
elif self.network.is_connected():
if not self.wallet.up_to_date:
msg = _("Synchronizing...")
else:
c, u, x = self.wallet.get_balance()
msg = _("Balance")+": %f "%(Decimal(c) / COIN)
if u:
msg += " [%f unconfirmed]"%(Decimal(u) / COIN)
if x:
msg += " [%f unmatured]"%(Decimal(x) / COIN)
else:
msg = _("Not connected")
self.stdscr.addstr( self.maxy -1, 3, msg)
for i in range(self.num_tabs):
self.stdscr.addstr( 0, 2 + 2*i + len(''.join(self.tab_names[0:i])), ' '+self.tab_names[i]+' ', curses.A_BOLD if self.tab == i else 0)
self.stdscr.addstr(self.maxy -1, self.maxx-30, ' '.join([_("Settings"), _("Network"), _("Quit")]))
def print_receive(self):
addr = self.wallet.get_receiving_address()
self.stdscr.addstr(2, 1, "Address: "+addr)
self.print_qr(addr)
def print_contacts(self):
messages = map(lambda x: "%20s %45s "%(x[0], x[1][1]), self.contacts.items())
self.print_list(messages, "%19s %15s "%("Key", "Value"))
def print_addresses(self):
fmt = "%-35s %-30s"
messages = map(lambda addr: fmt % (addr, self.wallet.labels.get(addr,"")), self.wallet.get_addresses())
self.print_list(messages, fmt % ("Address", "Label"))
def print_edit_line(self, y, label, text, index, size):
text += " "*(size - len(text) )
self.stdscr.addstr( y, 2, label)
self.stdscr.addstr( y, 15, text, curses.A_REVERSE if self.pos%6==index else curses.color_pair(1))
def print_send_tab(self):
self.stdscr.clear()
self.print_edit_line(3, _("Pay to"), self.str_recipient, 0, 40)
self.print_edit_line(5, _("Description"), self.str_description, 1, 40)
self.print_edit_line(7, _("Amount"), self.str_amount, 2, 15)
self.print_edit_line(9, _("Fee"), self.str_fee, 3, 15)
self.stdscr.addstr( 12, 15, _("[Send]"), curses.A_REVERSE if self.pos%6==4 else curses.color_pair(2))
self.stdscr.addstr( 12, 25, _("[Clear]"), curses.A_REVERSE if self.pos%6==5 else curses.color_pair(2))
self.maxpos = 6
def print_banner(self):
if self.network and self.network.banner:
banner = self.network.banner
banner = banner.replace('\r', '')
self.print_list(banner.split('\n'))
def print_qr(self, data):
import qrcode
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
s = StringIO()
self.qr = qrcode.QRCode()
self.qr.add_data(data)
self.qr.print_ascii(out=s, invert=False)
msg = s.getvalue()
lines = msg.split('\n')
try:
for i, l in enumerate(lines):
l = l.encode("utf-8")
self.stdscr.addstr(i+5, 5, l, curses.color_pair(3))
except curses.error:
m = 'error. screen too small?'
m = m.encode(self.encoding)
self.stdscr.addstr(5, 1, m, 0)
def print_list(self, lst, firstline = None):
lst = list(lst)
self.maxpos = len(lst)
if not self.maxpos: return
if firstline:
firstline += " "*(self.maxx -2 - len(firstline))
self.stdscr.addstr( 1, 1, firstline )
for i in range(self.maxy-4):
msg = lst[i] if i < len(lst) else ""
msg += " "*(self.maxx - 2 - len(msg))
m = msg[0:self.maxx - 2]
m = m.encode(self.encoding)
self.stdscr.addstr( i+2, 1, m, curses.A_REVERSE if i == (self.pos % self.maxpos) else 0)
def refresh(self):
if self.tab == -1: return
self.stdscr.border(0)
self.print_balance()
self.stdscr.refresh()
def main_command(self):
c = self.stdscr.getch()
print(c)
cc = curses.unctrl(c).decode()
if c == curses.KEY_RIGHT: self.tab = (self.tab + 1)%self.num_tabs
elif c == curses.KEY_LEFT: self.tab = (self.tab - 1)%self.num_tabs
elif c == curses.KEY_DOWN: self.pos +=1
elif c == curses.KEY_UP: self.pos -= 1
elif c == 9: self.pos +=1 # tab
elif cc in ['^W', '^C', '^X', '^Q']: self.tab = -1
elif cc in ['^N']: self.network_dialog()
elif cc == '^S': self.settings_dialog()
else: return c
if self.pos<0: self.pos=0
if self.pos>=self.maxpos: self.pos=self.maxpos - 1
def run_tab(self, i, print_func, exec_func):
while self.tab == i:
self.stdscr.clear()
print_func()
self.refresh()
c = self.main_command()
if c: exec_func(c)
def run_history_tab(self, c):
if c == 10:
out = self.run_popup('',["blah","foo"])
def edit_str(self, target, c, is_num=False):
# detect backspace
cc = curses.unctrl(c).decode()
if c in [8, 127, 263] and target:
target = target[:-1]
elif not is_num or cc in '0123456789.':
target += cc
return target
def run_send_tab(self, c):
if self.pos%6 == 0:
self.str_recipient = self.edit_str(self.str_recipient, c)
if self.pos%6 == 1:
self.str_description = self.edit_str(self.str_description, c)
if self.pos%6 == 2:
self.str_amount = self.edit_str(self.str_amount, c, True)
elif self.pos%6 == 3:
self.str_fee = self.edit_str(self.str_fee, c, True)
elif self.pos%6==4:
if c == 10: self.do_send()
elif self.pos%6==5:
if c == 10: self.do_clear()
def run_receive_tab(self, c):
if c == 10:
out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
def run_contacts_tab(self, c):
if c == 10 and self.contacts:
out = self.run_popup('Address', ["Copy", "Pay to", "Edit label", "Delete"]).get('button')
key = list(self.contacts.keys())[self.pos%len(self.contacts.keys())]
if out == "Pay to":
self.tab = 1
self.str_recipient = key
self.pos = 2
elif out == "Edit label":
s = self.get_string(6 + self.pos, 18)
if s:
self.wallet.labels[key] = s
def run_banner_tab(self, c):
self.show_message(repr(c))
pass
def main(self):
tty.setraw(sys.stdin)
try:
while self.tab != -1:
self.run_tab(0, self.print_history, self.run_history_tab)
self.run_tab(1, self.print_send_tab, self.run_send_tab)
self.run_tab(2, self.print_receive, self.run_receive_tab)
self.run_tab(3, self.print_addresses, self.run_banner_tab)
self.run_tab(4, self.print_contacts, self.run_contacts_tab)
self.run_tab(5, self.print_banner, self.run_banner_tab)
except curses.error as e:
raise Exception("Error with curses. Is your screen too small?") from e
finally:
tty.setcbreak(sys.stdin)
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
def do_clear(self):
self.str_amount = ''
self.str_recipient = ''
self.str_fee = ''
self.str_description = ''
def do_send(self):
if not is_address(self.str_recipient):
self.show_message(_('Invalid MUE address'))
return
try:
amount = int(Decimal(self.str_amount) * COIN)
except Exception:
self.show_message(_('Invalid Amount'))
return
try:
fee = int(Decimal(self.str_fee) * COIN)
except Exception:
self.show_message(_('Invalid Fee'))
return
if self.wallet.has_password():
password = self.password_dialog()
if not password:
return
else:
password = None
try:
tx = self.wallet.mktx([TxOutput(TYPE_ADDRESS, self.str_recipient, amount)],
password, self.config, fee)
except Exception as e:
self.show_message(str(e))
return
if self.str_description:
self.wallet.labels[tx.txid()] = self.str_description
self.show_message(_("Please wait..."), getchar=False)
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except Exception as e:
self.show_message(repr(e))
else:
self.show_message(_('Payment sent.'))
self.do_clear()
#self.update_contacts_tab()
def show_message(self, message, getchar = True):
w = self.w
w.clear()
w.border(0)
for i, line in enumerate(message.split('\n')):
w.addstr(2+i,2,line)
w.refresh()
if getchar: c = self.stdscr.getch()
def run_popup(self, title, items):
return self.run_dialog(title, list(map(lambda x: {'type':'button','label':x}, items)), interval=1, y_pos = self.pos+3)
def network_dialog(self):
if not self.network:
return
net_params = self.network.get_parameters()
host, port, protocol = net_params.host, net_params.port, net_params.protocol
proxy_config, auto_connect = net_params.proxy, net_params.auto_connect
srv = 'auto-connect' if auto_connect else self.network.default_server
out = self.run_dialog('Network', [
{'label':'server', 'type':'str', 'value':srv},
{'label':'proxy', 'type':'str', 'value':self.config.get('proxy', '')},
], buttons = 1)
if out:
if out.get('server'):
server = out.get('server')
auto_connect = server == 'auto-connect'
if not auto_connect:
try:
host, port, protocol = deserialize_server(server)
except Exception:
self.show_message("Error:" + server + "\nIn doubt, type \"auto-connect\"")
return False
if out.get('server') or out.get('proxy'):
proxy = electrum.network.deserialize_proxy(out.get('proxy')) if out.get('proxy') else proxy_config
net_params = NetworkParameters(host, port, protocol, proxy, auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def settings_dialog(self):
fee = str(Decimal(self.config.fee_per_kb()) / COIN)
out = self.run_dialog('Settings', [
{'label':'Default fee', 'type':'satoshis', 'value': fee }
], buttons = 1)
if out:
if out.get('Default fee'):
fee = int(Decimal(out['Default fee']) * COIN)
self.config.set_key('fee_per_kb', fee, True)
def password_dialog(self):
out = self.run_dialog('Password', [
{'label':'Password', 'type':'password', 'value':''}
], buttons = 1)
return out.get('Password')
def run_dialog(self, title, items, interval=2, buttons=None, y_pos=3):
self.popup_pos = 0
self.w = curses.newwin( 5 + len(list(items))*interval + (2 if buttons else 0), 50, y_pos, 5)
w = self.w
out = {}
while True:
w.clear()
w.border(0)
w.addstr( 0, 2, title)
num = len(list(items))
numpos = num
if buttons: numpos += 2
for i in range(num):
item = items[i]
label = item.get('label')
if item.get('type') == 'list':
value = item.get('value','')
elif item.get('type') == 'satoshis':
value = item.get('value','')
elif item.get('type') == 'str':
value = item.get('value','')
elif item.get('type') == 'password':
value = '*'*len(item.get('value',''))
else:
value = ''
if value is None:
value = ''
if len(value)<20:
value += ' '*(20-len(value))
if 'value' in item:
w.addstr( 2+interval*i, 2, label)
w.addstr( 2+interval*i, 15, value, curses.A_REVERSE if self.popup_pos%numpos==i else curses.color_pair(1) )
else:
w.addstr( 2+interval*i, 2, label, curses.A_REVERSE if self.popup_pos%numpos==i else 0)
if buttons:
w.addstr( 5+interval*i, 10, "[ ok ]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-2) else curses.color_pair(2))
w.addstr( 5+interval*i, 25, "[cancel]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-1) else curses.color_pair(2))
w.refresh()
c = self.stdscr.getch()
if c in [ord('q'), 27]: break
elif c in [curses.KEY_LEFT, curses.KEY_UP]: self.popup_pos -= 1
elif c in [curses.KEY_RIGHT, curses.KEY_DOWN]: self.popup_pos +=1
else:
i = self.popup_pos%numpos
if buttons and c==10:
if i == numpos-2:
return out
elif i == numpos -1:
return {}
item = items[i]
_type = item.get('type')
if _type == 'str':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item.get('value')
elif _type == 'password':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item ['value']
elif _type == 'satoshis':
item['value'] = self.edit_str(item['value'], c, True)
out[item.get('label')] = item.get('value')
elif _type == 'list':
choices = item.get('choices')
try:
j = choices.index(item.get('value'))
except Exception:
j = 0
new_choice = choices[(j + 1)% len(choices)]
item['value'] = new_choice
out[item.get('label')] = item.get('value')
elif _type == 'button':
out['button'] = item.get('label')
break
return out
|
py
|
1a57273f13f3cdc367adccb64e12a9f7d09b9f83
|
from flask import Flask, render_template, request
import sqlite3 as sql
app = Flask(__name__)
import sqlite3
conn = sqlite3.connect('database.db')
# print("Opened database successfully")
# conn.execute('CREATE TABLE students (name TEXT, addr TEXT, city TEXT, pin TEXT)')
# print("Table created successfully")
# conn.close()
@app.route('/')
def home():
return render_template('index.html')
@app.route('/enternew')
def new_student():
return render_template('student.html')
@app.route('/addrec',methods = ['POST', 'GET'])
def addrec():
if request.method == 'POST':
try:
nm = request.form['nm']
addr = request.form['add']
city = request.form['city']
pin = request.form['pin']
with sql.connect("database.db") as con:
cur = con.cursor()
#cur.execute("INSERT INTO students (name,addr,city,pin) VALUES (?,?,?,?)",(nm,addr,city,pin) )
                con.commit()
                msg = "Record successfully added"
        except Exception:
            con.rollback()
            msg = "error in insert operation"
        finally:
            con.close()
            return render_template("result.html", msg=msg)
@app.route('/list')
def list():
con = sql.connect("database.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("select * from all_month")
rows = cur.fetchall();
return render_template("list.html",rows = rows)
if __name__ == '__main__':
app.run(debug = True)
|
py
|
1a572785f005eb5254654e3a75457c921ec7fa0f
|
"""Module/script to byte-compile all .py files to .pyc (or .pyo) files.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, it compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import importlib.util
import py_compile
import struct
__all__ = ["compile_dir","compile_file","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None,
quiet=False, legacy=False, optimize=-1):
"""Byte-compile all modules in the given directory tree.
Arguments (only dir is required):
dir: the directory to byte-compile
maxlevels: maximum recursion level (default 10)
ddir: the directory that will be prepended to the path to the
file as it is compiled into each byte-code file.
force: if True, force compilation, even if timestamps are up-to-date
quiet: if True, be quiet during compilation
legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
optimize: optimization level or -1 for level of the interpreter
"""
if not quiet:
print('Listing {!r}...'.format(dir))
try:
names = os.listdir(dir)
except OSError:
print("Can't list {!r}".format(dir))
names = []
names.sort()
success = 1
for name in names:
if name == '__pycache__':
continue
fullname = os.path.join(dir, name)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if not os.path.isdir(fullname):
if not compile_file(fullname, ddir, force, rx, quiet,
legacy, optimize):
success = 0
elif (maxlevels > 0 and name != os.curdir and name != os.pardir and
os.path.isdir(fullname) and not os.path.islink(fullname)):
if not compile_dir(fullname, maxlevels - 1, dfile, force, rx,
quiet, legacy, optimize):
success = 0
return success
def compile_file(fullname, ddir=None, force=False, rx=None, quiet=False,
legacy=False, optimize=-1):
"""Byte-compile one file.
Arguments (only fullname is required):
fullname: the file to byte-compile
ddir: if given, the directory name compiled in to the
byte-code file.
force: if True, force compilation, even if timestamps are up-to-date
quiet: if True, be quiet during compilation
legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
optimize: optimization level or -1 for level of the interpreter
"""
success = 1
name = os.path.basename(fullname)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if rx is not None:
mo = rx.search(fullname)
if mo:
return success
if os.path.isfile(fullname):
if legacy:
cfile = fullname + ('c' if __debug__ else 'o')
else:
if optimize >= 0:
cfile = importlib.util.cache_from_source(
fullname, debug_override=not optimize)
else:
cfile = importlib.util.cache_from_source(fullname)
cache_dir = os.path.dirname(cfile)
head, tail = name[:-3], name[-3:]
if tail == '.py':
if not force:
try:
mtime = int(os.stat(fullname).st_mtime)
expect = struct.pack('<4sl', importlib.util.MAGIC_NUMBER,
mtime)
with open(cfile, 'rb') as chandle:
actual = chandle.read(8)
if expect == actual:
return success
except OSError:
pass
if not quiet:
print('Compiling {!r}...'.format(fullname))
try:
ok = py_compile.compile(fullname, cfile, dfile, True,
optimize=optimize)
except py_compile.PyCompileError as err:
if quiet:
print('*** Error compiling {!r}...'.format(fullname))
else:
print('*** ', end='')
# escape non-printable characters in msg
msg = err.msg.encode(sys.stdout.encoding,
errors='backslashreplace')
msg = msg.decode(sys.stdout.encoding)
print(msg)
success = 0
except (SyntaxError, UnicodeError, OSError) as e:
if quiet:
print('*** Error compiling {!r}...'.format(fullname))
else:
print('*** ', end='')
print(e.__class__.__name__ + ':', e)
success = 0
else:
if ok == 0:
success = 0
return success
def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=False,
legacy=False, optimize=-1):
"""Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default True)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default False)
quiet: as for compile_dir() (default False)
legacy: as for compile_dir() (default False)
optimize: as for compile_dir() (default -1)
"""
success = 1
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
print('Skipping current directory')
else:
success = success and compile_dir(dir, maxlevels, None,
force, quiet=quiet,
legacy=legacy, optimize=optimize)
return success
def main():
"""Script main program."""
import argparse
parser = argparse.ArgumentParser(
description='Utilities to support installing Python libraries.')
parser.add_argument('-l', action='store_const', const=0,
default=10, dest='maxlevels',
help="don't recurse into subdirectories")
parser.add_argument('-f', action='store_true', dest='force',
help='force rebuild even if timestamps are up to date')
parser.add_argument('-q', action='store_true', dest='quiet',
help='output only error messages')
parser.add_argument('-b', action='store_true', dest='legacy',
help='use legacy (pre-PEP3147) compiled file locations')
parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None,
help=('directory to prepend to file paths for use in '
'compile-time tracebacks and in runtime '
'tracebacks in cases where the source file is '
'unavailable'))
parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None,
help=('skip files matching the regular expression; '
'the regexp is searched for in the full path '
'of each file considered for compilation'))
parser.add_argument('-i', metavar='FILE', dest='flist',
help=('add all the files and directories listed in '
'FILE to the list considered for compilation; '
'if "-", names are read from stdin'))
parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*',
help=('zero or more file and directory names '
'to compile; if no arguments given, defaults '
'to the equivalent of -l sys.path'))
args = parser.parse_args()
compile_dests = args.compile_dest
if args.rx:
import re
args.rx = re.compile(args.rx)
# if flist is provided then load it
if args.flist:
try:
with (sys.stdin if args.flist=='-' else open(args.flist)) as f:
for line in f:
compile_dests.append(line.strip())
except OSError:
print("Error reading file list {}".format(args.flist))
return False
success = True
try:
if compile_dests:
for dest in compile_dests:
if os.path.isfile(dest):
if not compile_file(dest, args.ddir, args.force, args.rx,
args.quiet, args.legacy):
success = False
else:
if not compile_dir(dest, args.maxlevels, args.ddir,
args.force, args.rx, args.quiet,
args.legacy):
success = False
return success
else:
return compile_path(legacy=args.legacy, force=args.force,
quiet=args.quiet)
except KeyboardInterrupt:
print("\n[interrupted]")
return False
return True
if __name__ == '__main__':
exit_status = int(not main())
sys.exit(exit_status)
|
py
|
1a5727bbd90d9307e29a2a3ebc2319d52908e1e1
|
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self) -> str:
return self.question_text
    def was_published_recently(self):
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self) -> str:
return self.choice_text
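# Illustrative usage (e.g. from `python manage.py shell`; the app import path is assumed):
#
#     from django.utils import timezone
#     from polls.models import Question
#     q = Question(question_text="What's new?", pub_date=timezone.now())
#     q.was_published_recently()  # True: published within the last day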
|
py
|
1a5727e236d14bd4ef8a53cee5726388c4f89176
|
##########################################################################################################################################
## License: Apache 2.0. See LICENSE file in root directory. ##
##########################################################################################################################################
import pyrealsense2 as rs
import cv2
import numpy as np
import time
from dynamo.realsense_device_manager import DeviceManager
import dynamo.calibration as calibration
import dynamo.stream as stream
import copy
import threading
import sys
import multiprocessing
import pickle
import queue
import os
import argparse
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--load", help="load calibration",
nargs='?')
parser.add_argument("--new", help="new calibration",
nargs='?',default="new.cal")
parser.add_argument("--folder", help="data folder",
nargs = '?', default="data")
parser.add_argument("--time", help="time to collect data (s)",
nargs = '?', default="10")
args = parser.parse_args()
rsConfig = rs.config()
if args.load:
print(os.path.join(os.getcwd(),args.load))
file = open(os.path.join(os.getcwd(),args.load),'rb')
transformation = pickle.load(file)
file.close()
#transformation = calibration.load(os.path.join(os.getcwd(),args.load))
print(transformation)
deviceManager = DeviceManager(rs.context(), rsConfig)
deviceManager.enable_all_emitters()
elif args.new:
resolutionWidth = 848
resolutionHeight = 480
frameRate = 30
rsConfig.enable_stream(rs.stream.depth, resolutionWidth, resolutionHeight, rs.format.z16, frameRate)
rsConfig.enable_stream(rs.stream.color, resolutionWidth, resolutionHeight, rs.format.bgr8, frameRate)
deviceManager = DeviceManager(rs.context(), rsConfig)
deviceManager.enable_all_emitters()
deviceManager.load_settings_json('calibrationSettings.json')
cameraOrder = [
'822512060522',
'823112060874',
'823112060112',
'822512060553',
'822512060853',
'822512061105']
transformation = calibration.newIterative(args.new,deviceManager, cameraOrder, 4,5,0.0762)
deviceManager.disable_all_devices()
rsConfig.disable_stream(rs.stream.depth)
rsConfig.disable_stream(rs.stream.color)
resolutionWidth = 848
resolutionHeight = 480
frameRate = 90
rsConfig.enable_stream(rs.stream.depth, resolutionWidth, resolutionHeight, rs.format.z16, frameRate)
rsConfig.enable_stream(rs.stream.infrared, 1, resolutionWidth, resolutionHeight, rs.format.y8, frameRate)
deviceManager.load_settings_json('markerSettings.json')
deviceManager.enable_all_devices()
input("Calibration complete, press Enter to continue...")
script_path = os.path.abspath(__file__)
scriptDir = os.path.split(script_path)[0]
if not os.path.isdir(os.path.join(os.getcwd(), args.folder)):
os.mkdir(args.folder) #make base folder if it doesn't exist already
iteration = 1
while True:
        loc = os.path.join(args.folder, format(iteration, '02d'))
        saveDirectory = os.path.join(os.getcwd(), loc)
        if not os.path.isdir(saveDirectory):
            os.mkdir(saveDirectory) #make iteration folder if it doesn't exist already
data = stream.start(deviceManager, transformation, saveDirectory,args.time)
input("Data Collection complete, press Enter to continue...")
iteration+=1
|
py
|
1a57284697ad5775fbefdd1edf956a7a41e193bf
|
#!/usr/bin/env python3
import binascii
import os
import struct
import time
from collections import namedtuple
import numpy as np
from opendbc import DBC_PATH
from common.realtime import Ratekeeper
from selfdrive.config import Conversions as CV
import selfdrive.messaging as messaging
from selfdrive.services import service_list
from selfdrive.car import crc8_pedal
from selfdrive.car.honda.hondacan import fix
from selfdrive.car.honda.values import CAR
from selfdrive.car.honda.carstate import get_can_signals
from selfdrive.boardd.boardd import can_capnp_to_can_list, can_list_to_can_capnp
from selfdrive.can.plant_can_parser import CANParser
from selfdrive.car.honda.interface import CarInterface
from common.dbc import dbc
honda = dbc(os.path.join(DBC_PATH, "honda_civic_touring_2016_can_generated.dbc"))
# Trick: set 0x201 (interceptor) in the fingerprint so gas is controlled as if there were an interceptor
CP = CarInterface.get_params(CAR.CIVIC, {0: {0x201: 6}, 1: {}, 2: {}, 3: {}})
def car_plant(pos, speed, grade, gas, brake):
# vehicle parameters
mass = 1700
aero_cd = 0.3
force_peak = mass*3.
force_brake_peak = -mass*10. #1g
power_peak = 100000 # 100kW
speed_base = power_peak/force_peak
rolling_res = 0.01
g = 9.81
frontal_area = 2.2
air_density = 1.225
gas_to_peak_linear_slope = 3.33
brake_to_peak_linear_slope = 0.3
creep_accel_v = [1., 0.]
creep_accel_bp = [0., 1.5]
#*** longitudinal model ***
# find speed where peak torque meets peak power
force_brake = brake * force_brake_peak * brake_to_peak_linear_slope
if speed < speed_base: # torque control
force_gas = gas * force_peak * gas_to_peak_linear_slope
else: # power control
force_gas = gas * power_peak / speed * gas_to_peak_linear_slope
force_grade = - grade * mass # positive grade means uphill
creep_accel = np.interp(speed, creep_accel_bp, creep_accel_v)
force_creep = creep_accel * mass
force_resistance = -(rolling_res * mass * g + 0.5 * speed**2 * aero_cd * air_density * frontal_area)
force = force_gas + force_brake + force_resistance + force_grade + force_creep
acceleration = force / mass
# TODO: lateral model
return speed, acceleration
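# Illustrative only (not part of the original plant): a minimal sketch showing how
# car_plant() could be stepped on its own to sanity-check the longitudinal model.
# The timestep and pedal values below are arbitrary assumptions.
def _demo_car_plant(steps=500, dt=0.01, gas=0.2, brake=0.0, grade=0.0):
    pos, speed = 0.0, 0.0
    for _ in range(steps):
        _, acceleration = car_plant(pos, speed, grade, gas, brake)
        speed = max(0.0, speed + acceleration * dt)
        pos += speed * dt
    return pos, speed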
def get_car_can_parser():
dbc_f = 'honda_civic_touring_2016_can_generated.dbc'
signals = [
("STEER_TORQUE", 0xe4, 0),
("STEER_TORQUE_REQUEST", 0xe4, 0),
("COMPUTER_BRAKE", 0x1fa, 0),
("COMPUTER_BRAKE_REQUEST", 0x1fa, 0),
("GAS_COMMAND", 0x200, 0),
]
checks = [
(0xe4, 100),
(0x1fa, 50),
(0x200, 50),
]
return CANParser(dbc_f, signals, checks)
def to_3_byte(x):
# Convert into 12 bit value
s = struct.pack("!H", int(x))
return binascii.hexlify(s)[1:]
def to_3s_byte(x):
s = struct.pack("!h", int(x))
return binascii.hexlify(s)[1:]
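# e.g. to_3_byte(256) -> b'100' and to_3s_byte(-1) -> b'fff' (12-bit values as 3 hex characters)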
class Plant():
messaging_initialized = False
def __init__(self, lead_relevancy=False, rate=100, speed=0.0, distance_lead=2.0):
self.rate = rate
if not Plant.messaging_initialized:
Plant.logcan = messaging.pub_sock(service_list['can'].port)
Plant.sendcan = messaging.sub_sock(service_list['sendcan'].port)
Plant.model = messaging.pub_sock(service_list['model'].port)
Plant.live_params = messaging.pub_sock(service_list['liveParameters'].port)
Plant.health = messaging.pub_sock(service_list['health'].port)
Plant.thermal = messaging.pub_sock(service_list['thermal'].port)
Plant.driverMonitoring = messaging.pub_sock(service_list['driverMonitoring'].port)
Plant.cal = messaging.pub_sock(service_list['liveCalibration'].port)
Plant.controls_state = messaging.sub_sock(service_list['controlsState'].port)
Plant.plan = messaging.sub_sock(service_list['plan'].port)
Plant.messaging_initialized = True
self.frame = 0
self.angle_steer = 0.
self.gear_choice = 0
self.speed, self.speed_prev = 0., 0.
self.esp_disabled = 0
self.main_on = 1
self.user_gas = 0
self.computer_brake,self.user_brake = 0,0
self.brake_pressed = 0
self.angle_steer_rate = 0
self.distance, self.distance_prev = 0., 0.
self.speed, self.speed_prev = speed, speed
self.steer_error, self.brake_error, self.steer_not_allowed = 0, 0, 0
self.gear_shifter = 8 # D gear
self.pedal_gas = 0
self.cruise_setting = 0
self.seatbelt, self.door_all_closed = True, True
self.steer_torque, self.v_cruise, self.acc_status = 0, 0, 0 # v_cruise is reported from can, not the one used for controls
self.lead_relevancy = lead_relevancy
# lead car
self.distance_lead, self.distance_lead_prev = distance_lead , distance_lead
self.rk = Ratekeeper(rate, print_delay_threshold=100)
self.ts = 1./rate
self.cp = get_car_can_parser()
self.response_seen = False
time.sleep(1)
messaging.drain_sock(Plant.sendcan)
messaging.drain_sock(Plant.controls_state)
def close(self):
Plant.logcan.close()
Plant.model.close()
Plant.live_params.close()
def speed_sensor(self, speed):
if speed<0.3:
return 0
else:
return speed * CV.MS_TO_KPH
def current_time(self):
return float(self.rk.frame) / self.rate
def step(self, v_lead=0.0, cruise_buttons=None, grade=0.0, publish_model = True):
gen_signals, gen_checks = get_can_signals(CP)
sgs = [s[0] for s in gen_signals]
msgs = [s[1] for s in gen_signals]
cks_msgs = set(check[0] for check in gen_checks)
cks_msgs.add(0x18F)
cks_msgs.add(0x30C)
# ******** get messages sent to the car ********
can_msgs = []
for a in messaging.drain_sock(Plant.sendcan, wait_for_one=self.response_seen):
can_msgs.extend(can_capnp_to_can_list(a.sendcan, [0,2]))
# After the first response the car is done fingerprinting, so we can run in lockstep with controlsd
if can_msgs:
self.response_seen = True
self.cp.update_can(can_msgs)
# ******** get controlsState messages for plotting ***
controls_state_msgs = []
for a in messaging.drain_sock(Plant.controls_state, wait_for_one=self.response_seen):
controls_state_msgs.append(a.controlsState)
fcw = None
for a in messaging.drain_sock(Plant.plan):
if a.plan.fcw:
fcw = True
if self.cp.vl[0x1fa]['COMPUTER_BRAKE_REQUEST']:
brake = self.cp.vl[0x1fa]['COMPUTER_BRAKE'] * 0.003906248
else:
brake = 0.0
if self.cp.vl[0x200]['GAS_COMMAND'] > 0:
gas = self.cp.vl[0x200]['GAS_COMMAND'] / 256.0
else:
gas = 0.0
if self.cp.vl[0xe4]['STEER_TORQUE_REQUEST']:
steer_torque = self.cp.vl[0xe4]['STEER_TORQUE']*1.0/0xf00
else:
steer_torque = 0.0
distance_lead = self.distance_lead_prev + v_lead * self.ts
# ******** run the car ********
speed, acceleration = car_plant(self.distance_prev, self.speed_prev, grade, gas, brake)
distance = self.distance_prev + speed * self.ts
speed = self.speed_prev + self.ts * acceleration
if speed <= 0:
speed = 0
acceleration = 0
# ******** lateral ********
self.angle_steer -= (steer_torque/10.0) * self.ts
# *** radar model ***
if self.lead_relevancy:
d_rel = np.maximum(0., distance_lead - distance)
v_rel = v_lead - speed
else:
d_rel = 200.
v_rel = 0.
lateral_pos_rel = 0.
# print at 5hz
if (self.frame % (self.rate//5)) == 0:
print("%6.2f m %6.2f m/s %6.2f m/s2 %.2f ang gas: %.2f brake: %.2f steer: %5.2f lead_rel: %6.2f m %6.2f m/s" % (distance, speed, acceleration, self.angle_steer, gas, brake, steer_torque, d_rel, v_rel))
# ******** publish the car ********
vls_tuple = namedtuple('vls', [
'XMISSION_SPEED',
'WHEEL_SPEED_FL', 'WHEEL_SPEED_FR', 'WHEEL_SPEED_RL', 'WHEEL_SPEED_RR',
'STEER_ANGLE', 'STEER_ANGLE_RATE', 'STEER_TORQUE_SENSOR', 'STEER_TORQUE_MOTOR',
'LEFT_BLINKER', 'RIGHT_BLINKER',
'GEAR',
'WHEELS_MOVING',
'BRAKE_ERROR_1', 'BRAKE_ERROR_2',
'SEATBELT_DRIVER_LAMP', 'SEATBELT_DRIVER_LATCHED',
'BRAKE_PRESSED', 'BRAKE_SWITCH',
'CRUISE_BUTTONS',
'ESP_DISABLED',
'HUD_LEAD',
'USER_BRAKE',
'STEER_STATUS',
'GEAR_SHIFTER',
'PEDAL_GAS',
'CRUISE_SETTING',
'ACC_STATUS',
'CRUISE_SPEED_PCM',
'CRUISE_SPEED_OFFSET',
'DOOR_OPEN_FL', 'DOOR_OPEN_FR', 'DOOR_OPEN_RL', 'DOOR_OPEN_RR',
'CAR_GAS',
'MAIN_ON',
'EPB_STATE',
'BRAKE_HOLD_ACTIVE',
'INTERCEPTOR_GAS',
'INTERCEPTOR_GAS2',
'IMPERIAL_UNIT',
])
vls = vls_tuple(
self.speed_sensor(speed),
self.speed_sensor(speed), self.speed_sensor(speed), self.speed_sensor(speed), self.speed_sensor(speed),
self.angle_steer, self.angle_steer_rate, 0, 0,#Steer torque sensor
0, 0, # Blinkers
self.gear_choice,
speed != 0,
self.brake_error, self.brake_error,
not self.seatbelt, self.seatbelt, # Seatbelt
self.brake_pressed, 0., #Brake pressed, Brake switch
cruise_buttons,
self.esp_disabled,
0, # HUD lead
self.user_brake,
self.steer_error,
self.gear_shifter,
self.pedal_gas,
self.cruise_setting,
self.acc_status,
self.v_cruise,
0, # Cruise speed offset
0, 0, 0, 0, # Doors
self.user_gas,
self.main_on,
0, # EPB State
0, # Brake hold
0, # Interceptor feedback
0, # Interceptor 2 feedback
False
)
# TODO: publish each message at proper frequency
can_msgs = []
for msg in set(msgs):
msg_struct = {}
indxs = [i for i, x in enumerate(msgs) if msg == msgs[i]]
for i in indxs:
msg_struct[sgs[i]] = getattr(vls, sgs[i])
if "COUNTER" in honda.get_signals(msg):
msg_struct["COUNTER"] = self.frame % 4
if "COUNTER_PEDAL" in honda.get_signals(msg):
msg_struct["COUNTER_PEDAL"] = self.frame % 0xf
msg = honda.lookup_msg_id(msg)
msg_data = honda.encode(msg, msg_struct)
if "CHECKSUM" in honda.get_signals(msg):
msg_data = fix(msg_data, msg)
if "CHECKSUM_PEDAL" in honda.get_signals(msg):
msg_struct["CHECKSUM_PEDAL"] = crc8_pedal(msg_data[:-1])
msg_data = honda.encode(msg, msg_struct)
can_msgs.append([msg, 0, msg_data, 0])
# add the radar message
# TODO: use the DBC
if self.frame % 5 == 0:
radar_state_msg = b'\x79\x00\x00\x00\x00\x00\x00\x00'
radar_msg = to_3_byte(d_rel*16.0) + \
to_3_byte(int(lateral_pos_rel*16.0)&0x3ff) + \
to_3s_byte(int(v_rel*32.0)) + \
b"0f00000"
radar_msg = binascii.unhexlify(radar_msg)
can_msgs.append([0x400, 0, radar_state_msg, 1])
can_msgs.append([0x445, 0, radar_msg, 1])
# add camera msg so controlsd thinks it's alive
msg_struct["COUNTER"] = self.frame % 4
msg = honda.lookup_msg_id(0xe4)
msg_data = honda.encode(msg, msg_struct)
msg_data = fix(msg_data, 0xe4)
can_msgs.append([0xe4, 0, msg_data, 2])
# Fake sockets that controlsd subscribes to
live_parameters = messaging.new_message()
live_parameters.init('liveParameters')
live_parameters.liveParameters.valid = True
live_parameters.liveParameters.sensorValid = True
live_parameters.liveParameters.posenetValid = True
live_parameters.liveParameters.steerRatio = CP.steerRatio
live_parameters.liveParameters.stiffnessFactor = 1.0
Plant.live_params.send(live_parameters.to_bytes())
driver_monitoring = messaging.new_message()
driver_monitoring.init('driverMonitoring')
driver_monitoring.driverMonitoring.faceOrientation = [0.] * 3
driver_monitoring.driverMonitoring.facePosition = [0.] * 2
Plant.driverMonitoring.send(driver_monitoring.to_bytes())
health = messaging.new_message()
health.init('health')
health.health.controlsAllowed = True
Plant.health.send(health.to_bytes())
thermal = messaging.new_message()
thermal.init('thermal')
thermal.thermal.freeSpace = 1.
thermal.thermal.batteryPercent = 100
Plant.thermal.send(thermal.to_bytes())
# ******** publish a fake model going straight and fake calibration ********
# note that this is worst case for MPC, since model will delay long mpc by one time step
if publish_model and self.frame % 5 == 0:
md = messaging.new_message()
cal = messaging.new_message()
md.init('model')
cal.init('liveCalibration')
md.model.frameId = 0
for x in [md.model.path, md.model.leftLane, md.model.rightLane]:
x.points = [0.0]*50
x.prob = 1.0
x.std = 1.0
if self.lead_relevancy:
d_rel = np.maximum(0., distance_lead - distance)
v_rel = v_lead - speed
prob = 1.0
else:
d_rel = 200.
v_rel = 0.
prob = 0.0
md.model.lead.dist = float(d_rel)
md.model.lead.prob = prob
md.model.lead.relY = 0.0
md.model.lead.relYStd = 1.
md.model.lead.relVel = float(v_rel)
md.model.lead.relVelStd = 1.
md.model.lead.relA = 0.0
md.model.lead.relAStd = 10.
md.model.lead.std = 1.0
cal.liveCalibration.calStatus = 1
cal.liveCalibration.calPerc = 100
cal.liveCalibration.rpyCalib = [0.] * 3
# fake values?
Plant.model.send(md.to_bytes())
Plant.cal.send(cal.to_bytes())
Plant.logcan.send(can_list_to_can_capnp(can_msgs))
# ******** update prevs ********
self.frame += 1
if self.response_seen:
self.rk.monitor_time()
self.speed = speed
self.distance = distance
self.distance_lead = distance_lead
self.speed_prev = speed
self.distance_prev = distance
self.distance_lead_prev = distance_lead
else:
# Don't advance time when controlsd is not yet ready
self.rk.keep_time()
self.rk._frame = 0
return {
"distance": distance,
"speed": speed,
"acceleration": acceleration,
"distance_lead": distance_lead,
"brake": brake,
"gas": gas,
"steer_torque": steer_torque,
"fcw": fcw,
"controls_state_msgs": controls_state_msgs,
}
# simple engage in standalone mode
def plant_thread(rate=100):
  plant = Plant(rate=rate)
while 1:
plant.step()
if __name__ == "__main__":
plant_thread()
|
py
|
1a572869678b4ffbedccbe7c3c4eb3fed3c043c5
|
"""
Tests for vSQL methods.
The tests are done via the Python DB interface.
To run the tests, :mod:`pytest` is required.
"""
from conftest import *
###
### Tests
###
def test_str_lower(config_persons):
check_vsql(config_persons, "'MISSISSIPPI'.lower() == 'mississippi'")
def test_str_upper(config_persons):
check_vsql(config_persons, "'mississippi'.upper() == 'MISSISSIPPI'")
def test_str_startswith(config_persons):
check_vsql(config_persons, "'mississippi'.startswith('missi')")
def test_str_endswith(config_persons):
check_vsql(config_persons, "'mississippi'.endswith('sippi')")
def test_str_strip1(config_persons):
check_vsql(config_persons, "'\\r\\t\\n foo \\r\\t\\n '.strip() == 'foo'")
def test_str_strip2(config_persons):
check_vsql(config_persons, "'xyzzygurkxyzzy'.strip('xyz') == 'gurk'")
def test_str_lstrip1(config_persons):
check_vsql(config_persons, "'\\r\\t\\n foo \\r\\t\\n '.lstrip() == 'foo \\r\\t\\n '")
def test_str_lstrip2(config_persons):
check_vsql(config_persons, "'xyzzygurkxyzzy'.lstrip('xyz') == 'gurkxyzzy'")
def test_str_rstrip1(config_persons):
check_vsql(config_persons, "'\\r\\t\\n foo \\r\\t\\n '.rstrip() == '\\r\\t\\n foo'")
def test_str_rstrip2(config_persons):
check_vsql(config_persons, "'xyzzygurkxyzzy'.rstrip('xyz') == 'xyzzygurk'")
def test_str_find1(config_persons):
check_vsql(config_persons, "'gurkgurk'.find('ks') == -1")
def test_str_find2(config_persons):
check_vsql(config_persons, "'gurkgurk'.find('rk') == 2")
def test_str_find3(config_persons):
check_vsql(config_persons, "'gurkgurk'.find('rk', 2) == 2")
def test_str_find4(config_persons):
check_vsql(config_persons, "'gurkgurk'.find('rk', -3) == 6")
def test_str_find5(config_persons):
check_vsql(config_persons, "'gurkgurk'.find('rk', 2, 4) == 2")
def test_str_find6(config_persons):
check_vsql(config_persons, "'gurkgurk'.find('rk', 4, 8) == 6")
def test_str_find7(config_persons):
check_vsql(config_persons, "'gurkgurk'.find('ur', -4, -1) == 5")
def test_str_find8(config_persons):
check_vsql(config_persons, "'gurkgurk'.find('rk', 2, 3) == -1")
def test_str_find9(config_persons):
check_vsql(config_persons, "'gurkgurk'.find('rk', 7) == -1")
def test_str_replace(config_persons):
check_vsql(config_persons, "'gurk'.replace('u', 'oo') == 'goork'")
def test_str_split1(config_persons):
check_vsql(config_persons, "' \\t\\r\\nf \\t\\r\\no \\t\\r\\no \\t\\r\\n'.split() == ['f', 'o', 'o']")
def test_str_split2(config_persons):
check_vsql(config_persons, "' \\t\\r\\nf \\t\\r\\no \\t\\r\\no \\t\\r\\n'.split(None, 1) == ['f', 'o \\t\\r\\no']")
def test_str_split3(config_persons):
check_vsql(config_persons, "'xxfxxoxxoxx'.split('xx') == [None, 'f', 'o', 'o', None]")
def test_str_split4(config_persons):
check_vsql(config_persons, "'xxfxxoxxoxx'.split('xx', 2) == [None, 'f', 'oxxoxx']")
def test_str_join_str(config_persons):
check_vsql(config_persons, "','.join('1234') == '1,2,3,4'")
def test_str_join_list(config_persons):
check_vsql(config_persons, "','.join(['1', '2', '3', '4']) == '1,2,3,4'")
def test_color_lum1(config_persons):
check_vsql(config_persons, "#000.lum() == 0.0")
def test_color_lum2(config_persons):
check_vsql(config_persons, "#fff.lum() == 1.0")
def test_date_week(config_persons):
check_vsql(config_persons, "@(2000-02-29).week() == 9")
def test_datetime_week(config_persons):
check_vsql(config_persons, "@(2000-02-29T12:34:56).week() == 9")
|
py
|
1a5728f51031d36f1fbaa1a753bed2faac1d3f19
|
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from . import GCodeWriter
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"mesh_writer": {
"output": [{
"extension": "gcode",
"description": catalog.i18nc("@item:inlistbox", "G-code File"),
"mime_type": "text/x-gcode",
"mode": GCodeWriter.GCodeWriter.OutputMode.TextMode
}]
}
}
def register(app):
return { "mesh_writer": GCodeWriter.GCodeWriter() }
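# getMetaData() and register() are the plugin entry points; the plugin loader is assumed
# to call register(app) to obtain the GCodeWriter instance keyed by its plugin type
# (the loader itself lives outside this file).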
|
py
|
1a5729710825623e238745c3617417ddfaf047af
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_weapon import Weapon
from ... import dice as D, material as M
class BaseTwoHandedSword(Weapon):
pass
class TwoHandedSword(BaseTwoHandedSword):
def __init__(self):
super().__init__('two-handed sword', weight=150, damage=D.Dice.from_str('3d6'), material=M.Iron, hit=0)
class Tsurugi(BaseTwoHandedSword):
def __init__(self):
super().__init__('tsurugi', weight=60, damage=D.Dice.from_str('d8+2d6'), material=M.Metal, hit=0)
|
py
|
1a57298f8d0d839864edae5c723d90da3ba076d9
|
# coding=utf-8
import os
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
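# Count how many objects (excluding the 'hoja' and 'dano' labels) are annotated in each
# XML file and plot how many images fall into each object-count bucket.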
path = os.path.join(os.getcwd(), 'images', 'annotations')
if not os.path.exists(path):
    raise ValueError('The images\\annotations directory does not exist')
else:
files = os.listdir(path)
results = {}
files = [file for file in files if file.endswith('.XML') or file.endswith('.xml')]
for file in files:
objectsDetected = 0
filePath = os.path.join(path, file)
tree = ET.parse(filePath)
root = tree.getroot()
for member in root.findall('object'):
label = member[0].text
if label != 'hoja' and label != 'dano':
objectsDetected = objectsDetected + 1
if objectsDetected in results:
results[objectsDetected] = results[objectsDetected] + 1
else:
results[objectsDetected] = 1
    print("Number of objects, Number of images")
for key, value in results.items():
print("{0},{1}".format(key, value))
plt.bar(list(results.keys()), results.values(), color='g', width=0.9)
    plt.ylabel('Number of images')
    plt.xlabel('Number of annotated objects (excluding leaves and damage)')
plt.show()
|
py
|
1a572a8c3ed56148371ce9f32112a2669803563f
|
"""Test :py:mod:`lmp.util.tknzr` signatures."""
import inspect
from inspect import Parameter, Signature
from typing import Any
import lmp.util.tknzr
from lmp.tknzr import BaseTknzr
def test_module_attribute() -> None:
"""Ensure module attributes' signatures."""
assert hasattr(lmp.util.tknzr, 'FILE_NAME')
assert lmp.util.tknzr.FILE_NAME == 'tknzr.pkl'
def test_module_method() -> None:
"""Ensure module functions' signatures."""
assert hasattr(lmp.util.tknzr, 'create')
assert inspect.isfunction(lmp.util.tknzr.create)
assert inspect.signature(lmp.util.tknzr.create) == Signature(
parameters=[
Parameter(
name='tknzr_name',
kind=Parameter.POSITIONAL_OR_KEYWORD,
default=Parameter.empty,
annotation=str,
),
Parameter(
name='kwargs',
kind=Parameter.VAR_KEYWORD,
default=Parameter.empty,
annotation=Any,
),
],
return_annotation=BaseTknzr,
)
assert hasattr(lmp.util.tknzr, 'load')
assert inspect.isfunction(lmp.util.tknzr.load)
assert inspect.signature(lmp.util.tknzr.load) == Signature(
parameters=[
Parameter(
name='exp_name',
kind=Parameter.POSITIONAL_OR_KEYWORD,
default=Parameter.empty,
annotation=str,
),
],
return_annotation=BaseTknzr,
)
assert hasattr(lmp.util.tknzr, 'save')
assert inspect.isfunction(lmp.util.tknzr.save)
assert inspect.signature(lmp.util.tknzr.save) == Signature(
parameters=[
Parameter(
name='exp_name',
kind=Parameter.POSITIONAL_OR_KEYWORD,
default=Parameter.empty,
annotation=str,
),
Parameter(
name='tknzr',
kind=Parameter.POSITIONAL_OR_KEYWORD,
default=Parameter.empty,
annotation=BaseTknzr,
),
],
return_annotation=None,
)
|
py
|
1a572b05ba443e1e9f5b7fc494b43cb4aadd93f7
|
import numpy as np
import pyCubbyFlow
from pytest import approx
from pytest_utils import *
cnt = 0
def test_grid2():
global cnt
a = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
gridSpacing=(1, 2),
gridOrigin=(7, 5))
assert a.resolution == (3, 4)
assert_vector_similar(a.gridOrigin, (7, 5))
assert_vector_similar(a.gridSpacing, (1, 2))
assert_bounding_box_similar(
a.boundingBox, pyCubbyFlow.BoundingBox2D((7, 5), (10, 13)))
f = a.cellCenterPosition
assert_vector_similar(f(0, 0), (7.5, 6))
b = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
gridSpacing=(1, 2),
gridOrigin=(7, 5))
assert a.HasSameShape(b)
def func(idx):
global cnt
assert idx[0] >= 0 and idx[0] < 3
assert idx[1] >= 0 and idx[1] < 4
cnt += 1
cnt = 0
a.ForEachCellIndex(func)
assert cnt == 12
def test_scalar_grid2():
global cnt
a = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
gridSpacing=(1, 2),
gridOrigin=(7, 5))
a.Resize(resolution=(12, 7),
gridSpacing=(3, 4),
gridOrigin=(9, 2))
assert a.resolution == (12, 7)
assert_vector_similar(a.gridOrigin, (9, 2))
assert_vector_similar(a.gridSpacing, (3, 4))
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == 0.0
a[5, 6] = 17.0
assert a[5, 6] == 17.0
a.Fill(42.0)
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == 42.0
def func(pt):
return pt.x ** 2 + pt.y ** 2
a.Fill(func)
pos = a.DataPosition()
acc = np.array(a.DataView(), copy=False)
for j in range(a.resolution.y):
for i in range(a.resolution.x):
pt = pos(i, j)
assert func(pt) == a[i, j]
assert func(pt) == approx(a.Sample(pt))
assert acc[j, i] == a[i, j]
# Can't compare to analytic solution because FDM with such a coarse
# grid will return inaccurate results by design.
assert_vector_similar(a.GradientAtDataPoint((i, j)), a.Gradient(pt))
assert a.LaplacianAtDataPoint((i, j)) == a.Laplacian(pt)
def func(idx):
global cnt
assert idx[0] >= 0 and idx[0] < a.resolution.x + 1
assert idx[1] >= 0 and idx[1] < a.resolution.y + 1
cnt += 1
cnt = 0
a.ForEachDataPointIndex(func)
assert cnt == (a.resolution.x + 1) * (a.resolution.y + 1)
blob = a.Serialize()
b = pyCubbyFlow.VertexCenteredScalarGrid2()
b.Deserialize(blob)
assert b.resolution == (12, 7)
assert_vector_similar(b.gridOrigin, (9, 2))
assert_vector_similar(b.gridSpacing, (3, 4))
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == b[i, j]
def test_vertex_centered_scalar_grid2():
# CTOR
a = pyCubbyFlow.VertexCenteredScalarGrid2()
assert a.resolution == (1, 1)
assert_vector_similar(a.gridOrigin, (0.0, 0.0))
assert_vector_similar(a.gridSpacing, (1.0, 1.0))
a = pyCubbyFlow.VertexCenteredScalarGrid2((3, 4), (1, 2), (7, 5))
assert a.resolution == (3, 4)
assert_vector_similar(a.gridOrigin, (7, 5))
assert_vector_similar(a.gridSpacing, (1, 2))
a = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
gridSpacing=(1, 2),
gridOrigin=(7, 5))
assert a.resolution == (3, 4)
assert_vector_similar(a.gridOrigin, (7, 5))
assert_vector_similar(a.gridSpacing, (1, 2))
a = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
domainSizeX=12.0,
gridOrigin=(7, 5))
assert a.resolution == (3, 4)
assert_vector_similar(a.gridOrigin, (7, 5))
assert_vector_similar(a.gridSpacing, (4, 4))
# Properties
a = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
gridSpacing=(1, 2),
gridOrigin=(7, 5))
assert_vector_similar(a.dataSize, (4, 5))
assert_vector_similar(a.dataOrigin, (7, 5))
# Modifiers
b = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(6, 3),
gridSpacing=(5, 9),
gridOrigin=(1, 2))
a.Fill(42.0)
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == 42.0
a.Swap(b)
assert a.resolution == (6, 3)
assert_vector_similar(a.gridOrigin, (1, 2))
assert_vector_similar(a.gridSpacing, (5, 9))
assert b.resolution == (3, 4)
assert_vector_similar(b.gridOrigin, (7, 5))
assert_vector_similar(b.gridSpacing, (1, 2))
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == 0.0
for j in range(b.resolution.y):
for i in range(b.resolution.x):
assert b[i, j] == 42.0
a.Set(b)
assert a.resolution == (3, 4)
assert_vector_similar(a.gridOrigin, (7, 5))
assert_vector_similar(a.gridSpacing, (1, 2))
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == 42.0
c = a.Clone()
assert c.resolution == (3, 4)
assert_vector_similar(c.gridOrigin, (7, 5))
assert_vector_similar(c.gridSpacing, (1, 2))
for j in range(c.resolution.y):
for i in range(c.resolution.x):
assert c[i, j] == 42.0
# ------------------------------------------------------------------------------
def test_grid3():
global cnt
a = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
gridSpacing=(1, 2, 3),
gridOrigin=(7, 5, 3))
assert a.resolution == (3, 4, 5)
assert_vector_similar(a.gridOrigin, (7, 5, 3))
assert_vector_similar(a.gridSpacing, (1, 2, 3))
assert_bounding_box_similar(
a.boundingBox, pyCubbyFlow.BoundingBox3D((7, 5, 3), (10, 13, 18)))
f = a.cellCenterPosition
assert_vector_similar(f(0, 0, 0), (7.5, 6, 4.5))
b = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
gridSpacing=(1, 2, 3),
gridOrigin=(7, 5, 3))
assert a.HasSameShape(b)
def func(idx):
global cnt
assert idx[0] >= 0 and idx[0] < 3
assert idx[1] >= 0 and idx[1] < 4
assert idx[2] >= 0 and idx[2] < 5
cnt += 1
cnt = 0
a.ForEachCellIndex(func)
assert cnt == 60
def test_scalar_grid3():
global cnt
a = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
gridSpacing=(1, 2, 3),
gridOrigin=(7, 5, 3))
a.Resize(resolution=(12, 7, 2),
gridSpacing=(3, 4, 5),
gridOrigin=(9, 2, 5))
assert a.resolution == (12, 7, 2)
assert_vector_similar(a.gridOrigin, (9, 2, 5))
assert_vector_similar(a.gridSpacing, (3, 4, 5))
for k in range(a.resolution.z):
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j, k] == 0.0
a[5, 6, 1] = 17.0
assert a[5, 6, 1] == 17.0
a.Fill(42.0)
for k in range(a.resolution.z):
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j, k] == 42.0
def func(pt):
return pt.x ** 2 + pt.y ** 2 + pt.z ** 2
a.Fill(func)
pos = a.DataPosition()
acc = np.array(a.DataView(), copy=False)
for k in range(a.resolution.z):
for j in range(a.resolution.y):
for i in range(a.resolution.x):
pt = pos(i, j, k)
assert func(pt) == a[i, j, k]
assert func(pt) == approx(a.Sample(pt))
assert acc[k, j, i] == a[i, j, k]
# Can't compare to analytic solution because FDM with such a
# coarse grid will return inaccurate results by design.
assert_vector_similar(
a.GradientAtDataPoint((i, j, k)), a.Gradient(pt))
assert a.LaplacianAtDataPoint((i, j, k)) == a.Laplacian(pt)
def func(idx):
global cnt
assert idx[0] >= 0 and idx[0] < a.resolution.x
assert idx[1] >= 0 and idx[1] < a.resolution.y
assert idx[2] >= 0 and idx[2] < a.resolution.z
cnt += 1
cnt = 0
a.ForEachDataPointIndex(func)
assert cnt == a.resolution.x * a.resolution.y * a.resolution.z
blob = a.Serialize()
b = pyCubbyFlow.CellCenteredScalarGrid3()
b.Deserialize(blob)
assert b.resolution == (12, 7, 2)
assert_vector_similar(b.gridOrigin, (9, 2, 5))
assert_vector_similar(b.gridSpacing, (3, 4, 5))
for k in range(a.resolution.z):
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j, k] == b[i, j, k]
def test_cell_centered_scalar_grid3():
# CTOR
a = pyCubbyFlow.CellCenteredScalarGrid3()
assert a.resolution == (1, 1, 1)
assert_vector_similar(a.gridOrigin, (0.0, 0.0, 0.0))
assert_vector_similar(a.gridSpacing, (1.0, 1.0, 1.0))
a = pyCubbyFlow.CellCenteredScalarGrid3((3, 4, 5), (1, 2, 3), (7, 5, 2))
assert a.resolution == (3, 4, 5)
assert_vector_similar(a.gridOrigin, (7, 5, 2))
assert_vector_similar(a.gridSpacing, (1, 2, 3))
a = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
gridSpacing=(1, 2, 3),
gridOrigin=(7, 5, 2))
assert a.resolution == (3, 4, 5)
assert_vector_similar(a.gridOrigin, (7, 5, 2))
assert_vector_similar(a.gridSpacing, (1, 2, 3))
a = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
domainSizeX=12.0,
gridOrigin=(7, 5, 2))
assert a.resolution == (3, 4, 5)
assert_vector_similar(a.gridOrigin, (7, 5, 2))
assert_vector_similar(a.gridSpacing, (4, 4, 4))
# Properties
a = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
gridSpacing=(1, 2, 3),
gridOrigin=(7, 5, 2))
assert_vector_similar(a.dataSize, (3, 4, 5))
assert_vector_similar(a.dataOrigin, (7.5, 6, 3.5))
# Modifiers
b = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(6, 3, 7),
gridSpacing=(5, 9, 3),
gridOrigin=(1, 2, 8))
a.Fill(42.0)
for k in range(a.resolution.z):
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j, k] == 42.0
a.Swap(b)
assert a.resolution == (6, 3, 7)
assert_vector_similar(a.gridOrigin, (1, 2, 8))
assert_vector_similar(a.gridSpacing, (5, 9, 3))
assert b.resolution == (3, 4, 5)
assert_vector_similar(b.gridOrigin, (7, 5, 2))
assert_vector_similar(b.gridSpacing, (1, 2, 3))
for k in range(a.resolution.z):
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j, k] == 0.0
for k in range(b.resolution.z):
for j in range(b.resolution.y):
for i in range(b.resolution.x):
assert b[i, j, k] == 42.0
a.Set(b)
assert a.resolution == (3, 4, 5)
assert_vector_similar(a.gridOrigin, (7, 5, 2))
assert_vector_similar(a.gridSpacing, (1, 2, 3))
for k in range(a.resolution.z):
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j, k] == 42.0
c = a.Clone()
assert c.resolution == (3, 4, 5)
assert_vector_similar(c.gridOrigin, (7, 5, 2))
assert_vector_similar(c.gridSpacing, (1, 2, 3))
for k in range(c.resolution.z):
for j in range(c.resolution.y):
for i in range(c.resolution.x):
assert c[i, j, k] == 42.0
|
py
|
1a572b9bb29df8fa0cb5fa647a51aaa54943cd27
|
from .residual import *
|
py
|
1a572d33863de7fdcd7d2da4bc775b9eeba4c393
|
import plotly.figure_factory as ff
import pandas as pd
import csv
data = pd.read_csv(r"C:\Users\bhuvi\Google Drive\Code\Python\Class 108 - Bell Curve\data.csv")
fig = ff.create_distplot([data["Weight(Pounds)"].tolist()], ["weight"], show_hist=False)
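# show_hist=False keeps only the smoothed distribution curve (the bell curve), without histogram bars.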
fig.show()
|
py
|
1a572f33d10726c25a0e185353ba4833bddb3c5d
|
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from decimal import Decimal
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_no_witness_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_block,
msg_no_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
FromHex,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitV0SignatureHash,
LegacySignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
softfork_active,
hex_str_to_bytes,
assert_raises_rpc_error,
bytes_to_hex_str,
)
from test_framework.eqpayconfig import *
from test_framework.eqpay import generatesynchronized
from test_framework.messages import COIN
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000 // FACTOR_REDUCED_BLOCK_TIME
SEGWIT_HEIGHT = 2020 if ENABLE_REDUCED_BLOCK_TIME else 520
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitV0SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize())
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
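    # Worked example with illustrative numbers: base_size=999, total_size=1402
    # -> int((3*999 + 1402 + 3) / 4) = int(4402 / 4) = 1100, i.e. ceil(4399 / 4).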
return vsize
def submit_old_blocks(node, n):
node.importprivkey("cRComRro8wTGnDTGqgpyP5vwwo24Tn831cPu3PZEdr2532JVPjrZ")
pubkey = "03716d5678c829d09cdfdb4bec058712de3ecd99968a4a064336ffb592342e21f9"
num_blocks_old = node.getblockcount()
for i in range(0, n):
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.vtx[0].vout[0].scriptPubKey = CScript([hex_str_to_bytes(pubkey), OP_CHECKSIG])
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize()))
assert_equal(node.getblockcount(), num_blocks_old+n)
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_and_ping(msg_tx(tx) if with_witness else msg_no_witness_tx(tx))
assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_and_ping(msg_block(block) if with_witness else msg_no_witness_block(block))
assert_equal(node.getbestblockhash() == block.hash, accepted)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
# Avoid sending out msg_getdata in the mininode thread as a reply to invs.
# They are not needed and would only lead to races because we send msg_getdata out in the test thread
def on_inv(self, message):
pass
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
if success:
self.wait_for_getdata(timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
["-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT), "[email protected]"],
["-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
["-acceptnonstdtxn=1", "-segwitheight=-1"],
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=4):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
self.log.info("Starting tests before segwit activation")
self.segwit_active = False
self.test_non_witness_transaction()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.test_getblocktemplate_before_lockin()
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_standardness_v0()
self.log.info("Advancing to segwit activation")
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
self.test_superfluous_witness()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit active = {})".format(func.__name__, self.segwit_active))
# Assert segwit status is as expected
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
self.sync_blocks()
# Assert segwit status is as expected at end of subtest
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
return func_wrapper
@subtest
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=4)
block.solve()
self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed
txid = block.vtx[0].sha256
submit_old_blocks(self.nodes[0], COINBASE_MATURITY - 1) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(int((INITIAL_BLOCK_REWARD-Decimal('0.01')) * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_no_witness_tx(tx).serialize(), msg_tx(tx).serialize())
self.test_node.send_and_ping(msg_tx(tx)) # make sure the block was processed
assert tx.hash in self.nodes[0].getrawmempool()
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, (INITIAL_BLOCK_REWARD-1) * 100000000))
self.nodes[0].generate(1)
self.segwit_status = 'started' # we will advance to started immediately...
@subtest
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert tx.sha256 != tx.calc_sha256(with_witness=True)
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
blocktype = 2 | MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block1, True)
block2 = self.build_next_block(version=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if not self.segwit_active:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(), wit_block.serialize())
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert len(block.vtx[0].wit.vtxinwit) == 1
assert len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(), block.serialize())
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize()))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
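            # Block weight per BIP141: 3x the stripped (non-witness) size plus the total serialized size.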
weight = 3 * len(block.serialize(False)) + len(block.serialize())
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert block4.sha256 not in self.old_node.getdataset
@subtest
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, version 0 witness outputs are never spendable without
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
# node2 doesn't need to be connected for this test.
# (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# When the block is serialized without witness, validation fails because the transaction is
# invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
# without a witness is invalid).
# Note: The reject reason for this failure could be
# 'block-validation-failed' (if script check threads > 1) or
# 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)' (otherwise).
# TODO: support multiple acceptable reject reasons.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest
def test_getblocktemplate_before_lockin(self):
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
if node == self.nodes[2]:
# If this is a non-segwit node, we should not get a witness
# commitment.
assert 'default_witness_commitment' not in gbt_results
else:
# For segwit-aware nodes, check the witness
# commitment is correct.
assert 'default_witness_commitment' in gbt_results
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, script.hex())
# Clear out the mempool
self.nodes[0].generate(1)
self.sync_blocks()
@subtest
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert self.old_node.last_message["getdata"].inv[0].type == 1
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
# Delivering this transaction with witness should fail (no matter who
        # it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation, but not before."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 100000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 1000000, script_pubkey)]
tx.vout.append(CTxOut(800000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(700000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
self.sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if not self.segwit_active:
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
# Create the same output as tx3, but by replacing tx
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
assert not softfork_active(self.nodes[0], 'segwit')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(SEGWIT_HEIGHT - height - 2)
assert not softfork_active(self.nodes[0], 'segwit')
self.nodes[0].generate(1)
assert softfork_active(self.nodes[0], 'segwit')
self.segwit_active = True
@subtest
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
self.sync_blocks()
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 100000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert msg_block(block).serialize() != msg_no_witness_block(block).serialize()
# This empty block should be valid.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]
# This should also be valid.
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert len(block_3.vtx[0].vout) == 4 # 3 OP_returns
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 100000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 10000000)
assert get_virtual_size(block) > MAX_BLOCK_BASE_SIZE
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() != block.hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert get_virtual_size(block) < MAX_BLOCK_BASE_SIZE
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() == block.hash
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@subtest
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert len(self.utxo) > 0
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 100 // FACTOR_REDUCED_BLOCK_TIME
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert parent_tx.vout[0].nValue > 0
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
        update_vtxinwit_index = i // (2 * NUM_DROPS)
        update_stack_index = i % (2 * NUM_DROPS)
        update_base_length = len(block.vtx[-1].wit.vtxinwit[update_vtxinwit_index].scriptWitness.stack[update_stack_index])
        block.vtx[-1].wit.vtxinwit[update_vtxinwit_index].scriptWitness.stack[update_stack_index] += b'a' * 4
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert len(block.serialize()) > (2 * 1024 * 1024) // FACTOR_REDUCED_BLOCK_TIME
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
#cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
#block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
#block.vtx[0].vout.pop()
        block.vtx[-1].wit.vtxinwit[update_vtxinwit_index].scriptWitness.stack[update_stack_index] = b'a' * 4 if ENABLE_REDUCED_BLOCK_TIME else b'a' * 8
add_witness_commitment(block)
block.solve()
print(get_virtual_size(block), MAX_BLOCK_BASE_SIZE)
assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() != block.hash
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(block.serialize().hex())
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(block_2.serialize().hex())
# Tip should not advance!
assert self.nodes[0].getbestblockhash() != block_2.hash
@subtest
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
    def test_max_witness_push_length(self):
        """Test that witness stack elements are limited to MAX_SCRIPT_ELEMENT_SIZE bytes (128000 in this test)."""
MAX_SCRIPT_ELEMENT_SIZE = 128000
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
        # First try a stack element that is one byte too large (MAX_SCRIPT_ELEMENT_SIZE + 1 bytes)
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_SCRIPT_ELEMENT_SIZE = 128000
MAX_PROGRAM_LENGTH = 129000
# Build a witness program (two large pushes, then OP_DROPs and OP_TRUE) that serializes to exactly MAX_PROGRAM_LENGTH + 1 bytes.
long_witness_program = CScript([b'a' * (MAX_SCRIPT_ELEMENT_SIZE-50)] + [b'a'*996] + [OP_DROP]*46 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH + 1)
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * (MAX_SCRIPT_ELEMENT_SIZE-50)] + [b'a'*996] + [OP_DROP]*45 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert tx.vout[0].nValue >= 0
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 100000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_program.hex())
assert vsize != raw_tx["size"]
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit versions are non-standard to spend, but valid in blocks.
Sending to future segwit versions is always allowed.
Can run this before and after segwit activation."""
NUM_SEGWIT_VERSIONS = 17  # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 400000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
self.sync_blocks()
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 100000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
self.sync_blocks()
assert len(self.nodes[0].getrawmempool()) == 0
# Finally, verify that version 0 -> version 1 transactions
# are standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 100000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
# Gets accepted to both policy-enforcing nodes and others.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo into an segwit v1 output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000000, script_pubkey))
tx3.rehash()
# First we test this transaction against fRequireStandard=true node
# making sure the txid is added to the reject filter
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, with_witness=True, accepted=False, reason="bad-txns-nonstandard-inputs")
# Now the node will no longer ask for getdata of this transaction when advertised by same txid
self.std_node.announce_tx_and_wait_for_getdata(tx3, timeout=5, success=False)
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
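# A coinbase output paying to a witness program must still respect the coinbase maturity rule before it can be spent.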
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
generatesynchronized(self.nodes[0], COINBASE_MATURITY-2, None, self.nodes)
self.sync_blocks()
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
self.sync_blocks()
@subtest
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = ECKey()
key.generate(False)
pubkey = key.get_pubkey().get_bytes()
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 100000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, script_wsh))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 100000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 100000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 100000, CScript([OP_TRUE])))
(sig_hash, err) = LegacySignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
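# Exercise version-0 segwit signature hashing: the signed amount must match the spent output, all hashtype combinations must verify, and signatures belong in the witness rather than the scriptSig.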
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 100000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert len(temp_utxos) > num_inputs
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
"""Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 100000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
# No witness is allowed, since it is not a witness program but a p2sh program
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
self.sync_blocks()
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
# Restart with the new binary
self.stop_node(2)
self.start_node(2, extra_args=["-segwitheight={}".format(SEGWIT_HEIGHT)])
connect_nodes(self.nodes[0], 2)
self.sync_blocks()
# Make sure that this peer thinks segwit has activated.
assert softfork_active(self.nodes[2], 'segwit')
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
start_height = height
while height > 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
block_0 = self.nodes[0].getblock(block_hash)
block_2 = self.nodes[2].getblock(block_hash)
for key in ['hash', 'confirmations', 'strippedsize', 'size', 'weight', 'height', 'version', 'versionHex', 'merkleroot', 'hashStateRoot', 'hashUTXORoot', 'tx', 'time', 'mediantime', 'nonce', 'bits', 'difficulty', 'chainwork', 'nTx', 'previousblockhash', 'nextblockhash', 'flags', 'modifier']:
if height == start_height and key == 'nextblockhash':
continue # the chain tip won't have a nextblockhash
assert_equal(block_0[key], block_2[key])
height -= 1
@subtest
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
sigops_per_script = 20 * 5 + 193 * 1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert extra_sigops_available < 100 # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
self.sync_blocks()
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_superfluous_witness(self):
# Serialization of tx that puts witness flag to 3 always
def serialize_with_bogus_witness(tx):
flags = 3
r = b""
r += struct.pack("<i", tx.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(tx.vin)
r += ser_vector(tx.vout)
if flags & 1:
if (len(tx.wit.vtxinwit) != len(tx.vin)):
# vtxinwit must have the same length as vin
tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
tx.wit.vtxinwit.append(CTxInWitness())
r += tx.wit.serialize()
r += struct.pack("<I", tx.nLockTime)
return r
class msg_bogus_tx(msg_tx):
def serialize(self):
return serialize_with_bogus_witness(self.tx)
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
self.nodes[0].generate(1)
unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('qcrt'))
raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Superfluous witness record']):
self.nodes[0].p2p.send_and_ping(msg_bogus_tx(tx))
raw = self.nodes[0].signrawtransactionwithwallet(raw)
assert raw['complete']
raw = raw['hex']
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
self.nodes[0].p2p.send_and_ping(msg_bogus_tx(tx))
if __name__ == '__main__':
SegWitTest().main()
|
py
|
1a572fa5f6ff60c55d6255eb59972a0debf0dc89
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for input_pipeline_proto_to_gviz."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorboard_plugin_profile.convert import diagnostics
from tensorboard_plugin_profile.protobuf import diagnostics_pb2
class DiagnosticsTest(tf.test.TestCase):
def test_error_simple(self):
diag = diagnostics_pb2.Diagnostics()
diag.info.append("info")
diag.warnings.append("warning")
diag.errors.append("error1")
diag.errors.append("error2")
diag_table = diagnostics.generate_diagnostics_table(diag)
# There are two columns: severity and message.
self.assertLen(diag_table.columns, 2)
self.assertEqual(4, diag_table.NumberOfRows(), "Error table has four rows.")
if __name__ == "__main__":
tf.test.main()
|
py
|
1a572fe46ffbdbd317909369a45d824df3ccb553
|
# -*- coding: utf-8 -*-
from .models import Team
from rest_framework import serializers
class TeamSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Team
fields = ('code', 'name', 'strength_defence_home', 'strength_attack_home',
'strength_overall_home', 'strength_attack_away', 'strength_defence_away',
'strength_overall_away')
|
py
|
1a57306cfbb2fded546521f925ffd6d31aca9db5
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
|
py
|
1a573128ce4b25e56a3789271edf2802dad2f7db
|
import pytest
from numbers import Number
from numpy import ndarray
from hypothesis import given
import hypothesis.strategies as hst
from qcodes.dataset.param_spec import ParamSpec
@pytest.fixture
def version_0_serializations():
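# Example version-0 serializations of ParamSpecs, used as inputs for the deserialization test.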
sers = []
sers.append({'name': 'dmm_v1',
'paramtype': 'numeric',
'label': 'Gate v1',
'unit': 'V',
'inferred_from': [],
'depends_on': ['dac_ch1', 'dac_ch2']})
sers.append({'name': 'some_name',
'paramtype': 'array',
'label': 'My Array ParamSpec',
'unit': 'Ars',
'inferred_from': ['p1', 'p2'],
'depends_on': []})
return sers
@pytest.fixture
def version_0_deserializations():
"""
The paramspecs that the above serializations should deserialize to
"""
ps = []
ps.append(ParamSpec('dmm_v1', paramtype='numeric', label='Gate v1',
unit='V', inferred_from=[],
depends_on=['dac_ch1', 'dac_ch2']))
ps.append(ParamSpec('some_name', paramtype='array',
label='My Array ParamSpec', unit='Ars',
inferred_from=['p1', 'p2'], depends_on=[]))
return ps
@given(name=hst.text(min_size=1),
sp1=hst.text(min_size=1), sp2=hst.text(min_size=1),
inff1=hst.text(min_size=1), inff2=hst.text(min_size=1),
paramtype=hst.lists(
elements=hst.sampled_from(['numeric', 'array', 'text']),
min_size=6, max_size=6))
def test_creation(name, sp1, sp2, inff1, inff2, paramtype):
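# Invalid paramtypes and non-identifier names must raise ValueError; depends_on/inferred_from accept names or ParamSpec objects and render as comma-separated strings.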
invalid_types = ['np.array', 'ndarray', 'lala', '', Number,
ndarray, 0, None]
for inv_type in invalid_types:
with pytest.raises(ValueError):
ParamSpec(name, inv_type)
if not inff1.isidentifier():
inff1 = 'inff1'
if not sp1.isidentifier():
sp1 = 'sp1'
if not name.isidentifier():
with pytest.raises(ValueError):
ps = ParamSpec(name, paramtype[0], label=None, unit='V',
inferred_from=(inff1, inff2),
depends_on=(sp1, sp2))
name = 'name'
ps = ParamSpec(name, paramtype[1], label=None, unit='V',
inferred_from=(inff1, inff2),
depends_on=(sp1, sp2))
assert ps.inferred_from == f'{inff1}, {inff2}'
assert ps.depends_on == f'{sp1}, {sp2}'
ps1 = ParamSpec(sp1, paramtype[2])
p1 = ParamSpec(name, paramtype[3], depends_on=(ps1, sp2))
assert p1.depends_on == ps.depends_on
ps2 = ParamSpec(inff1, paramtype[4])
p2 = ParamSpec(name, paramtype[5], inferred_from=(ps2, inff2))
assert p2.inferred_from == ps.inferred_from
@given(name=hst.text(min_size=1))
def test_repr(name):
okay_types = ['array', 'numeric', 'text']
for okt in okay_types:
if name.isidentifier():
ps = ParamSpec(name, okt)
expected_repr = (f"ParamSpec('{name}', '{okt}', '', '', "
"inferred_from=[], depends_on=[])")
assert ps.__repr__() == expected_repr
else:
with pytest.raises(ValueError):
ps = ParamSpec(name, okt)
alphabet = "".join([chr(i) for i in range(ord("a"), ord("z"))])
@given(
name1=hst.text(min_size=4, alphabet=alphabet),
name2=hst.text(min_size=4, alphabet=alphabet),
name3=hst.text(min_size=4, alphabet=alphabet)
)
def test_add_depends_on(name1, name2, name3):
ps1 = ParamSpec(name1, "numeric")
ps2 = ParamSpec(name2, "numeric")
ps3 = ParamSpec(name3, "numeric")
ps1.add_depends_on([ps2, ps3])
assert ps1.depends_on == f"{ps2.name}, {ps3.name}"
@given(
name1=hst.text(min_size=4, alphabet=alphabet),
name2=hst.text(min_size=4, alphabet=alphabet),
name3=hst.text(min_size=4, alphabet=alphabet)
)
def test_add_inferred_from(name1, name2, name3):
ps1 = ParamSpec(name1, "numeric")
ps2 = ParamSpec(name2, "numeric")
ps3 = ParamSpec(name3, "numeric")
ps1.add_inferred_from([ps2, ps3])
assert ps1.inferred_from == f"{ps2.name}, {ps3.name}"
@given(
name1=hst.text(min_size=4, alphabet=alphabet),
name2=hst.text(min_size=4, alphabet=alphabet),
name3=hst.text(min_size=4, alphabet=alphabet),
)
def test_copy(name1, name2, name3):
ps_indep = ParamSpec(name1, "numeric")
ps = ParamSpec(name3, "numeric", depends_on=[ps_indep])
ps_copy = ps.copy()
att_names = ["name", "type", "label", "unit",
"_inferred_from", "_depends_on"]
attributes = {}
for att in att_names:
val = getattr(ps, att)
valc = getattr(ps_copy, att)
assert val == valc
attributes[att] = val
# Modifying the copy should not change the original
for att in att_names:
if not att.startswith('_'):
setattr(ps_copy, att, attributes[att] + "_modified")
else:
setattr(ps_copy, att, attributes[att] + ['bob'])
assert getattr(ps, att) == attributes[att]
def test_serialize():
p1 = ParamSpec('p1', 'numeric', 'paramspec one', 'no unit',
depends_on=['some', 'thing'], inferred_from=['bab', 'bob'])
ser = p1.serialize()
assert ser['name'] == p1.name
assert ser['paramtype'] == p1.type
assert ser['label'] == p1.label
assert ser['unit'] == p1.unit
assert ser['depends_on'] == p1._depends_on
assert ser['inferred_from'] == p1._inferred_from
def test_deserialize(version_0_serializations, version_0_deserializations):
for sdict, ps in zip(version_0_serializations, version_0_deserializations):
deps = ParamSpec.deserialize(sdict)
assert ps == deps
|
py
|
1a57318abba2de09806aca7fa31cf73beaf60c46
|
#
# Copyright 2021- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
import numpy as np
from scipy.sparse import issparse
from sklearn.utils.extmath import row_norms
class PHKMeansOptimizer:
def __init__(self, n_clusters, n_features, n_samples, x, x_squared_norm):
self.n_clusters = n_clusters
self.n_features = n_features
self.n_samples = n_samples
self.x = x
self.x_squared_norm = x_squared_norm
self.sparse = issparse(x)
def init_partition(self, labels, t_size, t_centroid_sum, t_centroid_avg, t_squared_norm):
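# Accumulate per-cluster sizes, sums and centroid averages from the initial labels, then return the initial inertia (sum of squared sample-to-centroid distances).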
sparse = issparse(self.x)
for i in range(self.n_samples):
t = labels[i]
t_size[t] += 1
if sparse:
i_start = self.x.indptr[i]
i_end = self.x.indptr[i + 1]
v_indices = self.x.indices[i_start:i_end]
v_data = self.x.data[i_start:i_end]
t_centroid_sum[t, v_indices] += v_data
else:
t_centroid_sum[t, :] += self.x[i, :]
np.multiply(t_centroid_sum, (1 / t_size)[:, None], out=t_centroid_avg)
if sparse:
t_squared_norm[:] = row_norms(t_centroid_avg, squared=True)
else:
t_squared_norm[:] = 0
# calculate inertia
inertia = 0
for i in range(self.n_samples):
t = labels[i]
if sparse:
i_start = self.x.indptr[i]
i_end = self.x.indptr[i + 1]
v_indices = self.x.indices[i_start:i_end]
v_data = self.x.data[i_start:i_end]
inertia += (t_squared_norm[t] + self.x_squared_norm[i]
- 2 * np.dot(t_centroid_avg[t, v_indices], v_data))
else:
subtraction = t_centroid_avg[t, :] - self.x[i, :]
inertia += np.dot(subtraction, subtraction)
return inertia
def optimize(self, x_permutation, t_size, t_centroid_sum, t_centroid_avg,
t_squared_norm, labels, inertia, ref_labels=None):
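# Clustering pass: reassign every sample (in x_permutation order), updating the cluster statistics in place; returns the fraction of samples that changed cluster and the updated inertia.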
return self.iterate(True, self.n_samples, self.x, self.x_squared_norm,
x_permutation, t_size, t_centroid_sum, t_centroid_avg,
t_squared_norm, labels, None, inertia, ref_labels)
def infer(self, n_samples, x, x_squared_norm, t_size, t_centroid_sum,
t_centroid_avg, t_squared_norm, labels, costs, ref_labels=None):
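# Inference pass: assign the given samples to the existing clusters without modifying the cluster statistics; fills the per-sample costs array and returns the total assignment cost.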
return self.iterate(False, n_samples, x, x_squared_norm, None, t_size,
t_centroid_sum, t_centroid_avg, t_squared_norm,
labels, costs, None, ref_labels)
def iterate(self, clustering_mode, n_samples, x, x_squared_norm, x_permutation,
t_size, t_centroid_sum, t_centroid_avg, t_squared_norm,
labels, costs, inertia, ref_labels=None):
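# Shared assignment loop: for each sample, optionally withdraw it from its current cluster, score every cluster with a size-weighted squared distance to its centroid, and assign the sample to the cheapest one.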
n_changes = 0
total_cost = 0
if not self.sparse:
tmp_delta = np.empty_like(t_centroid_avg)
else:
tmp_delta = None
for i in range(n_samples):
x_id = x_permutation[i] if x_permutation is not None else i
old_t = labels[x_id]
if clustering_mode and t_size[old_t] == 1:
continue # if t is a singleton cluster we do not reduce it any further
# obtain local references
if self.sparse:
x_start = x.indptr[x_id]
x_end = x.indptr[x_id + 1]
x_indices = x.indices[x_start:x_end]
x_data = x.data[x_start:x_end]
x_squared_norm_x = x_squared_norm[x_id]
else:
x_indices = None
x_data = x[x_id, :]
x_squared_norm_x = None
if clustering_mode:
# withdraw x from its current cluster
if self.sparse:
t_centroid_sum[old_t, x_indices] -= x_data
dot_product = np.dot(x_data, t_centroid_sum[old_t, x_indices])
t_squared_norm[old_t] = (t_squared_norm[old_t] * (t_size[old_t] ** 2)
- x_squared_norm_x - 2 * dot_product) / ((t_size[old_t] - 1) ** 2)
else:
t_centroid_sum[old_t, :] -= x_data
np.multiply(t_centroid_sum[old_t, :], 1 / (t_size[old_t] - 1), out=t_centroid_avg[old_t, :])
t_size[old_t] -= 1
# select new_t
if self.sparse:
dot_product = (t_centroid_sum[:, x_indices] @ x_data) / t_size
tmp_costs = t_squared_norm + x_squared_norm_x - 2 * dot_product
else:
np.subtract(t_centroid_avg, x_data[np.newaxis, :], out=tmp_delta)
tmp_costs = (tmp_delta[:, None, :] @ tmp_delta[..., None]).ravel()
tmp_costs *= t_size / (t_size + 1)
new_t = np.argmin(tmp_costs).item()
if ref_labels is not None:
ref_t = ref_labels[x_id]
if new_t != ref_t and not np.isclose(tmp_costs[new_t], tmp_costs[ref_t]):
print("override t of cost=%.8f, with cost=%.8f" % (tmp_costs[new_t], tmp_costs[ref_t]))
new_t = ref_t
if clustering_mode:
# update membership
if self.sparse:
dot_product = np.dot(x_data, t_centroid_sum[new_t, x_indices])
t_squared_norm[new_t] = (t_squared_norm[new_t] * (t_size[new_t] ** 2)
+ x_squared_norm_x + 2 * dot_product) / ((t_size[new_t] + 1) ** 2)
t_centroid_sum[new_t, x_indices] += x_data
else:
t_centroid_sum[new_t, :] += x_data
np.multiply(t_centroid_sum[new_t, :], 1 / (t_size[new_t] + 1), out=t_centroid_avg[new_t, :])
t_size[new_t] += 1
# update stats
if new_t != old_t:
n_changes += 1
inertia -= tmp_costs[old_t] - tmp_costs[new_t]
else:
total_cost += tmp_costs[new_t]
costs[x_id, :] = tmp_costs
labels[x_id] = new_t
if clustering_mode:
return n_changes / self.n_samples if self.n_samples > 0 else 0, inertia
else:
return total_cost
|
py
|
1a5731a5c724317d68a6747f95d9ee06ab6cba3b
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""The pty module handles pseudo-terminals.
Currently, the infrastructure here is only used to test llnl.util.tty.log.
If this is used outside a testing environment, we will want to reconsider
things like timeouts in ``ProcessController.wait()``, which are set to
get tests done quickly, not to avoid high CPU usage.
Note: The functionality in this module is unsupported on Windows
"""
from __future__ import print_function
import multiprocessing
import os
import re
import signal
import sys
import time
import traceback
import llnl.util.tty.log as log
from spack.util.executable import which
termios = None
try:
import termios as term_mod
termios = term_mod
except ImportError:
pass
class ProcessController(object):
"""Wrapper around some fundamental process control operations.
This allows one process (the controller) to drive another (the
minion) similar to the way a shell would, by sending signals and I/O.
"""
def __init__(self, pid, controller_fd,
timeout=1, sleep_time=1e-1, debug=False):
"""Create a controller to manipulate the process with id ``pid``
Args:
pid (int): id of process to control
controller_fd (int): controller fd attached to pid's stdin
timeout (int): time in seconds for wait operations to time out
(default 1 second)
sleep_time (int): time to sleep after signals, to control the
signal rate of the controller (default 1e-1)
debug (bool): whether ``horizontal_line()`` and ``status()`` should
produce output when called (default False)
``sleep_time`` allows the caller to insert delays after calls
that signal or modify the controlled process. Python behaves very
poorly if signals arrive too fast, and flooding a Python process
that has a Python signal handler with signals can kill the process and
hang our tests, so we throttle this to a closer-to-interactive rate.
"""
self.pid = pid
self.pgid = os.getpgid(pid)
self.controller_fd = controller_fd
self.timeout = timeout
self.sleep_time = sleep_time
self.debug = debug
# we need the ps command to wait for process statuses
self.ps = which("ps", required=True)
def get_canon_echo_attrs(self):
"""Get echo and canon attributes of the terminal of controller_fd."""
cfg = termios.tcgetattr(self.controller_fd)
return (
bool(cfg[3] & termios.ICANON),
bool(cfg[3] & termios.ECHO),
)
def horizontal_line(self, name):
"""Labled horizontal line for debugging."""
if self.debug:
sys.stderr.write(
"------------------------------------------- %s\n" % name
)
def status(self):
"""Print debug message with status info for the minion."""
if self.debug:
canon, echo = self.get_canon_echo_attrs()
sys.stderr.write("canon: %s, echo: %s\n" % (
"on" if canon else "off",
"on" if echo else "off",
))
sys.stderr.write("input: %s\n" % self.input_on())
sys.stderr.write("bg: %s\n" % self.background())
sys.stderr.write("\n")
def input_on(self):
"""True if keyboard input is enabled on the controller_fd pty."""
return self.get_canon_echo_attrs() == (False, False)
def background(self):
"""True if pgid is in a background pgroup of controller_fd's tty."""
return self.pgid != os.tcgetpgrp(self.controller_fd)
def tstp(self):
"""Send SIGTSTP to the controlled process."""
self.horizontal_line("tstp")
os.killpg(self.pgid, signal.SIGTSTP)
time.sleep(self.sleep_time)
def cont(self):
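# Send SIGCONT to the controlled process group to resume it.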
self.horizontal_line("cont")
os.killpg(self.pgid, signal.SIGCONT)
time.sleep(self.sleep_time)
def fg(self):
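# Bring the minion's process group to the foreground of the controlling terminal.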
self.horizontal_line("fg")
with log.ignore_signal(signal.SIGTTOU):
os.tcsetpgrp(self.controller_fd, os.getpgid(self.pid))
time.sleep(self.sleep_time)
def bg(self):
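# Move the minion's process group into the background of the controlling terminal.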
self.horizontal_line("bg")
with log.ignore_signal(signal.SIGTTOU):
os.tcsetpgrp(self.controller_fd, os.getpgrp())
time.sleep(self.sleep_time)
def write(self, byte_string):
self.horizontal_line("write '%s'" % byte_string.decode("utf-8"))
os.write(self.controller_fd, byte_string)
def wait(self, condition):
start = time.time()
while (((time.time() - start) < self.timeout) and not condition()):
time.sleep(1e-2)
assert condition()
def wait_enabled(self):
self.wait(lambda: self.input_on() and not self.background())
def wait_disabled(self):
self.wait(lambda: not self.input_on() and self.background())
def wait_disabled_fg(self):
self.wait(lambda: not self.input_on() and not self.background())
def proc_status(self):
status = self.ps("-p", str(self.pid), "-o", "stat", output=str)
status = re.split(r"\s+", status.strip(), flags=re.M)
return status[1]
def wait_stopped(self):
self.wait(lambda: "T" in self.proc_status())
def wait_running(self):
self.wait(lambda: "T" not in self.proc_status())
class PseudoShell(object):
"""Sets up controller and minion processes with a PTY.
You can create a ``PseudoShell`` if you want to test how some
function responds to terminal input. This is a pseudo-shell from a
job control perspective; ``controller_function`` and ``minion_function``
are set up with a pseudoterminal (pty) so that the controller can drive
the minion through process control signals and I/O.
The two functions should have signatures like this::
def controller_function(proc, ctl, **kwargs)
def minion_function(**kwargs)
``controller_function`` is spawned in its own process and passed three
arguments:
proc
the ``multiprocessing.Process`` object representing the minion
ctl
a ``ProcessController`` object tied to the minion
kwargs
keyword arguments passed from ``PseudoShell.start()``.
``minion_function`` is only passed ``kwargs`` delegated from
``PseudoShell.start()``.
``ctl.controller_fd`` is the controller end of a pseudoterminal whose
minion end is connected to ``sys.stdin`` in the minion process. Both
processes share the same ``sys.stdout`` and ``sys.stderr`` as the
process instantiating ``PseudoShell``.
Here are the relationships between processes created::
._________________________________________________________.
| Minion Process | pid 2
| - runs minion_function | pgroup 2
|_________________________________________________________| session 1
^
| create process with controller_fd connected to stdin
| stdout, stderr are the same as caller
._________________________________________________________.
| Controller Process | pid 1
| - runs controller_function | pgroup 1
| - uses ProcessController and controller_fd to | session 1
| control minion |
|_________________________________________________________|
^
| create process
| stdin, stdout, stderr are the same as caller
._________________________________________________________.
| Caller | pid 0
| - Constructs, starts, joins PseudoShell | pgroup 0
| - provides controller_function, minion_function | session 0
|_________________________________________________________|
"""
def __init__(self, controller_function, minion_function):
self.proc = None
self.controller_function = controller_function
self.minion_function = minion_function
# these can be optionally set to change defaults
self.controller_timeout = 1
self.sleep_time = 0
def start(self, **kwargs):
"""Start the controller and minion processes.
Arguments:
kwargs (dict): arbitrary keyword arguments that will be
passed to controller and minion functions
The controller process will create the minion, then call
``controller_function``. The minion process will call
``minion_function``.
"""
self.proc = multiprocessing.Process(
target=PseudoShell._set_up_and_run_controller_function,
args=(self.controller_function, self.minion_function,
self.controller_timeout, self.sleep_time),
kwargs=kwargs,
)
self.proc.start()
def join(self):
"""Wait for the minion process to finish, and return its exit code."""
self.proc.join()
return self.proc.exitcode
@staticmethod
def _set_up_and_run_minion_function(
tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs):
"""Minion process wrapper for PseudoShell.
Handles the mechanics of setting up a PTY, then calls
``minion_function``.
"""
# new process group, like a command or pipeline launched by a shell
os.setpgrp()
# take controlling terminal and set up pty IO
stdin_fd = os.open(tty_name, os.O_RDWR)
os.dup2(stdin_fd, sys.stdin.fileno())
os.dup2(stdout_fd, sys.stdout.fileno())
os.dup2(stderr_fd, sys.stderr.fileno())
os.close(stdin_fd)
if kwargs.get("debug"):
sys.stderr.write(
"minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
# tell the parent that we're really running
if kwargs.get("debug"):
sys.stderr.write("minion: ready!\n")
ready.value = True
try:
minion_function(**kwargs)
except BaseException:
traceback.print_exc()
@staticmethod
def _set_up_and_run_controller_function(
controller_function, minion_function, controller_timeout,
sleep_time, **kwargs):
"""Set up a pty, spawn a minion process, execute controller_function.
Handles the mechanics of setting up a PTY, then calls
``controller_function``.
"""
os.setsid() # new session; this process is the controller
controller_fd, minion_fd = os.openpty()
pty_name = os.ttyname(minion_fd)
# take controlling terminal
pty_fd = os.open(pty_name, os.O_RDWR)
os.close(pty_fd)
ready = multiprocessing.Value('i', False)
minion_process = multiprocessing.Process(
target=PseudoShell._set_up_and_run_minion_function,
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
ready, minion_function),
kwargs=kwargs,
)
minion_process.start()
# wait for subprocess to be running and connected.
while not ready.value:
time.sleep(1e-5)
pass
if kwargs.get("debug"):
sys.stderr.write("pid: %d\n" % os.getpid())
sys.stderr.write("pgid: %d\n" % os.getpgrp())
sys.stderr.write("sid: %d\n" % os.getsid(0))
sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(controller_fd))
sys.stderr.write("\n")
minion_pgid = os.getpgid(minion_process.pid)
sys.stderr.write("minion pid: %d\n" % minion_process.pid)
sys.stderr.write("minion pgid: %d\n" % minion_pgid)
sys.stderr.write(
"minion sid: %d\n" % os.getsid(minion_process.pid))
sys.stderr.write("\n")
sys.stderr.flush()
# set up controller to ignore SIGTSTP, like a shell
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
# call the controller function once the minion is ready
try:
controller = ProcessController(
minion_process.pid, controller_fd, debug=kwargs.get("debug"))
controller.timeout = controller_timeout
controller.sleep_time = sleep_time
error = controller_function(minion_process, controller, **kwargs)
except BaseException:
error = 1
traceback.print_exc()
minion_process.join()
# return whether either the parent or minion failed
return error or minion_process.exitcode
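# A usage sketch (assumed, not from the upstream module) showing how the two
# example functions above could be wired together with PseudoShell. The minion
# below is hypothetical; a real test would run something like log_output with
# keyboard input enabled.
def example_minion_function(**kwargs):
    """Pretend to be interactive: read two lines from the pty, echo them, exit."""
    for _ in range(2):
        line = sys.stdin.readline()
        sys.stdout.write("minion saw: %s" % line)

def run_example():
    shell = PseudoShell(example_controller_function, example_minion_function)
    shell.controller_timeout = 5   # optional tuning before start()
    shell.start(debug=False)       # kwargs are forwarded to both functions
    return shell.join()            # exit code of the controller process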
|
py
|
1a5732a7831b2a26b30771508e479d7a9d0cbbee
|
from easypaystack.getters import Getters
from easypaystack.transfers import Transfer
from easypaystack.verification import PaystackVerification
name="easypaystack"
|
py
|
1a573342fec40705e86e73e248814ecefb4acd3c
|
from dankerize.dankerize import dankerize
|
py
|
1a5733ca283446e08ccfdfa995f03575c1785222
|
# MIT License
#
# Copyright (c) 2018 Mahmoud Aslan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
def cyclic_learning_rate(global_step,
learning_rate=0.01,
max_lr=0.1,
step_size=20.,
gamma=0.99994,
mode='triangular',
name=None):
"""Applies cyclic learning rate (CLR).
From the paper:
Smith, Leslie N. "Cyclical learning
rates for training neural networks." 2017.
[https://arxiv.org/pdf/1506.01186.pdf]
This method lets the learning rate cyclically
vary between reasonable boundary values
achieving improved classification accuracy and
often in fewer iterations.
This code varies the learning rate linearly between the
minimum (learning_rate) and the maximum (max_lr).
It returns the cyclic learning rate. It is computed as:
```python
cycle = floor( 1 + global_step /
( 2 * step_size ) )
x = abs( global_step / step_size - 2 * cycle + 1 )
clr = learning_rate +
( max_lr - learning_rate ) * max( 0 , 1 - x )
```
Policies:
'triangular':
Default, linearly increasing then linearly decreasing the
learning rate at each cycle.
'triangular2':
The same as the triangular policy except the learning
rate difference is cut in half at the end of each cycle.
This means the learning rate difference drops after each cycle.
'exp_range':
The learning rate varies between the minimum and maximum
boundaries and each boundary value declines by an exponential
factor of: gamma^global_step.
Example: 'triangular2' mode cyclic learning rate.
```python
...
global_step = tf.Variable(0, trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=
clr.cyclic_learning_rate(global_step=global_step, mode='triangular2'))
train_op = optimizer.minimize(loss_op, global_step=global_step)
...
with tf.Session() as sess:
sess.run(init)
for step in range(1, num_steps+1):
assign_op = global_step.assign(step)
sess.run(assign_op)
...
```
Args:
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the cyclic computation. Must not be negative.
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate which is the lower bound
of the cycle (default = 0.01).
max_lr: A scalar. The maximum learning rate boundary.
step_size: A scalar. The number of iterations in half a cycle.
The paper suggests step_size = 2-8 x training iterations per epoch.
gamma: constant in 'exp_range' mode:
gamma**(global_step)
mode: one of {triangular, triangular2, exp_range}.
Default 'triangular'.
Values correspond to policies detailed above.
name: String. Optional name of the operation. Defaults to
'CyclicLearningRate'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The cyclic
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns
a function which in turn returns the decayed learning
rate Tensor. This can be useful for changing the learning
rate value across different invocations of optimizer functions.
@end_compatibility
"""
if global_step is None:
raise ValueError("global_step is required for cyclic_learning_rate.")
with ops.name_scope(name, "CyclicLearningRate",
[learning_rate, global_step]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
step_size = math_ops.cast(step_size, dtype)
def cyclic_lr():
"""Helper to recompute learning rate; most helpful in eager-mode."""
# computing: cycle = floor( 1 + global_step / ( 2 * step_size ) )
double_step = math_ops.multiply(2., step_size)
global_div_double_step = math_ops.divide(global_step, double_step)
cycle = math_ops.floor(math_ops.add(1., global_div_double_step))
# computing: x = abs( global_step / step_size - 2 * cycle + 1 )
double_cycle = math_ops.multiply(2., cycle)
global_div_step = math_ops.divide(global_step, step_size)
tmp = math_ops.subtract(global_div_step, double_cycle)
x = math_ops.abs(math_ops.add(1., tmp))
# computing: clr = learning_rate + ( max_lr - learning_rate ) * max( 0, 1 - x )
a1 = math_ops.maximum(0., math_ops.subtract(1., x))
a2 = math_ops.subtract(max_lr, learning_rate)
clr = math_ops.multiply(a1, a2)
if mode == 'triangular2':
clr = math_ops.divide(clr, math_ops.cast(math_ops.pow(2, math_ops.cast(
cycle - 1, tf.int32)), tf.float32))
if mode == 'exp_range':
clr = math_ops.multiply(math_ops.pow(gamma, global_step), clr)
return math_ops.add(clr, learning_rate, name=name)
if not context.executing_eagerly():
cyclic_lr = cyclic_lr()
return cyclic_lr
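# A small self-check sketch (not part of the original module): the 'triangular'
# policy computed with plain Python, mirroring the formula in the docstring.
# The step values below are illustrative.
def _triangular_clr(global_step, learning_rate=0.01, max_lr=0.1, step_size=20.0):
    """Pure-Python version of one triangular cycle, for eyeballing values."""
    import math
    cycle = math.floor(1 + global_step / (2 * step_size))
    x = abs(global_step / step_size - 2 * cycle + 1)
    return learning_rate + (max_lr - learning_rate) * max(0.0, 1 - x)

# _triangular_clr(0)  -> 0.01  (start of the cycle, at the minimum)
# _triangular_clr(20) -> 0.1   (mid-cycle, at max_lr)
# _triangular_clr(40) -> 0.01  (back at the minimum)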
|
py
|
1a573496495033e0850b92dfa40e745fe8927506
|
from unittest import TestCase
from mock import Mock, patch
from data import FlixsterMovieDetails, Actor, RottenTomatoesMovieDetails, Movie
from data.parsers.movies import get_flixster_movie_details, get_rotten_tomatoes_movie_details, parse_actors, \
parse_release_date, parse_trailer_url, parse_flixster_movie_details, parse_actor, parse_rotten_tomatoes_movie_details, \
parse_movie
class TestGetFlixsterDetails(TestCase):
def test_returns_none_for_missing_details(self):
self.assertIsNone(get_flixster_movie_details({}))
@patch("data.parsers.movies.parse_flixster_movie_details")
def test_returns_parsed_flixster_movie_details(self, mocked_details_parser):
details = "details"
mocked_details_parser.return_value = details
self.assertEqual(details, get_flixster_movie_details({"flixster": "foo"}))
class TestGetRottenTomatoesMovieDetails(TestCase):
def test_returns_none_for_missing_details(self):
self.assertIsNone(get_rotten_tomatoes_movie_details({}))
@patch("data.parsers.movies.parse_rotten_tomatoes_movie_details")
def test_returns_parsed_rotten_tomatoes_movie_details(self, mocked_details_parser):
details = "details"
mocked_details_parser.return_value = details
self.assertEqual(details, get_rotten_tomatoes_movie_details({"rottenTomatoes": "foo"}))
class TestGetReleaseDate(TestCase):
def test_returns_none_for_empty_release_date(self):
self.assertIsNone(parse_release_date(""))
@patch("dateutil.parser.parse")
def test_returns_parsed_date(self, mocked_date_parser):
parsed_date = "parsed date"
mocked_date_parser.return_value = parsed_date
self.assertEqual(parsed_date, parse_release_date("foo"))
class TestGetActors(TestCase):
@patch("data.parsers.movies.parse_actor")
def test_returns_actors(self, mocked_actors_parser):
parsed_actor = "parsed actor"
mocked_actors_parser.return_value = parsed_actor
expected = [parsed_actor, parsed_actor]
self.assertEqual(expected, parse_actors([1, 2]))
class TestGetTrailerUrl(TestCase):
def test_returns_none_for_empty_hd_trailer(self):
self.assertIsNone(parse_trailer_url({}))
def test_returns_hd_trailer(self):
self.assertEqual("foo", parse_trailer_url({"hd": "foo"}))
class TestParseFlixsterMovieDetails(TestCase):
average = "average"
not_interested_count = "not interested count"
likability_score = "likability score"
scores_count = "scores count"
want_to_see_count = "want to see count"
popcorn_score = "popcorn score"
movie_details = {
"average": average,
"numNotInterested": not_interested_count,
"likeability": likability_score,
"numScores": scores_count,
"numWantToSee": want_to_see_count,
"popcornScore": popcorn_score
}
expected = FlixsterMovieDetails(average_rating=average, not_interested_count=not_interested_count,
likability_score=likability_score, scores_count=scores_count,
want_to_see_count=want_to_see_count, popcorn_score=popcorn_score)
def test_parses_successfully(self):
self.assertEqual(self.expected, parse_flixster_movie_details(self.movie_details))
class TestParseActor(TestCase):
id = "id"
name = "name"
url = "url"
actor_details = {
"id": id,
"name": name,
"url": url
}
expected = Actor(fid=id, name=name, url=url)
def test_parses_successfully(self):
self.assertEqual(self.expected, parse_actor(self.actor_details))
class TestParseRottenTomatoesMovieDetails(TestCase):
rating = "rating"
is_certified_fresh = "certified fresh"
consensus = "consensus"
movie_details = {
"rating": rating,
"certifiedFresh": is_certified_fresh,
"consensus": consensus
}
expected = RottenTomatoesMovieDetails(rating=rating, is_certified_fresh=is_certified_fresh, consensus=consensus)
@patch("data.parsers.movies.clean_html")
def test_parses_successfully(self, mocked_html_cleaner):
mocked_html_cleaner.return_value = self.consensus
self.assertEqual(self.expected, parse_rotten_tomatoes_movie_details(self.movie_details))
class TestParseMovie(TestCase):
id = "id"
release_date = "release date"
title = "title"
mpaa_rating = "mpaa rating"
run_time = "run time"
is_live = "is live"
is_opening = "is opening"
trailer_url = "trailer url"
actors = "actors"
flixster_movie_details = "flixster movie details"
rotten_tomatoes_movie_details = "rotten tomatoes movie details"
reviews = "reviews"
movie_details = {
"id": id,
"releaseDate": release_date,
"title": title,
"mpaa": mpaa_rating,
"runningTime": run_time,
"isLive": is_live,
"isOpening": is_opening,
"trailer": trailer_url,
"actors": actors,
"reviews": reviews
}
@patch("data.parsers.movies.parse_release_date")
@patch("data.parsers.movies.parse_trailer_url")
@patch("data.parsers.movies.parse_actors")
@patch("data.parsers.movies.get_flixster_movie_details")
@patch("data.parsers.movies.get_rotten_tomatoes_movie_details")
def test_parses_successfully(self, mocked_rotten_tomatoes_movie_details, mocked_flixster_movie_details, mocked_actors, mocked_trailer_url, mocked_release_date):
rotten_tomatoes_movie_details = "mocked rotten tomatoes movie details"
flixster_movie_details = "mocked flixster movie details"
actors = "mocked actors"
trailer_url = "mocked trailer url"
release_date = "mocked release date"
mocked_rotten_tomatoes_movie_details.return_value = rotten_tomatoes_movie_details
mocked_flixster_movie_details.return_value = flixster_movie_details
mocked_actors.return_value = actors
mocked_trailer_url.return_value = trailer_url
mocked_release_date.return_value = release_date
expected = Movie(fid=self.id, release_date=release_date, title=self.title, mpaa_rating=self.mpaa_rating,
run_time=self.run_time, is_live=self.is_live, is_opening=self.is_opening, trailer_url=trailer_url,
actors=actors, flixster_movie_details=flixster_movie_details, rotten_tomatoes_movie_details=rotten_tomatoes_movie_details)
self.assertEqual(expected, parse_movie(self.movie_details))
|
py
|
1a5734dea82122b77c95351190ebbe095b1b7d51
|
# -*- coding=utf -*-
from __future__ import absolute_import
from cubes.browser import *
from cubes.errors import *
from cubes.model import *
from .store import DEFAULT_TIME_HIERARCHY
from .utils import *
from collections import defaultdict
from datetime import datetime
import pytz
class _MixpanelResponseAggregator(object):
def __init__(self, browser, responses, aggregate_names, drilldown, split,
actual_time_level):
"""Aggregator for multiple mixpanel responses (multiple dimensions)
with drill-down post-aggregation.
Arguments:
* `browser` – owning browser
* `responses` – mixpanel responses keyed by aggregate name
* `aggregate_names` – list of collected measures
* `drilldown` – a `Drilldown` object from the browser aggregation
query
* `split` - a split Cell object from the browser aggregation query
Object attributes:
* `aggregate_names` – list of measure names from the response
* `aggregate_data` – a dictionary where keys are measure names and
values are actual data points.
* `time_cells` – an ordered dictionary of collected cells from the
response. Key is time path, value is cell contents without the time
dimension.
"""
self.browser = browser
self.logger = browser.logger
self.drilldown = drilldown
self.aggregate_names = aggregate_names
self.actual_time_level = actual_time_level
# Extract the data
self.aggregate_data = {}
for aggregate in aggregate_names:
self.aggregate_data[aggregate] = responses[aggregate]["data"]["values"]
# Get time drilldown levels, if we are drilling through time
time_drilldowns = drilldown.drilldown_for_dimension("time")
if time_drilldowns:
time_drilldown = time_drilldowns[0]
self.last_time_level = str(time_drilldown.levels[-1])
self.time_levels = ["time."+str(l) for l in time_drilldown.levels]
self.time_hierarchy = str(time_drilldown.hierarchy)
else:
time_drilldown = None
self.last_time_level = None
self.time_levels = []
self.time_hierarchy = DEFAULT_TIME_HIERARCHY
self.drilldown_on = None
for obj in drilldown:
if obj.dimension.name != "time":
# this is a DrilldownItem object. represent it as 'dim.level' or just 'dim' if flat
self.drilldown_on = ( "%s.%s" % (obj.dimension.name, obj.levels[-1].name) ) if ( not obj.dimension.is_flat ) else obj.dimension.name
self.drilldown_on_value_func = lambda x: x
if self.drilldown_on is None and split:
self.drilldown_on = SPLIT_DIMENSION_NAME
self.drilldown_on_value_func = lambda x: True if x == "true" else False
# Time-keyed cells:
# (time_path, group) -> dictionary
self.time_cells = {}
self.cells = []
# Do it:
#
# Collect, Map&Reduce, Order
# ==========================
#
# Process the response. The methods are operating on the instance
# variable `time_cells`
self._collect_cells()
# TODO: handle week
if actual_time_level != self.last_time_level:
self._reduce_cells()
self._finalize_cells()
# Result is stored in the `cells` instance variable.
def _collect_cells(self):
for aggregate in self.aggregate_names:
self._collect_aggregate_cells(aggregate)
def _collect_aggregate_cells(self, aggregate):
"""Collects the cells from the response in a time series dictionary
`time_cells` where keys are tuples: `(time_path, group)`. `group` is
drill-down key value for the cell, such as `New York` for `city`."""
# Note: For no-drilldown this would be only one pass and group will be
# a cube name
# TODO: To add multiple drill-down dimensions in the future, add them
# to the `group` part of the key tuple
for group_key, group_series in self.aggregate_data[aggregate].items():
for time_key, value in group_series.items():
time_path = time_to_path(time_key, self.last_time_level,
self.time_hierarchy)
key = (time_path, group_key)
# self.logger.debug("adding cell %s" % (key, ))
cell = self.time_cells.setdefault(key, {})
cell[aggregate] = value
# FIXME: do this only on drilldown
if self.drilldown_on:
cell[self.drilldown_on] = group_key
def _reduce_cells(self):
"""Reduce the cells according to the time dimensions."""
def reduce_cell(result, cell):
# We assume only _sum aggregation
# All measures should be prepared so we can do this
for aggregate in self.aggregate_names:
result[aggregate] = result.get(aggregate, 0) + \
cell.get(aggregate, 0)
return result
# 1. Map cells to reduced time path
#
reduced_map = defaultdict(list)
reduced_len = len(self.time_levels)
for key, cell in self.time_cells.items():
time_path = key[0]
reduced_path = time_path[0:reduced_len]
reduced_key = (reduced_path, key[1])
# self.logger.debug("reducing %s -> %s" % (key, reduced_key))
reduced_map[reduced_key].append(cell)
self.browser.logger.debug("response cell count: %s reduced to: %s" %
(len(self.time_cells), len(reduced_map)))
# 2. Reduce the cells
#
# See the function reduce_cell() above for aggregation:
#
reduced_cells = {}
for key, cells in reduced_map.items():
# self.browser.logger.debug("Reducing: %s -> %s" % (key, cells))
cell = reduce(reduce_cell, cells, {})
reduced_cells[key] = cell
self.time_cells = reduced_cells
def _finalize_cells(self):
"""Orders the `time_cells` according to the time and "the other"
dimension and puts the result into the `cells` instance variable.
This method also adds the time dimension keys."""
# Order by time (as path) and then drilldown dimension value (group)
# The key[0] is a list of paths: time, another_drilldown
order = lambda left, right: cmp(left[0], right[0])
cells = self.time_cells.items()
cells.sort(order)
# compute the current datetime, convert to path
current_time_path = time_to_path(
pytz.timezone('UTC').localize(datetime.utcnow()).astimezone(self.browser.timezone).strftime("%Y-%m-%d %H:00:00"),
self.last_time_level,
self.time_hierarchy)
self.cells = []
for key, cell in cells:
# If we are aggregating at finer granularity than "all":
time_key = key[0]
if time_key:
# if time_key ahead of current time path, discard
if time_key > current_time_path:
continue
cell.update(zip(self.time_levels, time_key))
# append the drilldown_on attribute ref
if self.drilldown_on:
cell[self.drilldown_on] = self.drilldown_on_value_func(key[1])
self.cells.append(cell)
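# A minimal sketch (assumed, for illustration only) of the map-and-reduce step
# performed by _reduce_cells, using plain dicts: cells keyed by
# (time_path, group) are truncated to the requested drilldown depth and their
# aggregates summed. The keys and aggregate names below are made up.
def _example_reduce(time_cells, reduced_len, aggregate_names):
    reduced = defaultdict(dict)
    for (time_path, group), cell in time_cells.items():
        target = reduced[(time_path[:reduced_len], group)]
        for aggregate in aggregate_names:
            target[aggregate] = target.get(aggregate, 0) + cell.get(aggregate, 0)
    return dict(reduced)

# _example_reduce(
#     {(("2014", "1", "1", "0"), "New York"): {"total_sum": 2},
#      (("2014", "1", "1", "1"), "New York"): {"total_sum": 3}},
#     reduced_len=3, aggregate_names=["total_sum"])
# -> {(("2014", "1", "1"), "New York"): {"total_sum": 5}}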
|
py
|
1a5736ad80d394579a3335d09a1dc9ce0b527b5b
|
# -*- coding: utf-8 -*-
"""Test that models can be executed."""
import importlib
import os
import unittest
from typing import Optional
import numpy
import torch
import pykeen.experiments
import pykeen.models
from pykeen.models import (
ERModel, EntityEmbeddingModel, EntityRelationEmbeddingModel, Model, MultimodalModel, _MODELS,
_NewAbstractModel, _OldAbstractModel, model_resolver,
)
from pykeen.models.predict import get_novelty_mask, predict
from pykeen.models.unimodal.trans_d import _project_entity
from pykeen.nn import Embedding
from pykeen.utils import all_in_bounds, clamp_norm, extend_batch
from tests import cases
from tests.constants import EPSILON
SKIP_MODULES = {
Model.__name__,
_OldAbstractModel.__name__,
_NewAbstractModel.__name__,
'DummyModel',
MultimodalModel.__name__,
EntityEmbeddingModel.__name__,
EntityRelationEmbeddingModel.__name__,
ERModel.__name__,
'MockModel',
'SimpleInteractionModel',
}
for cls in MultimodalModel.__subclasses__():
SKIP_MODULES.add(cls.__name__)
class TestComplex(cases.ModelTestCase):
"""Test the ComplEx model."""
model_cls = pykeen.models.ComplEx
class TestConvE(cases.ModelTestCase):
"""Test the ConvE model."""
model_cls = pykeen.models.ConvE
embedding_dim = 12
create_inverse_triples = True
model_kwargs = {
'output_channels': 2,
'embedding_height': 3,
'embedding_width': 4,
}
# 3x batch norm: bias + scale --> 6
# entity specific bias --> 1
# ==================================
# 7
num_constant_init = 7
class TestConvKB(cases.ModelTestCase):
"""Test the ConvKB model."""
model_cls = pykeen.models.ConvKB
model_kwargs = {
'num_filters': 2,
}
# two bias terms, one conv-filter
num_constant_init = 3
class TestDistMult(cases.ModelTestCase):
"""Test the DistMult model."""
model_cls = pykeen.models.DistMult
def _check_constraints(self):
"""Check model constraints.
Entity embeddings have to have unit L2 norm.
"""
entity_norms = self.model.entity_embeddings(indices=None).norm(p=2, dim=-1)
assert torch.allclose(entity_norms, torch.ones_like(entity_norms))
def _test_score_all_triples(self, k: Optional[int], batch_size: int = 16):
"""Test score_all_triples.
:param k: The number of triples to return. Set to None, to keep all.
:param batch_size: The batch size to use for calculating scores.
"""
top_triples, top_scores = predict(model=self.model, batch_size=batch_size, k=k)
# check type
assert torch.is_tensor(top_triples)
assert torch.is_tensor(top_scores)
assert top_triples.dtype == torch.long
assert top_scores.dtype == torch.float32
# check shape
actual_k, n_cols = top_triples.shape
assert n_cols == 3
if k is None:
assert actual_k == self.factory.num_entities ** 2 * self.factory.num_relations
else:
assert actual_k == min(k, self.factory.num_triples)
assert top_scores.shape == (actual_k,)
# check ID ranges
assert (top_triples >= 0).all()
assert top_triples[:, [0, 2]].max() < self.model.num_entities
assert top_triples[:, 1].max() < self.model.num_relations
def test_score_all_triples(self):
"""Test score_all_triples with a large batch size."""
# this is only done in one of the models
self._test_score_all_triples(k=15, batch_size=16)
def test_score_all_triples_singleton_batch(self):
"""Test score_all_triples with a batch size of 1."""
self._test_score_all_triples(k=15, batch_size=1)
def test_score_all_triples_large_batch(self):
"""Test score_all_triples with a batch size larger than k."""
self._test_score_all_triples(k=10, batch_size=16)
def test_score_all_triples_keep_all(self):
"""Test score_all_triples with k=None."""
# this is only done in one of the models
self._test_score_all_triples(k=None)
class TestERMLP(cases.ModelTestCase):
"""Test the ERMLP model."""
model_cls = pykeen.models.ERMLP
model_kwargs = {
'hidden_dim': 4,
}
# Two linear layer biases
num_constant_init = 2
class TestERMLPE(cases.ModelTestCase):
"""Test the extended ERMLP model."""
model_cls = pykeen.models.ERMLPE
model_kwargs = {
'hidden_dim': 4,
}
# Two BN layers, bias & scale
num_constant_init = 4
class TestHolE(cases.ModelTestCase):
"""Test the HolE model."""
model_cls = pykeen.models.HolE
def _check_constraints(self):
"""Check model constraints.
Entity embeddings have to have at most unit L2 norm.
"""
assert all_in_bounds(self.model.entity_embeddings(indices=None).norm(p=2, dim=-1), high=1., a_tol=EPSILON)
class TestKG2EWithKL(cases.BaseKG2ETest):
"""Test the KG2E model with KL similarity."""
model_kwargs = {
'dist_similarity': 'KL',
}
class TestMuRE(cases.ModelTestCase):
"""Test the MuRE model."""
model_cls = pykeen.models.MuRE
num_constant_init = 2 # biases
class TestKG2EWithEL(cases.BaseKG2ETest):
"""Test the KG2E model with EL similarity."""
model_kwargs = {
'dist_similarity': 'EL',
}
class TestNTNLowMemory(cases.BaseNTNTest):
"""Test the NTN model with automatic memory optimization."""
model_kwargs = {
'num_slices': 2,
}
training_loop_kwargs = {
'automatic_memory_optimization': True,
}
class TestNTNHighMemory(cases.BaseNTNTest):
"""Test the NTN model without automatic memory optimization."""
model_kwargs = {
'num_slices': 2,
}
training_loop_kwargs = {
'automatic_memory_optimization': False,
}
class TestProjE(cases.ModelTestCase):
"""Test the ProjE model."""
model_cls = pykeen.models.ProjE
class TestRESCAL(cases.ModelTestCase):
"""Test the RESCAL model."""
model_cls = pykeen.models.RESCAL
class TestRGCNBasis(cases.BaseRGCNTest):
"""Test the R-GCN model."""
model_kwargs = {
'interaction': "transe",
'interaction_kwargs': dict(p=1),
'decomposition': "bases",
"decomposition_kwargs": dict(
num_bases=3,
),
}
#: one bias per layer
num_constant_init = 2
class TestRGCNBlock(cases.BaseRGCNTest):
"""Test the R-GCN model with block decomposition."""
embedding_dim = 6
model_kwargs = {
'interaction': "distmult",
'decomposition': "block",
"decomposition_kwargs": dict(
num_blocks=3,
),
'edge_weighting': "symmetric",
'use_batch_norm': True,
}
#: (scale & bias for BN) * layers
num_constant_init = 4
class TestRotatE(cases.ModelTestCase):
"""Test the RotatE model."""
model_cls = pykeen.models.RotatE
def _check_constraints(self):
"""Check model constraints.
Relation embeddings' entries have to have absolute value 1 (i.e. represent a rotation in complex plane)
"""
relation_abs = (
self.model
.relation_embeddings(indices=None)
.view(self.factory.num_relations, -1, 2)
.norm(p=2, dim=-1)
)
assert torch.allclose(relation_abs, torch.ones_like(relation_abs))
class TestSimplE(cases.ModelTestCase):
"""Test the SimplE model."""
model_cls = pykeen.models.SimplE
class _BaseTestSE(cases.ModelTestCase):
"""Test the Structured Embedding model."""
model_cls = pykeen.models.StructuredEmbedding
def _check_constraints(self):
"""Check model constraints.
Entity embeddings have to have unit L2 norm.
"""
norms = self.model.entity_embeddings(indices=None).norm(p=2, dim=-1)
assert torch.allclose(norms, torch.ones_like(norms))
class TestSELowMemory(_BaseTestSE):
"""Tests SE with low memory."""
training_loop_kwargs = {
'automatic_memory_optimization': True,
}
class TestSEHighMemory(_BaseTestSE):
"""Tests SE with low memory."""
training_loop_kwargs = {
'automatic_memory_optimization': False,
}
class TestTransD(cases.DistanceModelTestCase):
"""Test the TransD model."""
model_cls = pykeen.models.TransD
model_kwargs = {
'relation_dim': 4,
}
def _check_constraints(self):
"""Check model constraints.
Entity and relation embeddings have to have at most unit L2 norm.
"""
for emb in (self.model.entity_embeddings, self.model.relation_embeddings):
assert all_in_bounds(emb(indices=None).norm(p=2, dim=-1), high=1., a_tol=EPSILON)
def test_score_hrt_manual(self):
"""Manually test interaction function of TransD."""
# entity embeddings
weights = torch.as_tensor(data=[[2., 2.], [4., 4.]], dtype=torch.float)
entity_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
entity_embeddings._embeddings.weight.data.copy_(weights)
self.model.entity_embeddings = entity_embeddings
projection_weights = torch.as_tensor(data=[[3., 3.], [2., 2.]], dtype=torch.float)
entity_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
entity_projection_embeddings._embeddings.weight.data.copy_(projection_weights)
self.model.entity_projections = entity_projection_embeddings
# relation embeddings
relation_weights = torch.as_tensor(data=[[4.], [4.]], dtype=torch.float)
relation_embeddings = Embedding(
num_embeddings=2,
embedding_dim=1,
)
relation_embeddings._embeddings.weight.data.copy_(relation_weights)
self.model.relation_embeddings = relation_embeddings
relation_projection_weights = torch.as_tensor(data=[[5.], [3.]], dtype=torch.float)
relation_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=1,
)
relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
self.model.relation_projections = relation_projection_embeddings
# Compute Scores
batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 1]], dtype=torch.long)
scores = self.model.score_hrt(hrt_batch=batch)
self.assertEqual(scores.shape[0], 2)
self.assertEqual(scores.shape[1], 1)
first_score = scores[0].item()
self.assertAlmostEqual(first_score, -16, delta=0.01)
# Use different dimension for relation embedding: relation_dim > entity_dim
# relation embeddings
relation_weights = torch.as_tensor(data=[[3., 3., 3.], [3., 3., 3.]], dtype=torch.float)
relation_embeddings = Embedding(
num_embeddings=2,
embedding_dim=3,
)
relation_embeddings._embeddings.weight.data.copy_(relation_weights)
self.model.relation_embeddings = relation_embeddings
relation_projection_weights = torch.as_tensor(data=[[4., 4., 4.], [4., 4., 4.]], dtype=torch.float)
relation_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=3,
)
relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
self.model.relation_projections = relation_projection_embeddings
# Compute Scores
batch = torch.as_tensor(data=[[0, 0, 0]], dtype=torch.long)
scores = self.model.score_hrt(hrt_batch=batch)
self.assertAlmostEqual(scores.item(), -27, delta=0.01)
batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 0]], dtype=torch.long)
scores = self.model.score_hrt(hrt_batch=batch)
self.assertEqual(scores.shape[0], 2)
self.assertEqual(scores.shape[1], 1)
first_score = scores[0].item()
second_score = scores[1].item()
self.assertAlmostEqual(first_score, -27, delta=0.01)
self.assertAlmostEqual(second_score, -27, delta=0.01)
# Use different dimension for relation embedding: relation_dim < entity_dim
# entity embeddings
weights = torch.as_tensor(data=[[1., 1., 1.], [1., 1., 1.]], dtype=torch.float)
entity_embeddings = Embedding(
num_embeddings=2,
embedding_dim=3,
)
entity_embeddings._embeddings.weight.data.copy_(weights)
self.model.entity_embeddings = entity_embeddings
projection_weights = torch.as_tensor(data=[[2., 2., 2.], [2., 2., 2.]], dtype=torch.float)
entity_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=3,
)
entity_projection_embeddings._embeddings.weight.data.copy_(projection_weights)
self.model.entity_projections = entity_projection_embeddings
# relation embeddings
relation_weights = torch.as_tensor(data=[[3., 3.], [3., 3.]], dtype=torch.float)
relation_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
relation_embeddings._embeddings.weight.data.copy_(relation_weights)
self.model.relation_embeddings = relation_embeddings
relation_projection_weights = torch.as_tensor(data=[[4., 4.], [4., 4.]], dtype=torch.float)
relation_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
self.model.relation_projections = relation_projection_embeddings
# Compute Scores
batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 0]], dtype=torch.long)
scores = self.model.score_hrt(hrt_batch=batch)
self.assertEqual(scores.shape[0], 2)
self.assertEqual(scores.shape[1], 1)
first_score = scores[0].item()
second_score = scores[1].item()
self.assertAlmostEqual(first_score, -18, delta=0.01)
self.assertAlmostEqual(second_score, -18, delta=0.01)
def test_project_entity(self):
"""Test _project_entity."""
# random entity embeddings & projections
e = torch.rand(1, self.model.num_entities, self.embedding_dim, generator=self.generator)
e = clamp_norm(e, maxnorm=1, p=2, dim=-1)
e_p = torch.rand(1, self.model.num_entities, self.embedding_dim, generator=self.generator)
# random relation embeddings & projections
r = torch.rand(self.batch_size, 1, self.model.relation_dim, generator=self.generator)
r = clamp_norm(r, maxnorm=1, p=2, dim=-1)
r_p = torch.rand(self.batch_size, 1, self.model.relation_dim, generator=self.generator)
# project
e_bot = _project_entity(e=e, e_p=e_p, r=r, r_p=r_p)
# check shape:
assert e_bot.shape == (self.batch_size, self.model.num_entities, self.model.relation_dim)
# check normalization
assert (torch.norm(e_bot, dim=-1, p=2) <= 1.0 + 1.0e-06).all()
class TestTransE(cases.DistanceModelTestCase):
"""Test the TransE model."""
model_cls = pykeen.models.TransE
def _check_constraints(self):
"""Check model constraints.
Entity embeddings have to have unit L2 norm.
"""
entity_norms = self.model.entity_embeddings(indices=None).norm(p=2, dim=-1)
assert torch.allclose(entity_norms, torch.ones_like(entity_norms))
class TestTransH(cases.DistanceModelTestCase):
"""Test the TransH model."""
model_cls = pykeen.models.TransH
def _check_constraints(self):
"""Check model constraints.
Entity embeddings have to have unit L2 norm.
"""
entity_norms = self.model.normal_vector_embeddings(indices=None).norm(p=2, dim=-1)
assert torch.allclose(entity_norms, torch.ones_like(entity_norms))
class TestTransR(cases.DistanceModelTestCase):
"""Test the TransR model."""
model_cls = pykeen.models.TransR
model_kwargs = {
'relation_dim': 4,
}
def test_score_hrt_manual(self):
"""Manually test interaction function of TransR."""
# entity embeddings
weights = torch.as_tensor(data=[[2., 2.], [3., 3.]], dtype=torch.float)
entity_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
entity_embeddings._embeddings.weight.data.copy_(weights)
self.model.entity_embeddings = entity_embeddings
# relation embeddings
relation_weights = torch.as_tensor(data=[[4., 4], [5., 5.]], dtype=torch.float)
relation_embeddings = Embedding(
num_embeddings=2,
embedding_dim=2,
)
relation_embeddings._embeddings.weight.data.copy_(relation_weights)
self.model.relation_embeddings = relation_embeddings
relation_projection_weights = torch.as_tensor(data=[[5., 5., 6., 6.], [7., 7., 8., 8.]], dtype=torch.float)
relation_projection_embeddings = Embedding(
num_embeddings=2,
embedding_dim=4,
)
relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
self.model.relation_projections = relation_projection_embeddings
# Compute Scores
batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 1]], dtype=torch.long)
scores = self.model.score_hrt(hrt_batch=batch)
self.assertEqual(scores.shape[0], 2)
self.assertEqual(scores.shape[1], 1)
first_score = scores[0].item()
# second_score = scores[1].item()
self.assertAlmostEqual(first_score, -32, delta=0.01)
def _check_constraints(self):
"""Check model constraints.
Entity and relation embeddings have to have at most unit L2 norm.
"""
for emb in (self.model.entity_embeddings, self.model.relation_embeddings):
assert all_in_bounds(emb(indices=None).norm(p=2, dim=-1), high=1., a_tol=1.0e-06)
class TestTuckEr(cases.ModelTestCase):
"""Test the TuckEr model."""
model_cls = pykeen.models.TuckER
model_kwargs = {
'relation_dim': 4,
}
#: 2xBN (bias & scale)
num_constant_init = 4
class TestUM(cases.DistanceModelTestCase):
"""Test the Unstructured Model."""
model_cls = pykeen.models.UnstructuredModel
class TestTesting(unittest.TestCase):
"""Yo dawg, I heard you like testing, so I wrote a test to test the tests so you can test while you're testing."""
def test_documentation(self):
"""Test all models have appropriate structured documentation."""
for name, model_cls in sorted(model_resolver.lookup_dict.items()):
with self.subTest(name=name):
try:
docdata = model_cls.__docdata__
except AttributeError:
self.fail('missing __docdata__')
self.assertIn('citation', docdata)
self.assertIn('author', docdata['citation'])
self.assertIn('link', docdata['citation'])
self.assertIn('year', docdata['citation'])
def test_testing(self):
"""Check that there's a test for all models.
For now, this is excluding multimodal models. Not sure how to test those yet.
"""
model_names = {
model_cls.__name__
for model_cls in model_resolver.lookup_dict.values()
if not issubclass(model_cls, ERModel)
}
model_names -= SKIP_MODULES
tested_model_names = {
value.model_cls.__name__
for name, value in globals().items()
if (
isinstance(value, type)
and issubclass(value, cases.ModelTestCase)
and not name.startswith('_')
and not issubclass(value.model_cls, (ERModel, MultimodalModel))
)
}
tested_model_names -= SKIP_MODULES
self.assertEqual(model_names, tested_model_names, msg='Some models have not been tested')
def test_importing(self):
"""Test that all models are available from :mod:`pykeen.models`."""
models_path = os.path.abspath(os.path.dirname(pykeen.models.__file__))
model_names = set()
for directory, _, filenames in os.walk(models_path):
for filename in filenames:
if not filename.endswith('.py'):
continue
path = os.path.join(directory, filename)
relpath = os.path.relpath(path, models_path)
if relpath.endswith('__init__.py'):
continue
import_path = 'pykeen.models.' + relpath[:-len('.py')].replace(os.sep, '.')
module = importlib.import_module(import_path)
for name in dir(module):
value = getattr(module, name)
if (
isinstance(value, type)
and issubclass(value, Model)
):
model_names.add(value.__name__)
star_model_names = _remove_non_models(set(pykeen.models.__all__) - SKIP_MODULES)
model_names = _remove_non_models(model_names - SKIP_MODULES)
self.assertEqual(model_names, star_model_names, msg='Forgot to add some imports')
def test_models_have_experiments(self):
"""Test that each model has an experiment folder in :mod:`pykeen.experiments`."""
experiments_path = os.path.abspath(os.path.dirname(pykeen.experiments.__file__))
experiment_blacklist = {
'DistMultLiteral', # FIXME
'ComplExLiteral', # FIXME
'UnstructuredModel',
'StructuredEmbedding',
'RESCAL',
'NTN',
'ERMLP',
'ProjE', # FIXME
'ERMLPE', # FIXME
'PairRE',
}
model_names = _remove_non_models(set(pykeen.models.__all__) - SKIP_MODULES - experiment_blacklist)
for model in _remove_non_models(model_names):
with self.subTest(model=model):
self.assertTrue(
os.path.exists(os.path.join(experiments_path, model.lower())),
msg=f'Missing experimental configuration for {model}',
)
def _remove_non_models(elements):
rv = set()
for element in elements:
try:
model_resolver.lookup(element)
except ValueError: # invalid model name - aka not actually a model
continue
else:
rv.add(element)
return rv
class TestModelUtilities(unittest.TestCase):
"""Extra tests for utility functions."""
def test_abstract(self):
"""Test that classes are checked as abstract properly."""
self.assertTrue(EntityEmbeddingModel._is_base_model)
self.assertTrue(EntityRelationEmbeddingModel._is_base_model)
self.assertTrue(MultimodalModel._is_base_model)
for model_cls in _MODELS:
self.assertFalse(
model_cls._is_base_model,
msg=f'{model_cls.__name__} should not be marked as a base model',
)
def test_get_novelty_mask(self):
"""Test `get_novelty_mask()`."""
num_triples = 7
base = torch.arange(num_triples)
mapped_triples = torch.stack([base, base, 3 * base], dim=-1)
query_ids = torch.randperm(num_triples).numpy()[:num_triples // 2]
exp_novel = query_ids != 0
col = 2
other_col_ids = numpy.asarray([0, 0])
mask = get_novelty_mask(
mapped_triples=mapped_triples,
query_ids=query_ids,
col=col,
other_col_ids=other_col_ids,
)
assert mask.shape == query_ids.shape
assert (mask == exp_novel).all()
def test_extend_batch(self):
"""Test `_extend_batch()`."""
batch = torch.tensor([[a, b] for a in range(3) for b in range(4)]).view(-1, 2)
all_ids = [2 * i for i in range(5)]
batch_size = batch.shape[0]
num_choices = len(all_ids)
for dim in range(3):
h_ext_batch = extend_batch(batch=batch, all_ids=all_ids, dim=dim)
# check shape
assert h_ext_batch.shape == (batch_size * num_choices, 3)
# check content
actual_content = set(tuple(map(int, hrt)) for hrt in h_ext_batch)
exp_content = set()
for i in all_ids:
for b in batch:
c = list(map(int, b))
c.insert(dim, i)
exp_content.add(tuple(c))
assert actual_content == exp_content
|
py
|
1a57387af4adb0debadedb02dc9639952922290e
|
import grpc
import hello_pb2
import hello_pb2_grpc
def run():
# connect to the gRPC server
channel = grpc.insecure_channel('localhost:8089')
# send the gRPC request and print the reply
stub = hello_pb2_grpc.GreeterStub(channel)
response = stub.SayHello(hello_pb2.Request(name = 'cpx'))
print("Greeter client received: " + response.message)
if __name__ == '__main__':
run()
|
py
|
1a573890a50f6835b962603b40d413562b87a38c
|
from .. import Provider as BankProvider
class Provider(BankProvider):
"""Implement bank provider for ``nl_NL`` locale."""
bban_format = '????##########'
country_code = 'NL'
|
py
|
1a5739996ff6ee4b97ea55982e21154c40a065b3
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import datetime, timedelta
from sentry.constants import STATUS_RESOLVED, STATUS_UNRESOLVED
from sentry.models import GroupBookmark, GroupTagValue
from sentry.search.django.backend import DjangoSearchBackend
from sentry.testutils import TestCase
class DjangoSearchBackendTest(TestCase):
def create_backend(self):
return DjangoSearchBackend()
def setUp(self):
self.backend = self.create_backend()
self.project1 = self.create_project(name='foo')
self.project2 = self.create_project(name='bar')
self.group1 = self.create_group(
project=self.project1,
checksum='a' * 32,
message='foo',
times_seen=5,
status=STATUS_UNRESOLVED,
last_seen=datetime(2013, 8, 13, 3, 8, 24, 880386),
first_seen=datetime(2013, 7, 13, 3, 8, 24, 880386),
)
self.event1 = self.create_event(
event_id='a' * 32,
group=self.group1,
tags={
'server': 'example.com',
'env': 'production',
}
)
self.group2 = self.create_group(
project=self.project1,
checksum='b' * 32,
message='bar',
times_seen=10,
status=STATUS_RESOLVED,
last_seen=datetime(2013, 7, 14, 3, 8, 24, 880386),
first_seen=datetime(2013, 7, 14, 3, 8, 24, 880386),
)
self.event2 = self.create_event(
event_id='b' * 32,
group=self.group2,
tags={
'server': 'example.com',
'env': 'staging',
'url': 'http://example.com',
}
)
for key, value in self.event1.data['tags']:
GroupTagValue.objects.create(
group=self.group1,
key=key,
value=value,
)
for key, value in self.event2.data['tags']:
GroupTagValue.objects.create(
group=self.group2,
key=key,
value=value,
)
GroupBookmark.objects.create(
user=self.user,
group=self.group2,
project=self.group2.project,
)
self.backend.index(self.event1)
self.backend.index(self.event2)
def test_query(self):
backend = self.create_backend()
results = self.backend.query(self.project1, query='foo')
assert len(results) == 1
assert results[0] == self.group1
results = self.backend.query(self.project1, query='bar')
assert len(results) == 1
assert results[0] == self.group2
def test_sort(self):
backend = self.create_backend()
results = self.backend.query(self.project1, sort_by='date')
assert len(results) == 2
assert results[0] == self.group1
assert results[1] == self.group2
results = self.backend.query(self.project1, sort_by='new')
assert len(results) == 2
assert results[0] == self.group2
assert results[1] == self.group1
results = self.backend.query(self.project1, sort_by='freq')
assert len(results) == 2
assert results[0] == self.group2
assert results[1] == self.group1
def test_status(self):
results = self.backend.query(self.project1, status=STATUS_UNRESOLVED)
assert len(results) == 1
assert results[0] == self.group1
results = self.backend.query(self.project1, status=STATUS_RESOLVED)
assert len(results) == 1
assert results[0] == self.group2
def test_tags(self):
results = self.backend.query(self.project1, tags={'env': 'staging'})
assert len(results) == 1
assert results[0] == self.group2
results = self.backend.query(self.project1, tags={'env': 'example.com'})
assert len(results) == 0
def test_bookmarked_by(self):
results = self.backend.query(self.project1, bookmarked_by=self.user)
assert len(results) == 1
assert results[0] == self.group2
def test_project(self):
results = self.backend.query(self.project2)
assert len(results) == 0
def test_limit_and_offset(self):
results = self.backend.query(self.project1, limit=1)
assert len(results) == 1
results = self.backend.query(self.project1, offset=1, limit=1)
assert len(results) == 1
results = self.backend.query(self.project1, offset=2, limit=1)
assert len(results) == 0
def test_first_seen_date_filter(self):
backend = self.create_backend()
results = self.backend.query(
self.project1, date_from=self.group2.first_seen,
date_filter='first_seen')
assert len(results) == 1
assert results[0] == self.group2
results = self.backend.query(
self.project1, date_to=self.group1.first_seen + timedelta(minutes=1),
date_filter='first_seen')
assert len(results) == 1
assert results[0] == self.group1
results = self.backend.query(
self.project1,
date_from=self.group1.first_seen,
date_to=self.group1.first_seen + timedelta(minutes=1),
date_filter='first_seen',
)
assert len(results) == 1
assert results[0] == self.group1
def test_last_seen_date_filter(self):
backend = self.create_backend()
results = self.backend.query(
self.project1, date_from=self.group1.last_seen,
date_filter='last_seen')
assert len(results) == 1
assert results[0] == self.group1
results = self.backend.query(
self.project1,
date_to=self.group1.last_seen - timedelta(minutes=1),
date_filter='last_seen')
assert len(results) == 1
assert results[0] == self.group2
results = self.backend.query(
self.project1,
date_from=self.group2.last_seen,
date_to=self.group1.last_seen - timedelta(minutes=1),
date_filter='last_seen',
)
assert len(results) == 1
assert results[0] == self.group2
|
py
|
1a5739b74e6ff6f20c1efb399bdd9543b530b8af
|
#!/usr/bin/env python
"""Unit test for the linux cmd parser."""
import os
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.parsers import linux_cmd_parser
class LinuxCmdParserTest(test_lib.GRRBaseTest):
"""Test parsing of linux command output."""
def testDpkgCmdParser(self):
"""Ensure we can extract packages from dpkg output."""
parser = linux_cmd_parser.DpkgCmdParser()
content = open(os.path.join(self.base_path, "dpkg.out")).read()
out = list(parser.Parse("/usr/bin/dpkg", ["--list"], content, "", 0, 5,
None))
self.assertEqual(len(out), 181)
self.assertTrue(isinstance(out[1], rdfvalue.SoftwarePackage))
self.assertEqual(out[0].name, "acpi-support-base")
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
|
py
|
1a573a806bab05046212a0adb00f774415083590
|
#!/usr/bin/env python
"""Testing the accuracy of clustering methods for splitting
homologous blocks into reference-orthologous blocks.
Requires:
- scikit-learn
- kmodes
- biopython
- sonLib
- pandas
"""
from argparse import ArgumentParser
from StringIO import StringIO
from collections import defaultdict, namedtuple
from glob import glob
import itertools
import random
import os
import logging
from sklearn.cluster import KMeans
from sklearn.preprocessing import OneHotEncoder
from kmodes.kmodes import KModes
from Bio import Phylo
from Bio.Phylo import TreeConstruction
from Bio.Phylo.BaseTree import Clade, Tree
from Bio.Phylo.Applications import RaxmlCommandline
import numpy as np
import pandas as pd
from sonLib.bioio import fastaRead, system
from simulator import BirthDeathSimulator, GeneralizedReversibleSimulator, get_parent, prune_lineages
cluster_methods = ['k-modes', 'k-means', 'neighbor-joining', 'upgma', 'guided-neighbor-joining',
'maximum-likelihood', 'split-decomposition']
evaluation_methods = ['split-decomposition', 'relaxed-split-decomposition', 'none']
def seqs_to_columns(seqs, seq_order):
"""
Transform a dict of sequences into a list of columns.
Each column is represented by a list of entries. The order of each
sequence within the entries is the same as the order in the
parameter seq_order.
"""
assert len(seq_order) == len(seqs.keys()), \
"'seq_order' has more or fewer entries than 'seqs'"
assert all([seq_name in seqs for seq_name in seq_order]), \
"'seq_order' refers to a sequence not present in 'seqs'"
assert len(set([len(seq) for seq in seqs.values()])) == 1, \
"All sequences must have the same length"
columns = []
for seq_name in seq_order:
seq = seqs[seq_name]
if len(columns) == 0:
columns = [[] for _ in xrange(len(seq))]
for i, char in enumerate(seq):
columns[i].append(char)
return columns
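# Example (illustrative): two aligned sequences become one entry list per column.
#     seqs_to_columns({'human': 'ACG', 'chimp': 'ATG'}, ['human', 'chimp'])
#     -> [['A', 'A'], ['C', 'T'], ['G', 'G']]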
def columns_to_matrix(cols, one_hot_encode=True):
"""
Take a nested list of DNA columns and convert them into a np feature array.
"""
def nuc_to_number(nucleotide):
nucleotide = nucleotide.lower()
if nucleotide == 'a':
return 0
elif nucleotide == 'c':
return 1
elif nucleotide == 'g':
return 2
elif nucleotide == 't':
return 3
else:
return 4
transformed_cols = [map(nuc_to_number, col) for col in cols]
raw_matrix = np.matrix(transformed_cols).transpose()
encoder = OneHotEncoder()
encoded_matrix = encoder.fit_transform(raw_matrix)
return encoded_matrix
def cluster_matrix(matrix, cluster_method):
"""
Run a clustering method on a matrix and return the cluster assignments.
"""
if cluster_method == 'k-means':
return KMeans(n_clusters=2).fit_predict(matrix)
elif cluster_method == 'k-modes':
return KModes(n_clusters=2).fit_predict(matrix.todense())
else:
raise RuntimeError('Unknown cluster method: %s' % cluster_method)
def distance_matrix_from_columns(columns, distance_correction):
"""
Get a distance matrix (as a np array) from a nested list of DNA columns.
"""
num_seqs = len(columns[0])
matrix = np.zeros([num_seqs, num_seqs], dtype=int)
for column in columns:
for i, entry_1 in enumerate(column):
for j in xrange(i + 1, len(column)):
entry_2 = column[j]
if entry_1.lower() != entry_2.lower():
matrix[i, j] += 1
matrix[j, i] += 1
uncorrected = np.true_divide(matrix, len(columns))
if distance_correction == 'none':
return uncorrected
elif distance_correction == 'jukes-cantor':
return -0.75 * np.log(1 - 1.333333 * uncorrected)
else:
raise RuntimeError("Unknown distance correction method: %s" % distance_correction)
def satisfies_four_point_criterion(matrix, split1, split2, relaxed=False,
enforce_three_point=True):
"""Tests whether a split satisfies the d-split criterion of Bandelt
and Dress 1992.
The "relaxed" version is the same version that is in the paper,
where the internal distance may be larger than one of the
inter-split distances. Otherwise, it must be smaller than both.
The "enforce_three_point" parameter determines whether to enforce
the inter-vs-intra distance comparison even when one side of the
split is a singleton. This isn't justified by the tree metric, but
may work in practice.
"""
split1 = list(split1)
split2 = list(split2)
if len(split1) < len(split2):
# We call split1 the larger split, to simplify the
# "enforce_three_point" logic
split1, split2 = split2, split1
for i, j in itertools.combinations(split1, 2):
if len(split2) == 1 and enforce_three_point:
intra = matrix[i, j]
k = split2[0]
inter = matrix[i, k] + matrix[j, k]
if intra > inter:
return False
for k, l in itertools.combinations(split2, 2):
intra = matrix[i, j] + matrix[k, l]
inter1 = matrix[i, k] + matrix[j, l]
inter2 = matrix[i, l] + matrix[j, k]
if relaxed:
if intra >= inter1 and intra >= inter2:
return False
else:
if intra >= inter1 or intra >= inter2:
return False
return True
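# Worked example (illustrative): distances from a tree with pendant edges of
# length 1 and an internal edge of length 2 separating {0, 1} from {2, 3}.
# The intra-split sum (2 + 2) stays below both inter-split sums (4 + 4), so
# the true split passes, while a split that mixes the two sides does not.
#     d = np.array([[0, 2, 4, 4],
#                   [2, 0, 4, 4],
#                   [4, 4, 0, 2],
#                   [4, 4, 2, 0]])
#     satisfies_four_point_criterion(d, [0, 1], [2, 3])  # -> True
#     satisfies_four_point_criterion(d, [0, 2], [1, 3])  # -> False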
def is_good_split(cluster_assignments, columns, evaluation_method, distance_correction):
"""
Run the "split-evaluation" method on the columns, and return True if it calls this split good.
"""
assert all([i == 0 or i == 1 for i in cluster_assignments]), \
"A valid split should only split into two partitions"
if evaluation_method == 'none':
return True
elif evaluation_method in ('split-decomposition', 'relaxed-split-decomposition'):
distance_matrix = distance_matrix_from_columns(columns, distance_correction)
split1 = [i for i, cluster in enumerate(cluster_assignments) if cluster == 0]
split2 = [i for i, cluster in enumerate(cluster_assignments) if cluster == 1]
if evaluation_method == 'relaxed-split-decomposition':
return satisfies_four_point_criterion(distance_matrix, split1, split2, relaxed=True)
else:
return satisfies_four_point_criterion(distance_matrix, split1, split2, relaxed=False)
def build_tree_top_down(columns, seq_names, cluster_method, evaluation_method):
"""
Build a tree top-down using successive applications of a clustering method.
"""
all_columns = columns
all_seq_names = seq_names
def is_finished(cluster, columns):
return len(cluster) <= 2 or all([len(set(column)) == 1 for column in columns])
def recurse(columns, seq_names):
matrix = columns_to_matrix(columns, one_hot_encode=(cluster_method == 'k-means'))
cluster_assignments = cluster_matrix(matrix, cluster_method)
cluster0_indices = [i for i, cluster in enumerate(cluster_assignments) if cluster == 0]
cluster1_indices = [i for i, cluster in enumerate(cluster_assignments) if cluster == 1]
cluster0 = [seq_names[i] for i in cluster0_indices]
cluster1 = [seq_names[i] for i in cluster1_indices]
cluster0_columns = [[column[i] for i in cluster0_indices] for column in columns]
if is_finished(cluster0, cluster0_columns):
if len(cluster0) == 1:
clade0 = Clade(name=cluster0[0])
else:
clade0 = Clade(clades=map(lambda x: Clade(name=x), cluster0))
else:
clade0 = Clade(clades=recurse(cluster0_columns, cluster0))
cluster1_columns = [[column[i] for i in cluster1_indices] for column in columns]
if is_finished(cluster1, cluster1_columns):
if len(cluster1) == 1:
clade1 = Clade(name=cluster1[0])
else:
clade1 = Clade(clades=map(lambda x: Clade(name=x), cluster1))
else:
clade1 = Clade(clades=recurse(cluster1_columns, cluster1))
return (clade0, clade1)
tree = Tree(Clade(clades=recurse(columns, seq_names)))
return tree
def random_sequence(length):
"""
Get a random DNA sequence of a certain length.
"""
seq = []
for _ in xrange(length):
seq.append(random.choice(['A', 'a', 'C', 'c', 'G', 'g', 'T', 't']))
return seq
def generate_gene_tree_and_sequences(gene_tree_sim, grt_sim, num_columns, observed_species=None):
"""
Get a random gene tree and the corresponding sequences of the leaves.
The root sequence is a random DNA sequence of length num_columns.
observed_species is either None (in which case all species are
observed) or a list of species to prune from the gene tree /
sequences.
"""
gene_tree = gene_tree_sim.generate()
seqs = grt_sim.generate_leaf_sequences(gene_tree, random_sequence(num_columns))
if observed_species is not None and len(gene_tree.root) > 0:
leaf_clades_to_remove = [leaf for leaf in gene_tree.get_terminals() if leaf.name.split('.')[0] not in observed_species]
gene_tree = prune_lineages(gene_tree, leaf_clades_to_remove)
seqs = dict([(name, v) for name, v in seqs.iteritems() if name.split('.')[0] in observed_species])
return gene_tree, seqs
def flatten_list(l):
"""
Flatten a list.
"""
return [item for sublist in l for item in sublist]
def get_num_skips(parent, child):
"""
Get the number of intermediate nodes between a parent and a child.
"""
if parent == child:
return 0
path = parent.get_path(child)
return len(path) - 1
def calculate_join_costs(species_tree, dup_cost=0.0, loss_cost=0.1):
"""
Calculate the join-cost dictionary needed for guided neighbor-joining.
For each pair of species, the returned dictionary specifies the cost of joining them.
"""
join_costs = defaultdict(dict)
for species1 in species_tree.find_clades():
for species2 in species_tree.find_clades():
if species1 == species2:
join_costs[species1][species2] = 0.0
else:
mrca = species_tree.common_ancestor(species1, species2)
if mrca in (species1, species2):
cost_from_dups = dup_cost
else:
cost_from_dups = 0.0
num_losses = get_num_skips(mrca, species1) + get_num_skips(mrca, species2)
cost_from_losses = num_losses * loss_cost
join_costs[species1][species2] = cost_from_dups + cost_from_losses
return join_costs
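# Worked example (hypothetical species tree ((A,B)X,C)R, for illustration only): with
# the defaults dup_cost=0.0 and loss_cost=0.1, joining A with B costs 0.0 (their MRCA X
# is neither of them and no intermediate nodes are skipped), joining A with C costs 0.1
# because the path R -> X -> A skips one node, and joining A with its ancestor X incurs
# the duplication penalty.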
def guided_neighbor_joining(distance_matrix, seq_names, species_tree):
"""
Runs a somewhat stripped-down version of the guided neighbor-joining algorithm.
Currently missing the "confidence" correction for join distances.
"""
join_costs = calculate_join_costs(species_tree)
recon = []
for name in seq_names:
matching_species = [clade for clade in species_tree.find_clades(name=name.split('.')[0])]
assert len(matching_species) == 1
recon.append(matching_species[0])
r = []
for i in xrange(len(seq_names)):
r_i = 0.0
for j in xrange(len(seq_names)):
if i == j:
continue
r_i += distance_matrix[i][j]
r_i /= len(seq_names) - 2
r.append(r_i)
clades = [Clade(name=name) for name in seq_names]
    num_joins_left = len(seq_names) - 1
while num_joins_left > 0:
min_dist = float('inf')
min_i = -1
min_j = -1
for i in xrange(len(seq_names)):
for j in xrange(i + 1, len(seq_names)):
                if clades[i] is None or clades[j] is None:
continue
assert distance_matrix[i][j] == distance_matrix[j][i]
dist = distance_matrix[i][j] + join_costs[recon[i]][recon[j]] - r[i] - r[j]
if dist < min_dist:
min_i = i
min_j = j
min_dist = dist
dist = distance_matrix[min_i][min_j]
branch_length_mini = (dist + r[min_i] - r[min_j]) / 2
branch_length_minj = dist - branch_length_mini
clades[min_i].branch_length = max(branch_length_mini, 0.0)
clades[min_j].branch_length = max(branch_length_minj, 0.0)
new_clade = Clade(clades=[clades[min_i], clades[min_j]])
clades[min_j] = None
clades[min_i] = new_clade
recon[min_i] = species_tree.common_ancestor(recon[min_i], recon[min_j])
recon[min_j] = None
r[min_j] = 0.0
# Update distance matrix
for k in xrange(len(seq_names)):
            if clades[k] is None or k == min_i:
# Don't have to update
continue
dist_mini_k = distance_matrix[min_i][k]
dist_minj_k = distance_matrix[min_j][k]
distance_matrix[min_i][k] = (dist_mini_k + dist_minj_k - dist) / 2
distance_matrix[k][min_i] = distance_matrix[min_i][k]
distance_matrix[min_j][k] = distance_matrix[k][min_j] = -1000000.0
# Update r[k]
if num_joins_left > 2:
r[k] = ((r[k] * (num_joins_left - 1)) - dist_mini_k - dist_minj_k + distance_matrix[min_i][k]) / (num_joins_left - 2)
else:
r[k] = 0.0
# Update r for new column
r[min_i] = 0.0
if num_joins_left > 2:
for k in xrange(len(seq_names)):
                if clades[k] is None:
continue
r[min_i] += distance_matrix[min_i][k]
r[min_i] /= num_joins_left - 2
num_joins_left -= 1
return Tree(clades[0])
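# Sketch of the selection rule used above (for reference): at each step the pair (i, j)
# minimising
#     d(i, j) + join_cost(species_i, species_j) - r_i - r_j
# is merged, where r_i is the average divergence of i from all remaining clades; the two
# branch lengths are (d + r_i - r_j) / 2 and d minus that, each clamped at zero.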
def raxml_tree(seqs):
"""
Run RAxML on the given sequences and return a Bio.Phylo tree (arbitrarily binarized and rooted).
"""
# TODO: do this properly so it doesn't leave a bunch of temp files everywhere
faPath = 'tmp.fa'
with open(faPath, 'w') as f:
for seq_name, seq in seqs.iteritems():
f.write('>%s\n' % seq_name)
f.write('%s\n' % seq)
cmd = RaxmlCommandline(sequences=faPath, model='GTRCAT', name='test')
system(str(cmd) + ' -V >/dev/null 2>&1')
tree = Phylo.read(open('RAxML_bestTree.test'), 'newick')
for path in glob('RAxML_*.test'):
os.remove(path)
# Arbitrarily binarize the tree so it can get rerooted properly.
    assert len(tree.root.clades) == 3
    tree.root.clades = [Clade(branch_length=0.0, clades=tree.root.clades[0:2]), tree.root.clades[2]]
return tree
class DSplit(namedtuple('DSplit_base', ['set1', 'set2', 'isolation_index'])):
    def __eq__(self, other):
        # A d-split is an unordered pair of sets, so compare both orientations.
        same_sets = ((other.set1 == self.set1 and other.set2 == self.set2)
                     or (other.set2 == self.set1 and other.set1 == self.set2))
        return same_sets and other.isolation_index == self.isolation_index
def __hash__(self):
return hash((frozenset((self.set1, self.set2)), self.isolation_index))
def get_d_splits(distance_matrix, relaxed=False):
    """
    Compute the non-trivial d-splits of a distance matrix (Bandelt and Dress 1992),
    adding one taxon at a time and keeping only the splits that still satisfy the
    four-point criterion.
    """
    d_splits = set()
for i in xrange(1, len(distance_matrix)):
new_d_splits = set()
singleton_split = (frozenset(range(i)), frozenset([i]))
if satisfies_four_point_criterion(distance_matrix, singleton_split[0], singleton_split[1], enforce_three_point=False):
new_d_splits.add(singleton_split)
for d_split in d_splits:
new_d_split_1 = (d_split[0] | frozenset([i]), d_split[1])
new_d_split_2 = (d_split[0], d_split[1] | frozenset([i]))
if satisfies_four_point_criterion(distance_matrix, new_d_split_1[0], new_d_split_1[1], relaxed=relaxed, enforce_three_point=False):
new_d_splits.add(new_d_split_1)
if satisfies_four_point_criterion(distance_matrix, new_d_split_2[0], new_d_split_2[1], relaxed=relaxed, enforce_three_point=False):
new_d_splits.add(new_d_split_2)
d_splits = new_d_splits
# Filter out all trivial splits
d_splits = set(split for split in d_splits if len(split[0]) > 1 and len(split[1]) > 1)
d_splits = set(DSplit(split[0], split[1], isolation_index(distance_matrix, split[0], split[1])) for split in d_splits)
return d_splits
def isolation_index(dm, split1, split2):
min_isolations = []
for i, j in itertools.combinations(split1, 2):
min_isolations.append(min([max(dm[i, j] + dm[k, l],
dm[i, k] + dm[j, l],
dm[i, l] + dm[j, k])
- dm[i, j] - dm[k, l] for k, l in itertools.combinations(split2, 2)]))
return min(min_isolations) / 2
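# Isolation index, as computed above: for a split {A, B} it is
#     alpha(A, B) = (1/2) * min over i,j in A and k,l in B of
#                   [ max(d(i,j)+d(k,l), d(i,k)+d(j,l), d(i,l)+d(j,k)) - d(i,j) - d(k,l) ],
# i.e. how strongly the worst-supporting quartet still favours the split.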
def greedy_split_decomposition(distance_matrix, seq_names, relaxed=False):
    """
    Build a tree greedily from the d-splits of the distance matrix: starting from a
    star tree, insert splits in decreasing order of isolation index, skipping any
    split that is incompatible with those already added.
    """
def get_set_to_split(split, tree):
split_1_clades = [clade for clade in tree.get_terminals() if clade.name in split.set1]
split_2_clades = [clade for clade in tree.get_terminals() if clade.name in split.set2]
split_1_parents = set([get_parent(tree, clade) for clade in split_1_clades])
split_2_parents = set([get_parent(tree, clade) for clade in split_2_clades])
if len(split_1_parents) == 1:
return split.set1
if len(split_2_parents) == 1:
return split.set2
return None
    d_splits = sorted(list(get_d_splits(distance_matrix, relaxed=relaxed)), key=lambda x: x.isolation_index, reverse=True)
tree = Tree(Clade(clades=[Clade(name=i) for i in xrange(len(seq_names))]))
for split in d_splits:
compatible_set = get_set_to_split(split, tree)
if compatible_set is None:
# This split is not compatible with the splits that have already been selected.
continue
clades = [clade for clade in tree.get_terminals() if clade.name in compatible_set]
parent = tree.common_ancestor(clades)
parent.clades = filter(lambda x: x.name not in compatible_set, parent.clades)
parent.clades.append(Clade(clades=clades))
# Remap leaf names to be names and not ints
leaves = [clade for clade in tree.get_terminals()]
for leaf in leaves:
leaf.name = seq_names[int(leaf.name)]
return tree
def build_tree_bottom_up(seqs, columns, seq_names, species_tree, cluster_method, evaluation_method,
distance_correction):
"""
Build a tree using a NJ-esque clustering method.
"""
distance_matrix = distance_matrix_from_columns(columns, distance_correction)
# Biopython annoyingly (but understandably) wants the matrix in
# lower triangular format, i.e. only everything below the diagonal
triangular_matrix = [[entry for j,entry in enumerate(row) if j <= i] for i, row in enumerate(distance_matrix.tolist())]
tree_constructor = TreeConstruction.DistanceTreeConstructor()
triangular_matrix = TreeConstruction._DistanceMatrix(seq_names, triangular_matrix)
if cluster_method == 'neighbor-joining':
tree = tree_constructor.nj(triangular_matrix)
elif cluster_method == 'upgma':
tree = tree_constructor.upgma(triangular_matrix)
elif cluster_method == 'guided-neighbor-joining':
tree = guided_neighbor_joining(distance_matrix, seq_names, species_tree)
elif cluster_method == 'maximum-likelihood':
tree = raxml_tree(seqs)
elif cluster_method == 'split-decomposition':
logging.info(seq_names)
logging.info(distance_matrix)
tree = greedy_split_decomposition(distance_matrix, seq_names, relaxed=True)
else:
raise RuntimeError('Unrecognized bottom-up method: %s' % cluster_method)
return tree
def build_tree(seqs, species_tree, cluster_method, evaluation_method, distance_correction, outgroups):
"""
Build a tree using some clustering method and some split-evaluation method.
"""
seq_names = seqs.keys()
columns = seqs_to_columns(seqs, seq_names)
if cluster_method in ['k-means', 'k-modes']:
tree = build_tree_top_down(columns, seq_names, cluster_method, evaluation_method)
else:
tree = build_tree_bottom_up(seqs, columns, seq_names, species_tree, cluster_method,
evaluation_method, distance_correction)
# Evaluate each split in the resulting tree.
for internal_node in tree.get_nonterminals():
split = [child for child in internal_node]
if len(split) != 2:
continue
split0 = set([leaf.name for leaf in split[0].get_terminals()])
split1 = set([leaf.name for leaf in split[1].get_terminals()])
leaf_names = [node.name for node in internal_node.get_terminals()]
relevant_seq_names = [name for name in seq_names if name in leaf_names]
relevant_indices = [i for i, name in enumerate(seq_names) if name in relevant_seq_names]
        multifurcate = False
        # Restrict the columns to the sequences under this internal node so that the
        # indices used by is_good_split line up with relevant_seq_names.
        relevant_columns = [[column[i] for i in relevant_indices] for column in columns]
cluster_assignments0 = [int(seq_name in split0) for seq_name in relevant_seq_names]
cluster_assignments1 = [int(seq_name in split1) for seq_name in relevant_seq_names]
split0_good = is_good_split(cluster_assignments0, relevant_columns, evaluation_method,
distance_correction)
split1_good = is_good_split(cluster_assignments1, relevant_columns, evaluation_method,
distance_correction)
if not split0_good or not split1_good:
multifurcate = True
if multifurcate:
# Need to make this node into a multifurcation.
internal_node.clades = flatten_list([[grandchild for grandchild in child] for child in split])
# workaround for biopython bug.
for node in tree.find_clades():
node.clades = list(node.clades)
tree.root_with_outgroup(tree.common_ancestor([node for node in tree.get_terminals() if node.name in outgroups]), outgroup_branch_length=0.0)
logging.info('%s built: %s' % (cluster_method, tree))
return tree
def evaluate_tree(true_tree, test_tree):
"""
Given a true tree and a test tree, give some stats on how well the test tree reflects the truth.
"""
true_leaf_sets = set()
true_splits = {}
for internal_node in true_tree.get_nonterminals():
leaf_set = frozenset([node.name for node in internal_node.get_terminals()])
split_sets = frozenset([frozenset([node.name for node in node.get_terminals()]) for node in internal_node])
true_leaf_sets.add(leaf_set)
true_splits[leaf_set] = split_sets
overcollapses = 0
undercollapses = 0
wrong_splits = 0
mismatching_leaf_sets = 0
perfect_splits = 0
for internal_node in test_tree.get_nonterminals():
leaf_set = frozenset([node.name for node in internal_node.get_terminals()])
if leaf_set in true_leaf_sets:
split_sets = frozenset([frozenset([node.name for node in node.get_terminals()]) for node in internal_node])
if split_sets == true_splits[leaf_set]:
perfect_splits += 1
else:
true_split = true_splits[leaf_set]
if len(true_split) > len(split_sets):
overcollapses += 1
elif len(true_split) < len(split_sets):
undercollapses += 1
else:
wrong_splits += 1
else:
mismatching_leaf_sets += 1
return { 'overcollapses': overcollapses,
'undercollapses': undercollapses,
'flipped_splits': wrong_splits,
'mismatching_leaf_sets': mismatching_leaf_sets,
'perfect_splits': perfect_splits }
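# Terminology used above (informal): an "overcollapse" means the test tree resolves a
# clade into fewer children than the true tree (a real split was collapsed into a
# multifurcation), an "undercollapse" means it resolves more children than the truth,
# a "flipped split" has the same number of children but a different partition, and a
# "mismatching leaf set" is a clade whose leaf set appears nowhere in the true tree.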
def parse_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument('species_tree', help='species tree (newick format)')
parser.add_argument('--duplication-rate',
type=float,
default=None,
help='Gene duplication rate')
parser.add_argument('--loss-rate',
type=float,
default=None,
help='Gene loss rate')
parser.add_argument('--num-columns',
type=int,
default=200,
help='Number of columns')
parser.add_argument('--evaluation-methods',
nargs='+',
default=evaluation_methods,
help='Methods to collapse the uncertain splits in the trees')
parser.add_argument('--cluster-methods',
nargs='+',
default=cluster_methods,
help='Methods to build the trees')
parser.add_argument('--num-tests',
type=int,
default=100,
help='Number of trees to build for each combination of dup-rate, '
'loss-rate, cluster-method, evaluation-method')
parser.add_argument('--observed-species',
nargs='+',
help='A subset of leaf species that will be used for tree-building')
parser.add_argument('--distance-correction',
choices=['jukes-cantor', 'none'],
default='jukes-cantor',
help='The type of correction to use for distance-based methods')
parser.add_argument('--summary',
default=False,
action='store_true',
help='Instead of printing out all results, print a summary of the results')
parser.add_argument('--log-level',
choices=['DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'],
help='Log level')
return parser.parse_args()
def tree_to_newick(tree):
f = StringIO()
Phylo.write(tree, f, 'newick')
return f.getvalue().strip()
def run_simulated_tests(gene_tree_sim, grt_sim, species_tree, args):
tree_evaluations = []
for _ in xrange(args.num_tests):
true_tree, leaf_seqs = generate_gene_tree_and_sequences(gene_tree_sim, grt_sim,
args.num_columns,
args.observed_species)
if len(true_tree.get_terminals()) < 4:
# Not enough leaves to build a tree
continue
for cluster_method in args.cluster_methods:
for evaluation_method in args.evaluation_methods:
# Choose the second child of the root as the outgroup for no good reason
outgroups = [node.name for node in true_tree.root[1].get_terminals()]
logging.info('true tree: %s' % true_tree)
built_tree = build_tree(leaf_seqs, species_tree, cluster_method, evaluation_method,
args.distance_correction, outgroups)
evaluation = evaluate_tree(true_tree, built_tree)
evaluation['cluster_method'] = cluster_method
evaluation['evaluation_method'] = evaluation_method
evaluation['distance_correction'] = args.distance_correction
evaluation['tree'] = tree_to_newick(built_tree)
evaluation['true_tree'] = tree_to_newick(true_tree)
evaluation['loss_rate'] = gene_tree_sim.extinction_rate
evaluation['duplication_rate'] = gene_tree_sim.duplication_rate
evaluation['fraction_perfect_splits'] = float(evaluation['perfect_splits']) / len(true_tree.get_nonterminals())
tree_evaluations.append(evaluation)
return tree_evaluations
def main():
args = parse_args()
if args.log_level is not None:
# Shamelessly stolen from the python logging module docs.
level = getattr(logging, args.log_level)
# Clear the global logging info since some library "helpfully" set it for us.
logging.getLogger('').handlers = []
# No need to do input checking here since argparse ensures the input is valid.
logging.basicConfig(level=level)
species_tree = Phylo.read(StringIO(args.species_tree), 'newick')
if args.duplication_rate is None:
# Test several duplication rates
duplication_range = np.arange(0.0, 0.4, 0.1)
else:
# Only one duplication rate
duplication_range = [args.duplication_rate]
if args.loss_rate is None:
# Test several loss rates
loss_range = np.arange(0.0, 0.5, 0.1)
else:
# Only one loss rate
loss_range = [args.loss_rate]
tree_evaluations = []
for duplication_rate in duplication_range:
for loss_rate in loss_range:
logging.info('Starting with dup rate %s, loss rate %s' % (duplication_rate, loss_rate))
gene_tree_sim = BirthDeathSimulator(species_tree,
duplication_rate,
loss_rate)
grt_sim = GeneralizedReversibleSimulator(0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25)
tree_evaluations.extend(run_simulated_tests(gene_tree_sim, grt_sim, species_tree, args))
df = pd.DataFrame(tree_evaluations)
if args.summary:
print df.groupby(['cluster_method', 'evaluation_method']).sum()
else:
print df.to_csv()
if __name__ == '__main__':
main()
|
py
|
1a573af789c0c04e247f6abe4cc14d4feac4e492
|
"""Constants for tests."""
with open("tests/fixtures/event_d2d_service_message_available.json") as file:
D2D_SERVICE_MESSAGE_AVAILABLE_SAMPLE = file.read()
with open("tests/fixtures/event_ms_channel_connect.json") as file:
MS_CHANNEL_CONNECT_SAMPLE = file.read()
with open("tests/fixtures/event_ms_channel_disconnect.json") as file:
MS_CHANNEL_DISCONNECT_SAMPLE = file.read()
with open("tests/fixtures/event_ms_channel_ready.json") as file:
MS_CHANNEL_READY_SAMPLE = file.read()
with open("tests/fixtures/event_ed_edentv_update.json") as file:
ED_EDENTV_UPDATE_SAMPLE = file.read()
with open("tests/fixtures/event_ed_apps_launch.json") as file:
ED_APPS_LAUNCH_SAMPLE = file.read()
with open("tests/fixtures/event_ed_installedApp_get.json") as file:
ED_INSTALLED_APP_SAMPLE = file.read()
with open("tests/fixtures/event_ms_error.json") as file:
MS_ERROR_SAMPLE = file.read()
with open("tests/fixtures/event_ms_voiceapp_hide.json") as file:
MS_VOICEAPP_HIDE_SAMPLE = file.read()
|
py
|
1a573cc4b7287730908a9ee71a674d2230e7798e
|
import asyncio
import logging
from dataclasses import dataclass
from pathlib import Path
from shutil import rmtree
from typing import Optional, List, Dict
import pytest
import pytest_asyncio
from blspy import G1Element
from chia.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from chia.pools.pool_puzzles import SINGLETON_LAUNCHER_HASH
from chia.pools.pool_wallet_info import PoolWalletInfo, PoolSingletonState
from chia.protocols import full_node_protocol
from chia.protocols.full_node_protocol import RespondBlock
from chia.rpc.rpc_server import start_rpc_server
from chia.rpc.wallet_rpc_api import WalletRpcApi
from chia.rpc.wallet_rpc_client import WalletRpcClient
from chia.simulator.simulator_protocol import FarmNewBlockProtocol, ReorgProtocol
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.peer_info import PeerInfo
from chia.util.bech32m import encode_puzzle_hash
from chia.util.byte_types import hexstr_to_bytes
from chia.wallet.derive_keys import find_authentication_sk, find_owner_sk
from chia.wallet.wallet_node import WalletNode
from tests.block_tools import get_plot_dir
from chia.util.config import load_config
from chia.util.ints import uint16, uint32
from chia.wallet.transaction_record import TransactionRecord
from chia.wallet.util.wallet_types import WalletType
from tests.setup_nodes import self_hostname, setup_simulators_and_wallets, bt
from tests.time_out_assert import time_out_assert
from tests.util.socket import find_available_listen_port
# TODO: Compare deducted fees in all tests against reported total_fee
log = logging.getLogger(__name__)
FEE_AMOUNT = 2000000000000
def get_pool_plot_dir():
return get_plot_dir() / Path("pool_tests")
@dataclass
class TemporaryPoolPlot:
p2_singleton_puzzle_hash: bytes32
plot_id: Optional[bytes32] = None
async def __aenter__(self):
plot_id: bytes32 = await bt.new_plot(self.p2_singleton_puzzle_hash, get_pool_plot_dir())
assert plot_id is not None
await bt.refresh_plots()
self.plot_id = plot_id
return self
async def __aexit__(self, exc_type, exc_value, exc_traceback):
await bt.delete_plot(self.plot_id)
async def create_pool_plot(p2_singleton_puzzle_hash: bytes32) -> Optional[bytes32]:
plot_id = await bt.new_plot(p2_singleton_puzzle_hash, get_pool_plot_dir())
await bt.refresh_plots()
return plot_id
async def wallet_is_synced(wallet_node: WalletNode, full_node_api):
assert wallet_node.wallet_state_manager is not None
return (
await wallet_node.wallet_state_manager.blockchain.get_finished_sync_up_to()
== full_node_api.full_node.blockchain.get_peak_height()
)
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
PREFARMED_BLOCKS = 4
class TestPoolWalletRpc:
@pytest_asyncio.fixture(scope="function")
async def two_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest_asyncio.fixture(scope="function")
async def one_wallet_node_and_rpc(self):
rmtree(get_pool_plot_dir(), ignore_errors=True)
async for nodes in setup_simulators_and_wallets(1, 1, {}):
full_nodes, wallets = nodes
full_node_api = full_nodes[0]
wallet_node_0, wallet_server_0 = wallets[0]
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
our_ph = await wallet_0.get_new_puzzlehash()
await self.farm_blocks(full_node_api, our_ph, PREFARMED_BLOCKS)
api_user = WalletRpcApi(wallet_node_0)
config = bt.config
daemon_port = config["daemon_port"]
test_rpc_port = find_available_listen_port("rpc_port")
rpc_cleanup = await start_rpc_server(
api_user,
self_hostname,
daemon_port,
test_rpc_port,
lambda x: None,
bt.root_path,
config,
connect_to_daemon=False,
)
client = await WalletRpcClient.create(self_hostname, test_rpc_port, bt.root_path, config)
yield client, wallet_node_0, full_node_api
client.close()
await client.await_closed()
await rpc_cleanup()
@pytest_asyncio.fixture(scope="function")
async def setup(self, two_wallet_nodes):
rmtree(get_pool_plot_dir(), ignore_errors=True)
full_nodes, wallets = two_wallet_nodes
wallet_node_0, wallet_server_0 = wallets[0]
wallet_node_1, wallet_server_1 = wallets[1]
our_ph_record = await wallet_node_0.wallet_state_manager.get_unused_derivation_record(1, False, True)
pool_ph_record = await wallet_node_1.wallet_state_manager.get_unused_derivation_record(1, False, True)
our_ph = our_ph_record.puzzle_hash
pool_ph = pool_ph_record.puzzle_hash
api_user = WalletRpcApi(wallet_node_0)
config = bt.config
daemon_port = config["daemon_port"]
test_rpc_port = find_available_listen_port("rpc_port")
rpc_cleanup = await start_rpc_server(
api_user,
self_hostname,
daemon_port,
test_rpc_port,
lambda x: None,
bt.root_path,
config,
connect_to_daemon=False,
)
client = await WalletRpcClient.create(self_hostname, test_rpc_port, bt.root_path, config)
return (
full_nodes,
[wallet_node_0, wallet_node_1],
[our_ph, pool_ph],
client, # wallet rpc client
rpc_cleanup,
)
async def get_total_block_rewards(self, num_blocks):
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
return funds
async def farm_blocks(self, full_node_api, ph: bytes32, num_blocks: int):
for i in range(num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
return num_blocks
# TODO also return calculated block rewards
@pytest.mark.asyncio
@pytest.mark.parametrize("trusted", [True, False])
@pytest.mark.parametrize("fee", [0, FEE_AMOUNT])
async def test_create_new_pool_wallet_self_farm(self, one_wallet_node_and_rpc, fee, trusted):
client, wallet_node_0, full_node_api = one_wallet_node_and_rpc
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
if trusted:
wallet_node_0.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_node_0.config["trusted_peers"] = {}
await wallet_node_0.server.start_client(
PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
)
total_block_rewards = await self.get_total_block_rewards(PREFARMED_BLOCKS)
await time_out_assert(10, wallet_0.get_confirmed_balance, total_block_rewards)
await time_out_assert(10, wallet_node_0.wallet_state_manager.blockchain.get_peak_height, PREFARMED_BLOCKS)
our_ph = await wallet_0.get_new_puzzlehash()
summaries_response = await client.get_wallets()
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
assert False
await time_out_assert(10, wallet_is_synced, True, wallet_node_0, full_node_api)
creation_tx: TransactionRecord = await client.create_new_pool_wallet(
our_ph, "", 0, f"{self_hostname}:5000", "new", "SELF_POOLING", fee
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx.spend_bundle,
creation_tx.name,
)
await self.farm_blocks(full_node_api, our_ph, 6)
assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None
await time_out_assert(10, wallet_is_synced, True, wallet_node_0, full_node_api)
summaries_response = await client.get_wallets()
wallet_id: Optional[int] = None
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
wallet_id = summary["id"]
assert wallet_id is not None
status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
assert status.current.state == PoolSingletonState.SELF_POOLING.value
assert status.target is None
assert status.current.owner_pubkey == G1Element.from_bytes(
bytes.fromhex(
"b286bbf7a10fa058d2a2a758921377ef00bb7f8143e1bd40dd195ae918dbef42cfc481140f01b9eae13b430a0c8fe304"
)
)
assert status.current.pool_url is None
assert status.current.relative_lock_height == 0
assert status.current.version == 1
# Check that config has been written properly
full_config: Dict = load_config(wallet_0.wallet_state_manager.root_path, "config.yaml")
pool_list: List[Dict] = full_config["pool"]["pool_list"]
assert len(pool_list) == 1
pool_config = pool_list[0]
assert (
pool_config["owner_public_key"]
== "0xb286bbf7a10fa058d2a2a758921377ef00bb7f8143e1bd40dd195ae918dbef42cfc481140f01b9eae13b430a0c8fe304"
)
# It can be one of multiple launcher IDs, due to selecting a different coin
launcher_id = None
for addition in creation_tx.additions:
if addition.puzzle_hash == SINGLETON_LAUNCHER_HASH:
launcher_id = addition.name()
break
assert hexstr_to_bytes(pool_config["launcher_id"]) == launcher_id
assert pool_config["pool_url"] == ""
@pytest.mark.asyncio
@pytest.mark.parametrize("trusted", [True, False])
@pytest.mark.parametrize("fee", [0, FEE_AMOUNT])
async def test_create_new_pool_wallet_farm_to_pool(self, one_wallet_node_and_rpc, fee, trusted):
client, wallet_node_0, full_node_api = one_wallet_node_and_rpc
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
if trusted:
wallet_node_0.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_node_0.config["trusted_peers"] = {}
await wallet_node_0.server.start_client(
PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
)
total_block_rewards = await self.get_total_block_rewards(PREFARMED_BLOCKS)
await time_out_assert(10, wallet_node_0.wallet_state_manager.blockchain.get_peak_height, PREFARMED_BLOCKS)
await time_out_assert(10, wallet_0.get_confirmed_balance, total_block_rewards)
await time_out_assert(10, wallet_is_synced, True, wallet_node_0, full_node_api)
our_ph = await wallet_0.get_new_puzzlehash()
summaries_response = await client.get_wallets()
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
assert False
creation_tx: TransactionRecord = await client.create_new_pool_wallet(
our_ph, "http://pool.example.com", 10, f"{self_hostname}:5000", "new", "FARMING_TO_POOL", fee
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx.spend_bundle,
creation_tx.name,
)
await self.farm_blocks(full_node_api, our_ph, 6)
assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None
await time_out_assert(5, wallet_is_synced, True, wallet_node_0, full_node_api)
summaries_response = await client.get_wallets()
wallet_id: Optional[int] = None
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
wallet_id = summary["id"]
assert wallet_id is not None
status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
assert status.current.state == PoolSingletonState.FARMING_TO_POOL.value
assert status.target is None
assert status.current.owner_pubkey == G1Element.from_bytes(
bytes.fromhex(
"b286bbf7a10fa058d2a2a758921377ef00bb7f8143e1bd40dd195ae918dbef42cfc481140f01b9eae13b430a0c8fe304"
)
)
assert status.current.pool_url == "http://pool.example.com"
assert status.current.relative_lock_height == 10
assert status.current.version == 1
# Check that config has been written properly
full_config: Dict = load_config(wallet_0.wallet_state_manager.root_path, "config.yaml")
pool_list: List[Dict] = full_config["pool"]["pool_list"]
assert len(pool_list) == 1
pool_config = pool_list[0]
assert (
pool_config["owner_public_key"]
== "0xb286bbf7a10fa058d2a2a758921377ef00bb7f8143e1bd40dd195ae918dbef42cfc481140f01b9eae13b430a0c8fe304"
)
# It can be one of multiple launcher IDs, due to selecting a different coin
launcher_id = None
for addition in creation_tx.additions:
if addition.puzzle_hash == SINGLETON_LAUNCHER_HASH:
launcher_id = addition.name()
break
assert hexstr_to_bytes(pool_config["launcher_id"]) == launcher_id
assert pool_config["pool_url"] == "http://pool.example.com"
@pytest.mark.asyncio
@pytest.mark.parametrize("trusted", [True, False])
@pytest.mark.parametrize("fee", [0, FEE_AMOUNT])
async def test_create_multiple_pool_wallets(self, one_wallet_node_and_rpc, fee, trusted):
client, wallet_node_0, full_node_api = one_wallet_node_and_rpc
if trusted:
wallet_node_0.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_node_0.config["trusted_peers"] = {}
await wallet_node_0.server.start_client(
PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
)
total_block_rewards = await self.get_total_block_rewards(PREFARMED_BLOCKS)
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
await time_out_assert(10, wallet_0.get_confirmed_balance, total_block_rewards)
await time_out_assert(10, wallet_node_0.wallet_state_manager.blockchain.get_peak_height, PREFARMED_BLOCKS)
await time_out_assert(10, wallet_is_synced, True, wallet_node_0, full_node_api)
our_ph_1 = await wallet_0.get_new_puzzlehash()
our_ph_2 = await wallet_0.get_new_puzzlehash()
summaries_response = await client.get_wallets()
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
assert False
creation_tx: TransactionRecord = await client.create_new_pool_wallet(
our_ph_1, "", 0, f"{self_hostname}:5000", "new", "SELF_POOLING", fee
)
creation_tx_2: TransactionRecord = await client.create_new_pool_wallet(
our_ph_1, self_hostname, 12, f"{self_hostname}:5000", "new", "FARMING_TO_POOL", fee
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx.spend_bundle,
creation_tx.name,
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx_2.spend_bundle,
creation_tx_2.name,
)
await self.farm_blocks(full_node_api, our_ph_2, 6)
assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None
assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx_2.name) is None
await asyncio.sleep(3)
status_2: PoolWalletInfo = (await client.pw_status(2))[0]
status_3: PoolWalletInfo = (await client.pw_status(3))[0]
if status_2.current.state == PoolSingletonState.SELF_POOLING.value:
assert status_3.current.state == PoolSingletonState.FARMING_TO_POOL.value
else:
assert status_2.current.state == PoolSingletonState.FARMING_TO_POOL.value
assert status_3.current.state == PoolSingletonState.SELF_POOLING.value
full_config: Dict = load_config(wallet_0.wallet_state_manager.root_path, "config.yaml")
pool_list: List[Dict] = full_config["pool"]["pool_list"]
assert len(pool_list) == 2
assert len(await wallet_node_0.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(2)) == 0
assert len(await wallet_node_0.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(3)) == 0
# Doing a reorg reverts and removes the pool wallets
await full_node_api.reorg_from_index_to_new_index(ReorgProtocol(uint32(0), uint32(20), our_ph_2))
await asyncio.sleep(5)
summaries_response = await client.get_wallets()
assert len(summaries_response) == 1
with pytest.raises(ValueError):
await client.pw_status(2)
with pytest.raises(ValueError):
await client.pw_status(3)
# Create some CAT wallets to increase wallet IDs
for i in range(5):
await asyncio.sleep(2)
res = await client.create_new_cat_and_wallet(20)
await asyncio.sleep(2)
summaries_response = await client.get_wallets()
assert res["success"]
cat_0_id = res["wallet_id"]
asset_id = bytes.fromhex(res["asset_id"])
assert len(asset_id) > 0
await self.farm_blocks(full_node_api, our_ph_2, 6)
await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
bal_0 = await client.get_wallet_balance(cat_0_id)
assert bal_0["confirmed_wallet_balance"] == 20
# Test creation of many pool wallets. Use untrusted since that is the more complicated protocol, but don't
# run this code more than once, since it's slow.
if fee == 0 and not trusted:
for i in range(22):
await time_out_assert(10, wallet_is_synced, True, wallet_node_0, full_node_api)
creation_tx_3: TransactionRecord = await client.create_new_pool_wallet(
our_ph_1, self_hostname, 5, f"{self_hostname}:5000", "new", "FARMING_TO_POOL", fee
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx_3.spend_bundle,
creation_tx_3.name,
)
await self.farm_blocks(full_node_api, our_ph_2, 2)
await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
full_config: Dict = load_config(wallet_0.wallet_state_manager.root_path, "config.yaml")
pool_list: List[Dict] = full_config["pool"]["pool_list"]
assert len(pool_list) == i + 3
if i == 0:
# Ensures that the CAT creation does not cause pool wallet IDs to increment
for wallet in wallet_node_0.wallet_state_manager.wallets.values():
if wallet.type() == WalletType.POOLING_WALLET:
status: PoolWalletInfo = (await client.pw_status(wallet.id()))[0]
assert (await wallet.get_pool_wallet_index()) < 5
auth_sk = find_authentication_sk(
[wallet_0.wallet_state_manager.private_key], status.current.owner_pubkey
)
assert auth_sk is not None
owner_sk = find_owner_sk(
[wallet_0.wallet_state_manager.private_key], status.current.owner_pubkey
)
assert owner_sk is not None
assert owner_sk != auth_sk
@pytest.mark.asyncio
@pytest.mark.parametrize("trusted", [True, False])
@pytest.mark.parametrize("fee", [0, FEE_AMOUNT])
async def test_absorb_self(self, one_wallet_node_and_rpc, fee, trusted):
client, wallet_node_0, full_node_api = one_wallet_node_and_rpc
if trusted:
wallet_node_0.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_node_0.config["trusted_peers"] = {}
await wallet_node_0.server.start_client(
PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
)
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
total_block_rewards = await self.get_total_block_rewards(PREFARMED_BLOCKS)
await time_out_assert(10, wallet_0.get_confirmed_balance, total_block_rewards)
await time_out_assert(10, wallet_node_0.wallet_state_manager.blockchain.get_peak_height, PREFARMED_BLOCKS)
our_ph = await wallet_0.get_new_puzzlehash()
summaries_response = await client.get_wallets()
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
assert False
await time_out_assert(10, wallet_is_synced, True, wallet_node_0, full_node_api)
creation_tx: TransactionRecord = await client.create_new_pool_wallet(
our_ph, "", 0, f"{self_hostname}:5000", "new", "SELF_POOLING", fee
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx.spend_bundle,
creation_tx.name,
)
await self.farm_blocks(full_node_api, our_ph, 1)
await asyncio.sleep(2)
status: PoolWalletInfo = (await client.pw_status(2))[0]
assert status.current.state == PoolSingletonState.SELF_POOLING.value
async with TemporaryPoolPlot(status.p2_singleton_puzzle_hash) as pool_plot:
all_blocks = await full_node_api.get_all_full_blocks()
blocks = bt.get_consecutive_blocks(
3,
block_list_input=all_blocks,
force_plot_id=pool_plot.plot_id,
farmer_reward_puzzle_hash=our_ph,
guarantee_transaction_block=True,
)
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-3]))
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-2]))
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-1]))
await asyncio.sleep(2)
bal = await client.get_wallet_balance(2)
assert bal["confirmed_wallet_balance"] == 2 * 1750000000000
# Claim 2 * 1.75, and farm a new 1.75
absorb_tx: TransactionRecord = (await client.pw_absorb_rewards(2, fee))["transaction"]
await time_out_assert(
5,
full_node_api.full_node.mempool_manager.get_spendbundle,
absorb_tx.spend_bundle,
absorb_tx.name,
)
await self.farm_blocks(full_node_api, our_ph, 2)
await asyncio.sleep(2)
new_status: PoolWalletInfo = (await client.pw_status(2))[0]
assert status.current == new_status.current
assert status.tip_singleton_coin_id != new_status.tip_singleton_coin_id
bal = await client.get_wallet_balance(2)
assert bal["confirmed_wallet_balance"] == 1 * 1750000000000
# Claim another 1.75
absorb_tx1: TransactionRecord = (await client.pw_absorb_rewards(2, fee))["transaction"]
absorb_tx1.spend_bundle.debug()
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
absorb_tx1.spend_bundle,
absorb_tx1.name,
)
await self.farm_blocks(full_node_api, our_ph, 2)
await asyncio.sleep(2)
bal = await client.get_wallet_balance(2)
assert bal["confirmed_wallet_balance"] == 0
assert len(await wallet_node_0.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(2)) == 0
tr: TransactionRecord = await client.send_transaction(
1, 100, encode_puzzle_hash(status.p2_singleton_puzzle_hash, "txch")
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
tr.spend_bundle,
tr.name,
)
await self.farm_blocks(full_node_api, our_ph, 2)
# Balance ignores non coinbase TX
bal = await client.get_wallet_balance(2)
assert bal["confirmed_wallet_balance"] == 0
with pytest.raises(ValueError):
await client.pw_absorb_rewards(2, fee)
tx1 = await client.get_transactions(1)
assert (250000000000 + fee) in [tx.additions[0].amount for tx in tx1]
# await time_out_assert(10, wallet_0.get_confirmed_balance, total_block_rewards)
@pytest.mark.asyncio
@pytest.mark.parametrize("trusted", [True, False])
@pytest.mark.parametrize("fee", [0, FEE_AMOUNT])
async def test_absorb_pooling(self, one_wallet_node_and_rpc, fee, trusted):
client, wallet_node_0, full_node_api = one_wallet_node_and_rpc
if trusted:
wallet_node_0.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_node_0.config["trusted_peers"] = {}
await wallet_node_0.server.start_client(
PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
)
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
total_block_rewards = await self.get_total_block_rewards(PREFARMED_BLOCKS)
await time_out_assert(10, wallet_0.get_confirmed_balance, total_block_rewards)
await time_out_assert(10, wallet_node_0.wallet_state_manager.blockchain.get_peak_height, PREFARMED_BLOCKS)
await time_out_assert(10, wallet_is_synced, True, wallet_node_0, full_node_api)
our_ph = await wallet_0.get_new_puzzlehash()
summaries_response = await client.get_wallets()
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
assert False
        # Balance starts at 6 XCH
assert (await wallet_0.get_confirmed_balance()) == 6000000000000
creation_tx: TransactionRecord = await client.create_new_pool_wallet(
our_ph, "http://123.45.67.89", 10, f"{self_hostname}:5000", "new", "FARMING_TO_POOL", fee
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx.spend_bundle,
creation_tx.name,
)
await self.farm_blocks(full_node_api, our_ph, 1)
await asyncio.sleep(2)
status: PoolWalletInfo = (await client.pw_status(2))[0]
assert status.current.state == PoolSingletonState.FARMING_TO_POOL.value
async with TemporaryPoolPlot(status.p2_singleton_puzzle_hash) as pool_plot:
all_blocks = await full_node_api.get_all_full_blocks()
blocks = bt.get_consecutive_blocks(
3,
block_list_input=all_blocks,
force_plot_id=pool_plot.plot_id,
farmer_reward_puzzle_hash=our_ph,
guarantee_transaction_block=True,
)
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-3]))
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-2]))
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-1]))
await asyncio.sleep(5)
bal = await client.get_wallet_balance(2)
# Pooled plots don't have balance
assert bal["confirmed_wallet_balance"] == 0
# Claim 2 * 1.75, and farm a new 1.75
absorb_tx: TransactionRecord = (await client.pw_absorb_rewards(2, fee))["transaction"]
await time_out_assert(
5,
full_node_api.full_node.mempool_manager.get_spendbundle,
absorb_tx.spend_bundle,
absorb_tx.name,
)
await self.farm_blocks(full_node_api, our_ph, 2)
await asyncio.sleep(5)
new_status: PoolWalletInfo = (await client.pw_status(2))[0]
assert status.current == new_status.current
assert status.tip_singleton_coin_id != new_status.tip_singleton_coin_id
bal = await client.get_wallet_balance(2)
assert bal["confirmed_wallet_balance"] == 0
# Claim another 1.75
ret = await client.pw_absorb_rewards(2, fee)
absorb_tx: TransactionRecord = ret["transaction"]
absorb_tx.spend_bundle.debug()
await time_out_assert(
5,
full_node_api.full_node.mempool_manager.get_spendbundle,
absorb_tx.spend_bundle,
absorb_tx.name,
)
if fee == 0:
assert ret["fee_transaction"] is None
else:
assert ret["fee_transaction"].fee_amount == fee
assert absorb_tx.fee_amount == fee
await self.farm_blocks(full_node_api, our_ph, 2)
await asyncio.sleep(5)
bal = await client.get_wallet_balance(2)
assert bal["confirmed_wallet_balance"] == 0
assert len(await wallet_node_0.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(2)) == 0
assert (
wallet_node_0.wallet_state_manager.blockchain.get_peak_height()
== full_node_api.full_node.blockchain.get_peak().height
)
            # Balance starts at 6 XCH and 5 more blocks are farmed, total 22 XCH
assert (await wallet_0.get_confirmed_balance()) == 21999999999999
num_trials = 3
status = new_status
await asyncio.sleep(2)
if fee == 0:
for i in range(num_trials):
all_blocks = await full_node_api.get_all_full_blocks()
blocks = bt.get_consecutive_blocks(
10,
block_list_input=all_blocks,
force_plot_id=pool_plot.plot_id,
farmer_reward_puzzle_hash=our_ph,
guarantee_transaction_block=True,
)
for block in blocks[-10:]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
await asyncio.sleep(2)
ret = await client.pw_absorb_rewards(2, fee)
absorb_tx: TransactionRecord = ret["transaction"]
await time_out_assert(
5,
full_node_api.full_node.mempool_manager.get_spendbundle,
absorb_tx.spend_bundle,
absorb_tx.name,
)
await self.farm_blocks(full_node_api, our_ph, 2)
await asyncio.sleep(2)
new_status: PoolWalletInfo = (await client.pw_status(2))[0]
assert status.current == new_status.current
assert status.tip_singleton_coin_id != new_status.tip_singleton_coin_id
status = new_status
assert ret["fee_transaction"] is None
bal2 = await client.get_wallet_balance(2)
assert bal2["confirmed_wallet_balance"] == 0
# Note: as written, confirmed balance will not reflect on absorbs, because the fee
# is paid back into the same client's wallet in this test.
tx1 = await client.get_transactions(1)
assert (250000000000 + fee) in [tx.additions[0].amount for tx in tx1]
@pytest.mark.asyncio
@pytest.mark.parametrize("trusted", [True])
@pytest.mark.parametrize("fee", [0])
async def test_self_pooling_to_pooling(self, setup, fee, trusted):
"""This tests self-pooling -> pooling"""
num_blocks = 4 # Num blocks to farm at a time
total_blocks = 0 # Total blocks farmed so far
full_nodes, wallet_nodes, receive_address, client, rpc_cleanup = setup
wallets = [wallet_n.wallet_state_manager.main_wallet for wallet_n in wallet_nodes]
wallet_node_0 = wallet_nodes[0]
our_ph = receive_address[0]
pool_ph = receive_address[1]
full_node_api = full_nodes[0]
if trusted:
wallet_node_0.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_node_0.config["trusted_peers"] = {}
await wallet_node_0.server.start_client(
PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
)
try:
total_blocks += await self.farm_blocks(full_node_api, our_ph, num_blocks)
total_block_rewards = await self.get_total_block_rewards(total_blocks)
await time_out_assert(10, wallets[0].get_unconfirmed_balance, total_block_rewards)
await time_out_assert(10, wallets[0].get_confirmed_balance, total_block_rewards)
await time_out_assert(10, wallets[0].get_spendable_balance, total_block_rewards)
await time_out_assert(10, wallet_is_synced, True, wallet_node_0, full_node_api)
assert total_block_rewards > 0
summaries_response = await client.get_wallets()
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
assert False
creation_tx: TransactionRecord = await client.create_new_pool_wallet(
our_ph, "", 0, f"{self_hostname}:5000", "new", "SELF_POOLING", fee
)
creation_tx_2: TransactionRecord = await client.create_new_pool_wallet(
our_ph, "", 0, f"{self_hostname}:5000", "new", "SELF_POOLING", fee
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx.spend_bundle,
creation_tx.name,
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx_2.spend_bundle,
creation_tx_2.name,
)
await self.farm_blocks(full_node_api, our_ph, 6)
assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None
await time_out_assert(10, wallet_is_synced, True, wallet_node_0, full_node_api)
summaries_response = await client.get_wallets()
wallet_id: Optional[int] = None
wallet_id_2: Optional[int] = None
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
if wallet_id is not None:
wallet_id_2 = summary["id"]
else:
wallet_id = summary["id"]
await asyncio.sleep(1)
assert wallet_id is not None
assert wallet_id_2 is not None
status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
status_2: PoolWalletInfo = (await client.pw_status(wallet_id_2))[0]
assert status.current.state == PoolSingletonState.SELF_POOLING.value
assert status_2.current.state == PoolSingletonState.SELF_POOLING.value
assert status.target is None
assert status_2.target is None
join_pool_tx: TransactionRecord = await client.pw_join_pool(
wallet_id,
pool_ph,
"https://pool.example.com",
10,
fee,
)
join_pool_tx_2: TransactionRecord = await client.pw_join_pool(
wallet_id_2,
pool_ph,
"https://pool.example.com",
10,
fee,
)
assert join_pool_tx is not None
assert join_pool_tx_2 is not None
status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
status_2: PoolWalletInfo = (await client.pw_status(wallet_id_2))[0]
async def tx_is_in_mempool(wid, tx: TransactionRecord):
fetched: Optional[TransactionRecord] = await client.get_transaction(wid, tx.name)
return fetched is not None and fetched.is_in_mempool()
await time_out_assert(5, tx_is_in_mempool, True, wallet_id, join_pool_tx)
await time_out_assert(5, tx_is_in_mempool, True, wallet_id_2, join_pool_tx_2)
assert status.current.state == PoolSingletonState.SELF_POOLING.value
assert status.target is not None
assert status.target.state == PoolSingletonState.FARMING_TO_POOL.value
assert status_2.current.state == PoolSingletonState.SELF_POOLING.value
assert status_2.target is not None
assert status_2.target.state == PoolSingletonState.FARMING_TO_POOL.value
await self.farm_blocks(full_node_api, our_ph, 6)
total_blocks += await self.farm_blocks(full_node_api, our_ph, num_blocks)
async def status_is_farming_to_pool(w_id: int):
pw_status: PoolWalletInfo = (await client.pw_status(w_id))[0]
return pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value
await time_out_assert(20, status_is_farming_to_pool, True, wallet_id)
await time_out_assert(20, status_is_farming_to_pool, True, wallet_id_2)
assert len(await wallets[0].wallet_state_manager.tx_store.get_unconfirmed_for_wallet(2)) == 0
finally:
client.close()
await client.await_closed()
await rpc_cleanup()
@pytest.mark.asyncio
@pytest.mark.parametrize("trusted", [True, False])
@pytest.mark.parametrize(
"fee",
[0, FEE_AMOUNT],
)
async def test_leave_pool(self, setup, fee, trusted):
"""This tests self-pooling -> pooling -> escaping -> self pooling"""
full_nodes, wallet_nodes, receive_address, client, rpc_cleanup = setup
our_ph = receive_address[0]
wallets = [wallet_n.wallet_state_manager.main_wallet for wallet_n in wallet_nodes]
pool_ph = receive_address[1]
full_node_api = full_nodes[0]
if trusted:
wallet_nodes[0].config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_nodes[0].config["trusted_peers"] = {}
await wallet_nodes[0].server.start_client(
PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
)
WAIT_SECS = 200
try:
summaries_response = await client.get_wallets()
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
assert False
async def have_chia():
await self.farm_blocks(full_node_api, our_ph, 1)
return (await wallets[0].get_confirmed_balance()) > 0
await time_out_assert(timeout=WAIT_SECS, function=have_chia)
await time_out_assert(10, wallet_is_synced, True, wallet_nodes[0], full_node_api)
creation_tx: TransactionRecord = await client.create_new_pool_wallet(
our_ph, "", 0, f"{self_hostname}:5000", "new", "SELF_POOLING", fee
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx.spend_bundle,
creation_tx.name,
)
await self.farm_blocks(full_node_api, our_ph, 6)
assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None
await time_out_assert(10, wallet_is_synced, True, wallet_nodes[0], full_node_api)
summaries_response = await client.get_wallets()
wallet_id: Optional[int] = None
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
wallet_id = summary["id"]
assert wallet_id is not None
status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
assert status.current.state == PoolSingletonState.SELF_POOLING.value
assert status.target is None
join_pool_tx: TransactionRecord = await client.pw_join_pool(
wallet_id,
pool_ph,
"https://pool.example.com",
5,
fee,
)
assert join_pool_tx is not None
status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
assert status.current.state == PoolSingletonState.SELF_POOLING.value
assert status.current.pool_url is None
assert status.current.relative_lock_height == 0
assert status.current.state == 1
assert status.current.version == 1
assert status.target
assert status.target.pool_url == "https://pool.example.com"
assert status.target.relative_lock_height == 5
assert status.target.state == 3
assert status.target.version == 1
async def status_is_farming_to_pool():
await self.farm_blocks(full_node_api, our_ph, 1)
pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
return pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value
await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)
await time_out_assert(10, wallet_is_synced, True, wallet_nodes[0], full_node_api)
status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
leave_pool_tx: TransactionRecord = await client.pw_self_pool(wallet_id, fee)
assert leave_pool_tx.wallet_id == wallet_id
assert leave_pool_tx.amount == 1
async def status_is_leaving():
await self.farm_blocks(full_node_api, our_ph, 1)
pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
return pw_status.current.state == PoolSingletonState.LEAVING_POOL.value
await time_out_assert(timeout=WAIT_SECS, function=status_is_leaving)
async def status_is_self_pooling():
# Farm enough blocks to wait for relative_lock_height
await self.farm_blocks(full_node_api, our_ph, 1)
pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
return pw_status.current.state == PoolSingletonState.SELF_POOLING.value
await time_out_assert(timeout=WAIT_SECS, function=status_is_self_pooling)
assert len(await wallets[0].wallet_state_manager.tx_store.get_unconfirmed_for_wallet(2)) == 0
finally:
client.close()
await client.await_closed()
await rpc_cleanup()
@pytest.mark.asyncio
@pytest.mark.parametrize("trusted", [True, False])
@pytest.mark.parametrize("fee", [0, FEE_AMOUNT])
async def test_change_pools(self, setup, fee, trusted):
"""This tests Pool A -> escaping -> Pool B"""
full_nodes, wallet_nodes, receive_address, client, rpc_cleanup = setup
our_ph = receive_address[0]
pool_a_ph = receive_address[1]
wallets = [wallet_n.wallet_state_manager.main_wallet for wallet_n in wallet_nodes]
pool_b_ph = await wallets[1].get_new_puzzlehash()
full_node_api = full_nodes[0]
if trusted:
wallet_nodes[0].config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_nodes[0].config["trusted_peers"] = {}
await wallet_nodes[0].server.start_client(
PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
)
WAIT_SECS = 200
try:
summaries_response = await client.get_wallets()
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
assert False
async def have_chia():
await self.farm_blocks(full_node_api, our_ph, 1)
return (await wallets[0].get_confirmed_balance()) > 0
await time_out_assert(timeout=WAIT_SECS, function=have_chia)
await time_out_assert(10, wallet_is_synced, True, wallet_nodes[0], full_node_api)
creation_tx: TransactionRecord = await client.create_new_pool_wallet(
pool_a_ph, "https://pool-a.org", 5, f"{self_hostname}:5000", "new", "FARMING_TO_POOL", fee
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx.spend_bundle,
creation_tx.name,
)
await self.farm_blocks(full_node_api, our_ph, 6)
assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None
await time_out_assert(10, wallet_is_synced, True, wallet_nodes[0], full_node_api)
summaries_response = await client.get_wallets()
wallet_id: Optional[int] = None
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
wallet_id = summary["id"]
assert wallet_id is not None
status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
assert status.current.state == PoolSingletonState.FARMING_TO_POOL.value
assert status.target is None
async def status_is_farming_to_pool():
await self.farm_blocks(full_node_api, our_ph, 1)
pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
return pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value
await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)
pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
assert pw_info.current.pool_url == "https://pool-a.org"
assert pw_info.current.relative_lock_height == 5
status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
join_pool_tx: TransactionRecord = await client.pw_join_pool(
wallet_id,
pool_b_ph,
"https://pool-b.org",
10,
fee,
)
assert join_pool_tx is not None
async def status_is_leaving():
await self.farm_blocks(full_node_api, our_ph, 1)
pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
return pw_status.current.state == PoolSingletonState.LEAVING_POOL.value
await time_out_assert(timeout=WAIT_SECS, function=status_is_leaving)
pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)
pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
assert pw_info.current.pool_url == "https://pool-b.org"
assert pw_info.current.relative_lock_height == 10
assert len(await wallets[0].wallet_state_manager.tx_store.get_unconfirmed_for_wallet(2)) == 0
finally:
client.close()
await client.await_closed()
await rpc_cleanup()
@pytest.mark.asyncio
@pytest.mark.parametrize("trusted", [True, False])
@pytest.mark.parametrize("fee", [0, FEE_AMOUNT])
async def test_change_pools_reorg(self, setup, fee, trusted):
"""This tests Pool A -> escaping -> reorg -> escaping -> Pool B"""
full_nodes, wallet_nodes, receive_address, client, rpc_cleanup = setup
our_ph = receive_address[0]
pool_a_ph = receive_address[1]
wallets = [wallet_n.wallet_state_manager.main_wallet for wallet_n in wallet_nodes]
pool_b_ph = await wallets[1].get_new_puzzlehash()
full_node_api = full_nodes[0]
WAIT_SECS = 30
if trusted:
wallet_nodes[0].config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_nodes[0].config["trusted_peers"] = {}
await wallet_nodes[0].server.start_client(
PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
)
try:
summaries_response = await client.get_wallets()
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
assert False
async def have_chia():
await self.farm_blocks(full_node_api, our_ph, 1)
return (await wallets[0].get_confirmed_balance()) > 0
await time_out_assert(timeout=WAIT_SECS, function=have_chia)
await time_out_assert(10, wallet_is_synced, True, wallet_nodes[0], full_node_api)
creation_tx: TransactionRecord = await client.create_new_pool_wallet(
pool_a_ph, "https://pool-a.org", 5, f"{self_hostname}:5000", "new", "FARMING_TO_POOL", fee
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx.spend_bundle,
creation_tx.name,
)
await self.farm_blocks(full_node_api, our_ph, 6)
assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None
await time_out_assert(5, wallet_is_synced, True, wallet_nodes[0], full_node_api)
summaries_response = await client.get_wallets()
wallet_id: Optional[int] = None
for summary in summaries_response:
if WalletType(int(summary["type"])) == WalletType.POOLING_WALLET:
wallet_id = summary["id"]
assert wallet_id is not None
status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
assert status.current.state == PoolSingletonState.FARMING_TO_POOL.value
assert status.target is None
async def status_is_farming_to_pool():
await self.farm_blocks(full_node_api, our_ph, 1)
pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
return pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value
await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)
pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
assert pw_info.current.pool_url == "https://pool-a.org"
assert pw_info.current.relative_lock_height == 5
join_pool_tx: TransactionRecord = await client.pw_join_pool(
wallet_id,
pool_b_ph,
"https://pool-b.org",
10,
fee,
)
assert join_pool_tx is not None
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
join_pool_tx.spend_bundle,
join_pool_tx.name,
)
await self.farm_blocks(full_node_api, our_ph, 1)
async def status_is_leaving_no_blocks():
pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
return pw_status.current.state == PoolSingletonState.LEAVING_POOL.value
async def status_is_farming_to_pool_no_blocks():
pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
return pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value
await time_out_assert(timeout=WAIT_SECS, function=status_is_leaving_no_blocks)
current_blocks = await full_node_api.get_all_full_blocks()
more_blocks = full_node_api.bt.get_consecutive_blocks(
3,
farmer_reward_puzzle_hash=pool_a_ph,
pool_reward_puzzle_hash=pool_b_ph,
block_list_input=current_blocks[:-1],
force_overflow=True,
guarantee_transaction_block=True,
seed=32 * b"4",
transaction_data=join_pool_tx.spend_bundle,
)
for block in more_blocks[-3:]:
await full_node_api.full_node.respond_block(RespondBlock(block))
await asyncio.sleep(5)
await time_out_assert(timeout=WAIT_SECS, function=status_is_leaving_no_blocks)
# Eventually, leaves pool
await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)
finally:
client.close()
await client.await_closed()
await rpc_cleanup()
|
py
|
1a573d3d31067595d2e6b6fa0ecab979623263a8
|
from logging import getLogger
from typing import Optional
from state_manager.models.state_managers.base import back_to_pre_state_, BaseStateManager
from state_manager.models.state import StateData
from state_manager.types.aiogram import aiogram_context
from state_manager.types.generals import Data
logger = getLogger(__name__)
class AiogramStateManager(BaseStateManager):
context: aiogram_context
async def set_next_state(self, state_name: str, *, data: Data = None) -> None:
logger.debug(f"set_next_state, state_name={state_name}, data={data}")
state_data = StateData(current_state=state_name, data=data)
await self.storage.put(self.context.from_user.id, state_data)
async def back_to_pre_state(self, *, data: Data = None) -> None:
logger.debug(f"back_to_pre_state, data={data}")
await back_to_pre_state_(self.storage, self.context.from_user.id, data)
async def _get_state_data(self) -> Optional[StateData]:
logger.debug(f"get_storage")
return await self.storage.get(self.context.from_user.id)
class Config:
arbitrary_types_allowed = True
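# A minimal usage sketch (hedged: `storage` and the aiogram `message` context
# are assumptions about the surrounding state_manager setup, not part of this
# module):
#
#     manager = AiogramStateManager(storage=storage, context=message)
#     await manager.set_next_state("enter_name", data={"step": 1})
#     await manager.back_to_pre_state()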
|
py
|
1a573e179f509bfa6d08960b14811ddd6f5af910
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2020-2021, Saarland University
# Copyright (C) 2020-2021, Maximilian Köhl <[email protected]>
# Copyright (C) 2020-2021, Michaela Klauck <[email protected]>
from __future__ import annotations
import dataclasses as d
import typing as t
import enum
from . import errors, expressions, operators, types
if t.TYPE_CHECKING:
from . import context
@d.dataclass(frozen=True)
class Aggregate(expressions.Expression):
"""
Applies an aggregation function over a set of states.
Attributes
----------
function:
The aggregation function to apply.
values:
The values to aggregate over.
predicate:
The predicate used to identify the states to aggregate over.
"""
function: operators.AggregationFunction
values: expressions.Expression
predicate: expressions.Expression
def infer_type(self, scope: context.Scope) -> types.Type:
predicate_type = self.predicate.infer_type(scope)
if not predicate_type == types.BOOL:
raise errors.InvalidTypeError(
f"expected types.BOOL but got {predicate_type}"
)
values_type = self.values.infer_type(scope)
if values_type not in self.function.allowed_values_type:
raise errors.InvalidTypeError(
f"invalid type {values_type} of values in filter function"
)
return self.function.infer_result_type(values_type)
@property
def children(self) -> t.Sequence[expressions.Expression]:
return (self.predicate,)
class StatePredicate(enum.Enum):
"""
An enum of state predicates to be used with :class:`Aggregate`.
"""
INITIAL = "initial"
""" The state is an initial state. """
DEADLOCK = "deadlock"
""" The state is a deadlock state. """
TIMELOCK = "timelock"
""" The state is a timelock state. """
@d.dataclass(frozen=True)
class StateSelector(expressions.Expression):
"""
State selector expression using :class:`StatePredicate`.
Attributes
----------
predicate:
A :class:`StatePredicate`.
"""
predicate: StatePredicate
def infer_type(self, scope: context.Scope) -> types.Type:
return types.BOOL
@property
def children(self) -> t.Sequence[expressions.Expression]:
return ()
INITIAL_STATES = StateSelector(StatePredicate.INITIAL)
DEADLOCK_STATES = StateSelector(StatePredicate.DEADLOCK)
TIMELOCK_STATES = StateSelector(StatePredicate.TIMELOCK)
@d.dataclass(frozen=True)
class Probability(expressions.Expression):
"""
Probability property.
Attributes
----------
operator:
*Min* or *max* probability (:class:`~momba.model.operators.MinMax`).
formula:
Boolean expression to compute the probability for.
"""
operator: operators.MinMax
formula: expressions.Expression
def infer_type(self, scope: context.Scope) -> types.Type:
formula_type = self.formula.infer_type(scope)
if not formula_type == types.BOOL:
raise errors.InvalidTypeError(f"expected types.BOOL but got {formula_type}")
return types.REAL
@property
def children(self) -> t.Sequence[expressions.Expression]:
return (self.formula,)
@d.dataclass(frozen=True)
class PathQuantifier(expressions.Expression):
"""
A temporal path quantifier property.
Attributes
----------
quantifier:
The quantifier (:class:`~momba.model.operators.Quantifier`).
formula:
The inner formula.
"""
quantifier: operators.Quantifier
formula: expressions.Expression
def infer_type(self, scope: context.Scope) -> types.Type:
formula_type = self.formula.infer_type(scope)
if not formula_type == types.BOOL:
raise errors.InvalidTypeError(f"expected types.BOOL but got {formula_type}")
return types.BOOL
@property
def children(self) -> t.Sequence[expressions.Expression]:
return (self.formula,)
class AccumulationInstant(enum.Enum):
"""
An enumeration of reward accumulation instants.
"""
STEPS = "steps"
""" Accumulate at each step. """
TIME = "time"
""" Accumulate with the progression of time. """
EXIT = "exit"
""" Accumulate after exiting a state. """
@d.dataclass(frozen=True)
class ExpectedReward(expressions.Expression):
"""
Expected reward property.
Attributes
----------
operator:
*Min* or *max* probability (:class:`~momba.model.operators.MinMax`).
reward:
Expression to compute the reward.
accumulate:
A set of accumulation instants.
reachability:
step_instant:
time_instant:
reward_instants:
"""
operator: operators.MinMax
reward: expressions.Expression
accumulate: t.Optional[t.FrozenSet[AccumulationInstant]] = None
reachability: t.Optional[expressions.Expression] = None
step_instant: t.Optional[expressions.Expression] = None
time_instant: t.Optional[expressions.Expression] = None
reward_instants: t.Optional[t.Sequence[RewardInstant]] = None
def infer_type(self, scope: context.Scope) -> types.Type:
# TODO: check the types of the provided arguments
return types.REAL
@property
def children(self) -> t.Sequence[expressions.Expression]:
children: t.List[expressions.Expression] = []
if self.reachability is not None:
children.append(self.reachability)
if self.step_instant is not None:
children.append(self.step_instant)
if self.time_instant is not None:
children.append(self.time_instant)
if self.reward_instants is not None:
for reward_instant in self.reward_instants:
children.extend(reward_instant.children)
return children
@d.dataclass(frozen=True)
class RewardInstant:
"""
A reward instant.
Attributes
----------
expression:
accumulate:
instant:
"""
expression: expressions.Expression
accumulate: t.FrozenSet[AccumulationInstant]
instant: expressions.Expression
@property
def children(self) -> t.Sequence[expressions.Expression]:
return (self.expression, self.instant)
@d.dataclass(frozen=True)
class SteadyState(expressions.Expression):
"""
A *steady-state* property.
Attributes
----------
operator:
formula:
accumulate:
"""
operator: operators.MinMax
formula: expressions.Expression
accumulate: t.Optional[t.FrozenSet[AccumulationInstant]] = None
def infer_type(self, scope: context.Scope) -> types.Type:
# TODO: check the types of the provided arguments
return types.REAL
@property
def children(self) -> t.Sequence[expressions.Expression]:
return (self.formula,)
@d.dataclass(frozen=True)
class BinaryPathFormula(expressions.Expression):
"""
A temporal binary path formula.
Attributes
----------
operator:
left:
right:
step_bounds:
time_bounds:
reward_bounds:
"""
operator: operators.BinaryPathOperator
left: expressions.Expression
right: expressions.Expression
step_bounds: t.Optional[Interval] = None
time_bounds: t.Optional[Interval] = None
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None
def infer_type(self, scope: context.Scope) -> types.Type:
left_type = self.left.infer_type(scope)
if left_type != types.BOOL:
raise errors.InvalidTypeError(f"expected types.BOOL but got {left_type}")
right_type = self.right.infer_type(scope)
if right_type != types.BOOL:
raise errors.InvalidTypeError(f"expected types.BOOL but got {right_type}")
# TODO: check the types of the other arguments
return types.BOOL
@property
def children(self) -> t.Sequence[expressions.Expression]:
children: t.List[expressions.Expression] = [self.left, self.right]
if self.step_bounds is not None:
children.extend(self.step_bounds.expressions)
if self.time_bounds is not None:
children.extend(self.time_bounds.expressions)
if self.reward_bounds is not None:
for reward_bound in self.reward_bounds:
children.extend(reward_bound.expressions)
return children
@d.dataclass(frozen=True)
class UnaryPathFormula(expressions.Expression):
"""
A temporal unary path formula.
Attributes
----------
operator:
formula:
step_bounds:
time_bounds:
reward_bounds:
"""
operator: operators.UnaryPathOperator
formula: expressions.Expression
step_bounds: t.Optional[Interval] = None
time_bounds: t.Optional[Interval] = None
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None
def infer_type(self, scope: context.Scope) -> types.Type:
formula_type = self.formula.infer_type(scope)
if formula_type != types.BOOL:
raise errors.InvalidTypeError(f"expected types.BOOL but got {formula_type}")
# TODO: check the types of the other arguments
return types.BOOL
@property
def children(self) -> t.Sequence[expressions.Expression]:
children: t.List[expressions.Expression] = [self.formula]
if self.step_bounds is not None:
children.extend(self.step_bounds.expressions)
if self.time_bounds is not None:
children.extend(self.time_bounds.expressions)
if self.reward_bounds is not None:
for reward_bound in self.reward_bounds:
children.extend(reward_bound.expressions)
return children
@d.dataclass(frozen=True)
class Interval:
"""
An interval.
Attributes
----------
lower:
The lower bound of the interval or :code:`None`.
upper:
The upper bound of the interval or :code:`None`.
lower_exclusive:
Whether the lower bound is exclusive.
upper_exclusive:
Whether the upper bound is exclusive.
"""
lower: t.Optional[expressions.Expression] = None
upper: t.Optional[expressions.Expression] = None
lower_exclusive: t.Optional[expressions.Expression] = None
upper_exclusive: t.Optional[expressions.Expression] = None
@property
def expressions(self) -> t.Sequence[expressions.Expression]:
return [
expr
for expr in [
self.lower,
self.upper,
self.lower_exclusive,
self.upper_exclusive,
]
if expr is not None
]
@d.dataclass(frozen=True)
class RewardBound:
"""
A *reward bound*.
Attributes
----------
expression:
accumulate:
bounds:
"""
expression: expressions.Expression
accumulate: t.FrozenSet[AccumulationInstant]
bounds: Interval
@property
def expressions(self) -> t.Sequence[expressions.Expression]:
exprs = [self.expression]
exprs.extend(self.bounds.expressions)
return exprs
def aggregate(
function: operators.AggregationFunction,
values: expressions.Expression,
states: expressions.Expression = INITIAL_STATES,
) -> expressions.Expression:
"""
Creates an :class:`Aggregate` property.
"""
return Aggregate(function, values, states)
def min_prob(formula: expressions.Expression) -> expressions.Expression:
"""
Constructs a :math:`P_\\mathit{min}` property.
"""
return Probability(operators.MinMax.MIN, formula)
def max_prob(formula: expressions.Expression) -> expressions.Expression:
"""
Constructs a :math:`P_\\mathit{max}` property.
"""
return Probability(operators.MinMax.MAX, formula)
def forall_paths(formula: expressions.Expression) -> expressions.Expression:
"""
CTL :math:`\\forall` path operator.
"""
return PathQuantifier(operators.Quantifier.FORALL, formula)
def exists_path(formula: expressions.Expression) -> expressions.Expression:
"""
CTL :math:`\\exists` path operator.
"""
return PathQuantifier(operators.Quantifier.EXISTS, formula)
def min_expected_reward(
reward: expressions.Expression,
*,
accumulate: t.Optional[t.AbstractSet[AccumulationInstant]] = None,
reachability: t.Optional[expressions.Expression] = None,
step_instant: t.Optional[expressions.Expression] = None,
time_instant: t.Optional[expressions.Expression] = None,
reward_instants: t.Optional[t.Sequence[RewardInstant]] = None,
) -> expressions.Expression:
"""
Constructs a :math:`E_\\mathit{min}` property.
"""
return ExpectedReward(
operators.MinMax.MIN,
reward,
accumulate=None if accumulate is None else frozenset(accumulate),
reachability=reachability,
step_instant=step_instant,
time_instant=time_instant,
reward_instants=reward_instants,
)
def max_expected_reward(
reward: expressions.Expression,
*,
accumulate: t.Optional[t.AbstractSet[AccumulationInstant]] = None,
reachability: t.Optional[expressions.Expression] = None,
step_instant: t.Optional[expressions.Expression] = None,
time_instant: t.Optional[expressions.Expression] = None,
reward_instants: t.Optional[t.Sequence[RewardInstant]] = None,
) -> expressions.Expression:
"""
Constructs a :math:`E_\\mathit{max}` property.
"""
return ExpectedReward(
operators.MinMax.MAX,
reward,
accumulate=None if accumulate is None else frozenset(accumulate),
reachability=reachability,
step_instant=step_instant,
time_instant=time_instant,
reward_instants=reward_instants,
)
def min_steady_state(
formula: expressions.Expression,
*,
accumulate: t.Optional[t.AbstractSet[AccumulationInstant]] = None,
) -> expressions.Expression:
"""
Constructs a :math:`S_\\mathit{min}` property.
"""
return SteadyState(
operators.MinMax.MIN,
formula,
accumulate=None if accumulate is None else frozenset(accumulate),
)
def max_steady_state(
formula: expressions.Expression,
*,
accumulate: t.Optional[t.AbstractSet[AccumulationInstant]] = None,
) -> expressions.Expression:
"""
Constructs a :math:`S_\\mathit{max}` property.
"""
return SteadyState(
operators.MinMax.MAX,
formula,
accumulate=None if accumulate is None else frozenset(accumulate),
)
def until(
left: expressions.Expression,
right: expressions.Expression,
*,
step_bounds: t.Optional[Interval] = None,
time_bounds: t.Optional[Interval] = None,
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None,
) -> expressions.Expression:
"""
Constructs a temporal *until* property.
"""
return BinaryPathFormula(
operators.BinaryPathOperator.UNTIL,
left,
right,
step_bounds=step_bounds,
time_bounds=time_bounds,
reward_bounds=reward_bounds,
)
def weak_until(
left: expressions.Expression,
right: expressions.Expression,
*,
step_bounds: t.Optional[Interval] = None,
time_bounds: t.Optional[Interval] = None,
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None,
) -> expressions.Expression:
"""
Constructs a temporal *weak-until* property.
"""
return BinaryPathFormula(
operators.BinaryPathOperator.WEAK_UNTIL,
left,
right,
step_bounds=step_bounds,
time_bounds=time_bounds,
reward_bounds=reward_bounds,
)
def release(
left: expressions.Expression,
right: expressions.Expression,
*,
step_bounds: t.Optional[Interval] = None,
time_bounds: t.Optional[Interval] = None,
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None,
) -> expressions.Expression:
"""
Constructs a temporal *release* property.
"""
return BinaryPathFormula(
operators.BinaryPathOperator.RELEASE,
left,
right,
step_bounds=step_bounds,
time_bounds=time_bounds,
reward_bounds=reward_bounds,
)
def eventually(
formula: expressions.Expression,
*,
step_bounds: t.Optional[Interval] = None,
time_bounds: t.Optional[Interval] = None,
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None,
) -> expressions.Expression:
"""
Constructs a temporal *eventually* property.
"""
return UnaryPathFormula(
operators.UnaryPathOperator.EVENTUALLY,
formula,
step_bounds=step_bounds,
time_bounds=time_bounds,
reward_bounds=reward_bounds,
)
def globally(
formula: expressions.Expression,
*,
step_bounds: t.Optional[Interval] = None,
time_bounds: t.Optional[Interval] = None,
reward_bounds: t.Optional[t.Sequence[RewardBound]] = None,
) -> expressions.Expression:
"""
Constructs a temporal *globally* property.
"""
return UnaryPathFormula(
operators.UnaryPathOperator.GLOBALLY,
formula,
step_bounds=step_bounds,
time_bounds=time_bounds,
reward_bounds=reward_bounds,
)
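# A minimal composition sketch using only the helpers defined above (hedged:
# `goal` and `deadline` are placeholder expressions, not part of this module):
#
#     prop = max_prob(eventually(goal, time_bounds=Interval(upper=deadline)))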
|
py
|
1a573e4c80d9d367ce51adc411ee49babe9b9966
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestSpatialPyramidPooling2D(unittest.TestCase):
pyramid_height = 3
output_dim = 63 # channels(c=3) * (1 + 4 + 16) = 63
n, c, h, w = 2, 3, 9, 8
pooling_class = functions.MaxPooling2D
def setUp(self):
# Avoid instability of the numerical gradient
self.x = numpy.random.randn(
self.n, self.c, self.h, self.w).astype(numpy.float32)
self.one = numpy.ones(
(self.n, self.c, self.h, self.w)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (self.n, self.output_dim, 1, 1))
self.gy = self.gy.astype(numpy.float32)
def check_forward(self, x_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.spatial_pyramid_pooling_2d(
x, self.pyramid_height, self.pooling_class,
use_cudnn=use_cudnn)
self.assertEqual(y.data.dtype, numpy.float32)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
def check_forward_ones(self, x_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.spatial_pyramid_pooling_2d(
x, self.pyramid_height, self.pooling_class, use_cudnn=use_cudnn)
y_data = cuda.to_cpu(y.data)
self.assertEqual((self.n, self.output_dim, 1, 1), y_data.shape)
gradient_check.assert_allclose(y_data, numpy.ones_like(y_data))
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
self.check_forward_ones(self.one)
@attr.cudnn
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
self.check_forward_ones(cuda.to_gpu(self.one))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), False)
self.check_forward_ones(cuda.to_gpu(self.one), False)
def check_backward(self, x_data, y_grad, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.spatial_pyramid_pooling_2d(
x, self.pyramid_height, self.pooling_class, use_cudnn=use_cudnn)
y.grad = y_grad
y.backward()
func = y.creator
f = lambda: func.forward((x.data,))
gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
gradient_check.assert_allclose(
cuda.to_cpu(gx),
cuda.to_cpu(x.grad),
atol=1e-04)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), False)
testing.run_module(__name__, __file__)
|
py
|
1a573fddc641052e28e24cbf1dd4ff3aef4946ab
|
from conans import ConanFile, tools, CMake
import os
class SDL2ImageConan(ConanFile):
name = "sdl2_image"
description = "SDL_image is an image file loading library"
topics = ("sdl2_image", "sdl_image", "sdl2", "sdl", "images", "opengl")
url = "https://github.com/bincrafters/community"
homepage = "https://www.libsdl.org/projects/SDL_image/"
license = "MIT"
exports_sources = ["CMakeLists.txt"]
generators = ["cmake", "cmake_find_package_multi"]
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"bmp": [True, False],
"gif": [True, False],
"lbm": [True, False],
"pcx": [True, False],
"pnm": [True, False],
"svg": [True, False],
"tga": [True, False],
"xcf": [True, False],
"xpm": [True, False],
"xv": [True, False],
"jpg": [True, False],
"tif": [True, False],
"png": [True, False],
"webp": [True, False],
"imageio": [True, False]}
default_options = {
"shared": False,
"fPIC": True,
"bmp": True,
"gif": True,
"lbm": True,
"pcx": True,
"pnm": True,
"svg": True,
"tga": True,
"xcf": True,
"xpm": True,
"xv": True,
"jpg": True,
"tif": True,
"png": True,
"webp": True,
"imageio": False
}
_cmake = None
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
def config_options(self):
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.os != "Macos":
del self.options.imageio
def requirements(self):
self.requires("sdl2/2.0.14@bincrafters/stable")
if self.options.tif:
self.requires("libtiff/4.0.9")
if self.options.jpg:
self.requires("libjpeg/9d")
if self.options.png:
self.requires("libpng/1.6.37")
if self.options.webp:
self.requires("libwebp/1.0.3")
self.requires("zlib/1.2.11")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = "SDL2_image-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["BMP"] = self.options.bmp
self._cmake.definitions["GIF"] = self.options.gif
self._cmake.definitions["IMAGEIO"] = self.options.get_safe("imageio")
self._cmake.definitions["JPG"] = self.options.jpg
self._cmake.definitions["LBM"] = self.options.lbm
self._cmake.definitions["PCX"] = self.options.pcx
self._cmake.definitions["PNG"] = self.options.png
self._cmake.definitions["PNM"] = self.options.pnm
self._cmake.definitions["SVG"] = self.options.svg
self._cmake.definitions["TGA"] = self.options.tga
self._cmake.definitions["TIF"] = self.options.tif
self._cmake.definitions["WEBP"] = self.options.webp
self._cmake.definitions["XCF"] = self.options.xcf
self._cmake.definitions["XPM"] = self.options.xpm
self._cmake.definitions["XV"] = self.options.xv
# TODO: https://github.com/bincrafters/community/pull/1317#pullrequestreview-584847138
self._cmake.definitions["TIF_DYNAMIC"] = self.options["libtiff"].shared if self.options.tif else False
self._cmake.definitions["JPG_DYNAMIC"] = self.options["libjpeg"].shared if self.options.jpg else False
self._cmake.definitions["PNG_DYNAMIC"] = self.options["libpng"].shared if self.options.png else False
self._cmake.definitions["WEBP_DYNAMIC"] = self.options["libwebp"].shared if self.options.webp else False
self._cmake.definitions["SDL_IS_SHARED"] = self.options["sdl2"].shared
self._cmake.configure(build_dir="build")
return self._cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="COPYING.txt", dst="license", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.libs = ["SDL2_image"]
self.cpp_info.includedirs.append(os.path.join("include", "SDL2"))
# TODO: Add components in a sane way. SDL2_image might be incorrect, as the current dev version uses SDL2::image
# The current dev version is the first version with official CMake support
self.cpp_info.names["cmake_find_package"] = "SDL2_image"
self.cpp_info.names["cmake_find_package_multi"] = "SDL2_image"
|
py
|
1a57405f86d1a32b629c55e5a06c5674699bd938
|
"""LaTeX Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from traitlets import Unicode, default
from traitlets.config import Config
from nbconvert.filters.highlight import Highlight2Latex
from nbconvert.filters.filter_links import resolve_references
from .templateexporter import TemplateExporter
class LatexExporter(TemplateExporter):
"""
Exports to a LaTeX template. Inherit from this class if your template is
LaTeX based and you need custom transformers/filters.
If you don't need custom transformers/filters, just change the
'template_file' config option. Place your template in the special "/latex"
subfolder of the "../templates" folder.
"""
export_from_notebook = "latex"
@default('file_extension')
def _file_extension_default(self):
return '.tex'
@default('template_file')
def _template_file_default(self):
return 'article.tplx'
# Latex constants
@default('default_template_path')
def _default_template_path_default(self):
return os.path.join("..", "templates", "latex")
@default('template_skeleton_path')
def _template_skeleton_path_default(self):
return os.path.join("..", "templates", "latex", "skeleton")
#Extension that the template files use.
template_extension = Unicode(".tplx").tag(config=True)
output_mimetype = 'text/latex'
def default_filters(self):
for x in super(LatexExporter, self).default_filters():
yield x
yield ('resolve_references', resolve_references)
@property
def default_config(self):
c = Config({
'NbConvertBase': {
'display_data_priority' : ['text/latex', 'application/pdf', 'image/png', 'image/jpeg', 'image/svg+xml', 'text/markdown', 'text/plain']
},
'ExtractOutputPreprocessor': {
'enabled':True
},
'SVG2PDFPreprocessor': {
'enabled':True
},
'LatexPreprocessor': {
'enabled':True
},
'SphinxPreprocessor': {
'enabled':True
},
'HighlightMagicsPreprocessor': {
'enabled':True
}
})
c.merge(super(LatexExporter,self).default_config)
return c
def from_notebook_node(self, nb, resources=None, **kw):
langinfo = nb.metadata.get('language_info', {})
lexer = langinfo.get('pygments_lexer', langinfo.get('name', None))
self.register_filter('highlight_code',
Highlight2Latex(pygments_lexer=lexer, parent=self))
return super(LatexExporter, self).from_notebook_node(nb, resources, **kw)
def _create_environment(self):
environment = super(LatexExporter, self)._create_environment()
# Set special Jinja2 syntax that will not conflict with latex.
environment.block_start_string = "((*"
environment.block_end_string = "*))"
environment.variable_start_string = "((("
environment.variable_end_string = ")))"
environment.comment_start_string = "((="
environment.comment_end_string = "=))"
return environment
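# A minimal usage sketch (hedged: the notebook and output paths are
# placeholders; `from_filename` is the standard exporter entry point):
#
#     exporter = LatexExporter()
#     body, resources = exporter.from_filename("notebook.ipynb")
#     with open("notebook.tex", "w") as f:
#         f.write(body)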
|
py
|
1a5740a686ff8ee83d6cd5e57c69e45ba1e368d5
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ApplicationList(ListResource):
""" """
def __init__(self, version, account_sid):
"""
Initialize the ApplicationList
:param Version version: Version that contains the resource
:param account_sid: A string that uniquely identifies this resource
:returns: twilio.rest.api.v2010.account.application.ApplicationList
:rtype: twilio.rest.api.v2010.account.application.ApplicationList
"""
super(ApplicationList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
}
self._uri = '/Accounts/{account_sid}/Applications.json'.format(**self._solution)
def create(self, friendly_name, api_version=values.unset,
voice_url=values.unset, voice_method=values.unset,
voice_fallback_url=values.unset, voice_fallback_method=values.unset,
status_callback=values.unset, status_callback_method=values.unset,
voice_caller_id_lookup=values.unset, sms_url=values.unset,
sms_method=values.unset, sms_fallback_url=values.unset,
sms_fallback_method=values.unset, sms_status_callback=values.unset,
message_status_callback=values.unset):
"""
Create a new ApplicationInstance
:param unicode friendly_name: The friendly_name
:param unicode api_version: The API version to use
:param unicode voice_url: URL Twilio will make requests to when receiving a call
:param unicode voice_method: HTTP method to use with the URL
:param unicode voice_fallback_url: Fallback URL
:param unicode voice_fallback_method: HTTP method to use with the fallback url
:param unicode status_callback: URL to hit with status updates
:param unicode status_callback_method: HTTP method to use with the status callback
:param bool voice_caller_id_lookup: True or False
:param unicode sms_url: URL Twilio will request when receiving an SMS
:param unicode sms_method: HTTP method to use with sms_url
:param unicode sms_fallback_url: Fallback URL if there's an error parsing TwiML
:param unicode sms_fallback_method: HTTP method to use with sms_fallback_url
:param unicode sms_status_callback: URL Twilio will request with status updates
:param unicode message_status_callback: URL to make requests to with status updates
:returns: Newly created ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'ApiVersion': api_version,
'VoiceUrl': voice_url,
'VoiceMethod': voice_method,
'VoiceFallbackUrl': voice_fallback_url,
'VoiceFallbackMethod': voice_fallback_method,
'StatusCallback': status_callback,
'StatusCallbackMethod': status_callback_method,
'VoiceCallerIdLookup': voice_caller_id_lookup,
'SmsUrl': sms_url,
'SmsMethod': sms_method,
'SmsFallbackUrl': sms_fallback_url,
'SmsFallbackMethod': sms_fallback_method,
'SmsStatusCallback': sms_status_callback,
'MessageStatusCallback': message_status_callback,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ApplicationInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
)
def stream(self, friendly_name=values.unset, limit=None, page_size=None):
"""
Streams ApplicationInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode friendly_name: Filter by friendly name
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.application.ApplicationInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
friendly_name=friendly_name,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, friendly_name=values.unset, limit=None, page_size=None):
"""
Lists ApplicationInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode friendly_name: Filter by friendly name
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.application.ApplicationInstance]
"""
return list(self.stream(
friendly_name=friendly_name,
limit=limit,
page_size=page_size,
))
def page(self, friendly_name=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of ApplicationInstance records from the API.
Request is executed immediately
:param unicode friendly_name: Filter by friendly name
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationPage
"""
params = values.of({
'FriendlyName': friendly_name,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return ApplicationPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of ApplicationInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ApplicationPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs an ApplicationContext
:param sid: Fetch by unique Application Sid
:returns: twilio.rest.api.v2010.account.application.ApplicationContext
:rtype: twilio.rest.api.v2010.account.application.ApplicationContext
"""
return ApplicationContext(
self._version,
account_sid=self._solution['account_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs an ApplicationContext
:param sid: Fetch by unique Application Sid
:returns: twilio.rest.api.v2010.account.application.ApplicationContext
:rtype: twilio.rest.api.v2010.account.application.ApplicationContext
"""
return ApplicationContext(
self._version,
account_sid=self._solution['account_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.ApplicationList>'
class ApplicationPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the ApplicationPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: A string that uniquely identifies this resource
:returns: twilio.rest.api.v2010.account.application.ApplicationPage
:rtype: twilio.rest.api.v2010.account.application.ApplicationPage
"""
super(ApplicationPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ApplicationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.application.ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
"""
return ApplicationInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.ApplicationPage>'
class ApplicationContext(InstanceContext):
""" """
def __init__(self, version, account_sid, sid):
"""
Initialize the ApplicationContext
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param sid: Fetch by unique Application Sid
:returns: twilio.rest.api.v2010.account.application.ApplicationContext
:rtype: twilio.rest.api.v2010.account.application.ApplicationContext
"""
super(ApplicationContext, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'sid': sid,
}
self._uri = '/Accounts/{account_sid}/Applications/{sid}.json'.format(**self._solution)
def delete(self):
"""
Deletes the ApplicationInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def fetch(self):
"""
Fetch an ApplicationInstance
:returns: Fetched ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return ApplicationInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
def update(self, friendly_name=values.unset, api_version=values.unset,
voice_url=values.unset, voice_method=values.unset,
voice_fallback_url=values.unset, voice_fallback_method=values.unset,
status_callback=values.unset, status_callback_method=values.unset,
voice_caller_id_lookup=values.unset, sms_url=values.unset,
sms_method=values.unset, sms_fallback_url=values.unset,
sms_fallback_method=values.unset, sms_status_callback=values.unset,
message_status_callback=values.unset):
"""
Update the ApplicationInstance
:param unicode friendly_name: Human readable description of this resource
:param unicode api_version: The API version to use
:param unicode voice_url: URL Twilio will make requests to when receiving a call
:param unicode voice_method: HTTP method to use with the URL
:param unicode voice_fallback_url: Fallback URL
:param unicode voice_fallback_method: HTTP method to use with the fallback url
:param unicode status_callback: URL to hit with status updates
:param unicode status_callback_method: HTTP method to use with the status callback
:param bool voice_caller_id_lookup: True or False
:param unicode sms_url: URL Twilio will request when receiving an SMS
:param unicode sms_method: HTTP method to use with sms_url
:param unicode sms_fallback_url: Fallback URL if there's an error parsing TwiML
:param unicode sms_fallback_method: HTTP method to use with sms_fallback_url
:param unicode sms_status_callback: URL Twilio will request with status updates
:param unicode message_status_callback: URL to make requests to with status updates
:returns: Updated ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'ApiVersion': api_version,
'VoiceUrl': voice_url,
'VoiceMethod': voice_method,
'VoiceFallbackUrl': voice_fallback_url,
'VoiceFallbackMethod': voice_fallback_method,
'StatusCallback': status_callback,
'StatusCallbackMethod': status_callback_method,
'VoiceCallerIdLookup': voice_caller_id_lookup,
'SmsUrl': sms_url,
'SmsMethod': sms_method,
'SmsFallbackUrl': sms_fallback_url,
'SmsFallbackMethod': sms_fallback_method,
'SmsStatusCallback': sms_status_callback,
'MessageStatusCallback': message_status_callback,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ApplicationInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.ApplicationContext {}>'.format(context)
class ApplicationInstance(InstanceResource):
""" """
def __init__(self, version, payload, account_sid, sid=None):
"""
Initialize the ApplicationInstance
:returns: twilio.rest.api.v2010.account.application.ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
"""
super(ApplicationInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'api_version': payload['api_version'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'friendly_name': payload['friendly_name'],
'message_status_callback': payload['message_status_callback'],
'sid': payload['sid'],
'sms_fallback_method': payload['sms_fallback_method'],
'sms_fallback_url': payload['sms_fallback_url'],
'sms_method': payload['sms_method'],
'sms_status_callback': payload['sms_status_callback'],
'sms_url': payload['sms_url'],
'status_callback': payload['status_callback'],
'status_callback_method': payload['status_callback_method'],
'uri': payload['uri'],
'voice_caller_id_lookup': payload['voice_caller_id_lookup'],
'voice_fallback_method': payload['voice_fallback_method'],
'voice_fallback_url': payload['voice_fallback_url'],
'voice_method': payload['voice_method'],
'voice_url': payload['voice_url'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ApplicationContext for this ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationContext
"""
if self._context is None:
self._context = ApplicationContext(
self._version,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: A string that uniquely identifies this resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def api_version(self):
"""
:returns: The API version to use
:rtype: unicode
"""
return self._properties['api_version']
@property
def date_created(self):
"""
:returns: Date this resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: Date this resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def friendly_name(self):
"""
:returns: Human readable description of this resource
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def message_status_callback(self):
"""
:returns: URL to make requests to with status updates
:rtype: unicode
"""
return self._properties['message_status_callback']
@property
def sid(self):
"""
:returns: A string that uniquely identifies this resource
:rtype: unicode
"""
return self._properties['sid']
@property
def sms_fallback_method(self):
"""
:returns: HTTP method to use with sms_fallback_url
:rtype: unicode
"""
return self._properties['sms_fallback_method']
@property
def sms_fallback_url(self):
"""
:returns: Fallback URL if there's an error parsing TwiML
:rtype: unicode
"""
return self._properties['sms_fallback_url']
@property
def sms_method(self):
"""
:returns: HTTP method to use with sms_url
:rtype: unicode
"""
return self._properties['sms_method']
@property
def sms_status_callback(self):
"""
:returns: URL Twilio will request with status updates
:rtype: unicode
"""
return self._properties['sms_status_callback']
@property
def sms_url(self):
"""
:returns: URL Twilio will request when receiving an SMS
:rtype: unicode
"""
return self._properties['sms_url']
@property
def status_callback(self):
"""
:returns: URL to hit with status updates
:rtype: unicode
"""
return self._properties['status_callback']
@property
def status_callback_method(self):
"""
:returns: HTTP method to use with the status callback
:rtype: unicode
"""
return self._properties['status_callback_method']
@property
def uri(self):
"""
:returns: URI for this resource
:rtype: unicode
"""
return self._properties['uri']
@property
def voice_caller_id_lookup(self):
"""
:returns: True or False
:rtype: bool
"""
return self._properties['voice_caller_id_lookup']
@property
def voice_fallback_method(self):
"""
:returns: HTTP method to use with the fallback url
:rtype: unicode
"""
return self._properties['voice_fallback_method']
@property
def voice_fallback_url(self):
"""
:returns: Fallback URL
:rtype: unicode
"""
return self._properties['voice_fallback_url']
@property
def voice_method(self):
"""
:returns: HTTP method to use with the URL
:rtype: unicode
"""
return self._properties['voice_method']
@property
def voice_url(self):
"""
:returns: URL Twilio will make requests to when receiving a call
:rtype: unicode
"""
return self._properties['voice_url']
def delete(self):
"""
Deletes the ApplicationInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def fetch(self):
"""
Fetch an ApplicationInstance
:returns: Fetched ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
"""
return self._proxy.fetch()
def update(self, friendly_name=values.unset, api_version=values.unset,
voice_url=values.unset, voice_method=values.unset,
voice_fallback_url=values.unset, voice_fallback_method=values.unset,
status_callback=values.unset, status_callback_method=values.unset,
voice_caller_id_lookup=values.unset, sms_url=values.unset,
sms_method=values.unset, sms_fallback_url=values.unset,
sms_fallback_method=values.unset, sms_status_callback=values.unset,
message_status_callback=values.unset):
"""
Update the ApplicationInstance
:param unicode friendly_name: Human readable description of this resource
:param unicode api_version: The API version to use
:param unicode voice_url: URL Twilio will make requests to when receiving a call
:param unicode voice_method: HTTP method to use with the URL
:param unicode voice_fallback_url: Fallback URL
:param unicode voice_fallback_method: HTTP method to use with the fallback url
:param unicode status_callback: URL to hit with status updates
:param unicode status_callback_method: HTTP method to use with the status callback
:param bool voice_caller_id_lookup: True or False
:param unicode sms_url: URL Twilio will request when receiving an SMS
:param unicode sms_method: HTTP method to use with sms_url
:param unicode sms_fallback_url: Fallback URL if there's an error parsing TwiML
:param unicode sms_fallback_method: HTTP method to use with sms_fallback_url
:param unicode sms_status_callback: URL Twilio will request with status updates
:param unicode message_status_callback: URL to make requests to with status updates
:returns: Updated ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
api_version=api_version,
voice_url=voice_url,
voice_method=voice_method,
voice_fallback_url=voice_fallback_url,
voice_fallback_method=voice_fallback_method,
status_callback=status_callback,
status_callback_method=status_callback_method,
voice_caller_id_lookup=voice_caller_id_lookup,
sms_url=sms_url,
sms_method=sms_method,
sms_fallback_url=sms_fallback_url,
sms_fallback_method=sms_fallback_method,
sms_status_callback=sms_status_callback,
message_status_callback=message_status_callback,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.ApplicationInstance {}>'.format(context)
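# A minimal usage sketch (hedged: credentials and the friendly name are
# placeholders; this assumes the usual twilio-python Client entry point):
#
#     from twilio.rest import Client
#     client = Client(account_sid, auth_token)
#     app = client.applications.create(friendly_name="My App")
#     client.applications(app.sid).update(sms_url="https://example.com/sms")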
|
py
|
1a5740d044b4f49fb1e736ecc287e6c55d6271f3
|
"""
Solves problem 7 of the One Hundred Dollars, One Hundred Digits Challenge.
"""
import numpy as np
from pysparse.sparse import spmatrix
from pysparse.itsolvers.krylov import minres
from pysparse.precon import precon
def get_primes(nofPrimes):
primes = np.zeros(nofPrimes, 'i')
primes[0] = 2
nof = 1
i = 3
while True:
# Trial division by the primes found so far; stop once p*p exceeds i,
# since a composite i must have a prime factor no larger than sqrt(i).
for p in primes[:nof]:
if i%p == 0 or p*p > i: break
if i%p != 0:
primes[nof] = i
nof += 1
if nof >= nofPrimes:
break
i = i+2
return primes
n = 20000
print('Generating first %d primes...' % n)
primes = get_primes(n)
print('Assembling coefficient matrix...')
A = spmatrix.ll_mat_sym(n, n*8)
d = 1
while d < n:
for i in range(d, n):
A[i,i-d] = 1.0
d *= 2
for i in range(n):
A[i,i] = 1.0 * primes[i]
A = A.to_sss()
K = precon.ssor(A)
print('Solving linear system...')
b = np.zeros(n); b[0] = 1.0
x = np.empty(n)
info, iter, relres = minres(A, b, x, 1e-16, n, K)
print(info, iter, relres)
print('%.16e' % x[0])
|
py
|
1a5741061c09157b5cae23df1a2d62623e654c7d
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class GetTLSClientAuthSettingRequest(JDCloudRequest):
"""
TLS Client Authentication requires 星盾 to use a client certificate when connecting to your origin server (Enterprise Only).
"""
def __init__(self, parameters, header=None, version="v1"):
super(GetTLSClientAuthSettingRequest, self).__init__(
'/zones/{zone_identifier}/settings$$tls_client_auth', 'GET', header, version)
self.parameters = parameters
class GetTLSClientAuthSettingParameters(object):
def __init__(self, zone_identifier, ):
"""
:param zone_identifier:
"""
self.zone_identifier = zone_identifier
|
py
|
1a57441c946f8236f5a8d6618fd404193ab15dd8
|
import numpy as np
import scipy.interpolate as si
import tools
def stdextr(data, x1, x2, variance=None, mask=None, interp=False):
"""
Standard box extraction of spectrum. Step 4 of Horne (1989).
Parameters:
-----------
data: 2D float ndarray
Sky-subtracted spectrum image of shape [nwavelength, nposition].
x1: Integer
Left X boundary of region to extract the spectrum.
x2: Integer
Right X boundary of region to extract the spectrum.
Note that: 0 <= x1 < x2 <= nx
variance: 2D float ndarray
Variance image from processed image.
mask: 2D integer ndarray
Mask of the data image (1 = good pixel, 0 = bad pixel).
interp: Bool
If True, linearly interpolate the data for bad pixels.
Returns:
--------
stdspec: 1D float ndarray
The extracted spectrum.
stdvar: 1D float ndarray
Variance of extracted spectrum.
Example:
--------
>>> import sys
>>> import astropy.io.fits as fits
>>> import matplotlib.pyplot as plt
>>> sys.path.append("./src/")
>>> import stdextr as se
>>> data = fits.getdata("./images/ex1.fits")
>>> spec, sstd = se.stdextr(data, 230, 270)
>>> plt.plot(spec)
"""
# Check inputs:
nwave, nx = np.shape(data)
if variance is None:
variance = np.ones((nwave, nx), np.double)
if mask is None:
mask = np.ones((nwave, nx), np.byte)
if x1 < 0 or x2 <= x1 or nx < x2:
tools.error("Invalid x1, x2 boundaries (={:d}, {:d}), the values must "
"satisfy:\n 0 <= x1 < x2 <= nx (={:d}).".format(x1, x2, nx))
if np.shape(variance) != (nwave, nx):
tools.error("Incompatible shapes between data image ({:d}, {:d}) and "
"variance image ({:d}, {:d}).".format(nwave, nx, *np.shape(variance)))
if np.shape(mask) != (nwave, nx):
tools.error("Incompatible shapes between data image ({:d}, {:d}) and "
"mask image ({:d}, {:d}).".format(nwave, nx, *np.shape(mask)))
# Interpolate over bad pixels:
if interp:
stdspec = np.zeros(nwave)
for i in np.arange(nwave):
bad = np.where(mask[i, x1:x2] == 0)
good = np.where(mask[i, x1:x2] == 1)
datav = np.copy(data[i, x1:x2])
if len(bad[0]) != 0:
# Interpolate values at the bad positions from the good positions.
interpol = si.interp1d(good[0], datav[good], kind="linear")
datav[bad] = interpol(bad[0])
stdspec[i] = np.sum(datav)
return stdspec, np.zeros(nwave)
# Standard extraction:
stdspec = np.sum((data * mask)[:, x1:x2], axis=1)
stdvar = np.sum((variance * mask)[:, x1:x2], axis=1)
return stdspec, stdvar
|
py
|
1a5744c266b3c7bf24f8c63a8ea44be18138d64f
|
# Auto-generated at 2021-09-27T17:12:34.234998+08:00
# from: Justice Social Service (1.17.1)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class BulkStatItemOperationResult(Model):
"""Bulk stat item operation result
Properties:
success: (success) OPTIONAL bool
stat_code: (statCode) OPTIONAL str
details: (details) OPTIONAL Dict[str, Any]
"""
# region fields
success: bool # OPTIONAL
stat_code: str # OPTIONAL
details: Dict[str, Any] # OPTIONAL
# endregion fields
# region with_x methods
def with_success(self, value: bool) -> BulkStatItemOperationResult:
self.success = value
return self
def with_stat_code(self, value: str) -> BulkStatItemOperationResult:
self.stat_code = value
return self
def with_details(self, value: Dict[str, Any]) -> BulkStatItemOperationResult:
self.details = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "success") and self.success:
result["success"] = bool(self.success)
elif include_empty:
result["success"] = bool()
if hasattr(self, "stat_code") and self.stat_code:
result["statCode"] = str(self.stat_code)
elif include_empty:
result["statCode"] = str()
if hasattr(self, "details") and self.details:
result["details"] = {str(k0): v0 for k0, v0 in self.details.items()}
elif include_empty:
result["details"] = {}
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
success: Optional[bool] = None,
stat_code: Optional[str] = None,
details: Optional[Dict[str, Any]] = None,
) -> BulkStatItemOperationResult:
instance = cls()
if success is not None:
instance.success = success
if stat_code is not None:
instance.stat_code = stat_code
if details is not None:
instance.details = details
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> BulkStatItemOperationResult:
instance = cls()
if not dict_:
return instance
if "success" in dict_ and dict_["success"] is not None:
instance.success = bool(dict_["success"])
elif include_empty:
instance.success = bool()
if "statCode" in dict_ and dict_["statCode"] is not None:
instance.stat_code = str(dict_["statCode"])
elif include_empty:
instance.stat_code = str()
if "details" in dict_ and dict_["details"] is not None:
instance.details = {str(k0): v0 for k0, v0 in dict_["details"].items()}
elif include_empty:
instance.details = {}
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"success": "success",
"statCode": "stat_code",
"details": "details",
}
# endregion static methods
|
py
|
1a5745633264d23ce20a034e3f0d0e26e2552427
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcls import __version__
from mmcls.apis import init_random_seed, set_random_seed, train_model
from mmcls.datasets import build_dataset
from mmcls.models import build_classifier
from mmcls.utils import collect_env, get_root_logger, setup_multi_processes
def parse_args():
parser = argparse.ArgumentParser(description='Train a model')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--device', help='device used for training. (Deprecated)')
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
if args.device:
warnings.warn(
'--device is deprecated. To use cpu to train, please '
'refers to https://mmclassification.readthedocs.io/en/latest/'
'getting_started.html#train-a-model')
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
seed = init_random_seed(args.seed)
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
model = build_classifier(cfg.model)
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
# save mmcls version, config file content and class names in
# runner as meta data
meta.update(
dict(
mmcls_version=__version__,
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES))
# add an attribute for visualization convenience
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
device='cpu' if args.device == 'cpu' else 'cuda',
meta=meta)
if __name__ == '__main__':
main()
|
py
|
1a57463d9868f479ca34e066f4cc0cbc7b755cf2
|
import numpy as np
import logging
from pyrfr import regression
from smac.epm.base_epm import AbstractEPM
from smac.configspace import (
CategoricalHyperparameter,
UniformFloatHyperparameter,
UniformIntegerHyperparameter,
Constant,
)
class RandomForestWithInstances(AbstractEPM):
"""
    Base EPM. Very similar to SMAC3's EPM.
Interface to the random forest that takes instance features
into account.
Attributes
----------
rf_opts :
Random forest hyperparameter
n_points_per_tree : int
rf : regression.binary_rss_forest
Only available after training
hypers: list
List of random forest hyperparameters
seed : int
types : list
bounds : list
rng : np.random.RandomState
logger : logging.logger
"""
def __init__(self,
configspace,
types: np.ndarray,
bounds: np.ndarray,
seed: int,
num_trees: int = 10,
do_bootstrapping: bool = True,
n_points_per_tree: int = -1,
ratio_features: float = 5. / 6.,
min_samples_split: int = 3,
min_samples_leaf: int = 3,
max_depth: int = 20,
eps_purity: int = 1e-8,
max_num_nodes: int = 2 ** 20,
logged_y: bool = True,
**kwargs):
"""Constructor
Parameters
----------
configspace: ConfigurationSpace
configspace to be passed to random forest (used to impute inactive parameter-values)
types : np.ndarray (D)
Specifies the number of categorical values of an input dimension where
the i-th entry corresponds to the i-th input dimension. Let's say we
            have 2 dimensions where the first dimension consists of 3 different
            categorical choices and the second dimension is continuous, then we
            have to pass np.array([2, 0]). Note that we count starting from 0.
bounds : np.ndarray (D, 2)
Specifies the bounds for continuous features.
seed : int
The seed that is passed to the random_forest_run library.
num_trees : int
The number of trees in the random forest.
do_bootstrapping : bool
Turns on / off bootstrapping in the random forest.
n_points_per_tree : int
Number of points per tree. If <= 0 X.shape[0] will be used
in _train(X, y) instead
ratio_features : float
The ratio of features that are considered for splitting.
min_samples_split : int
The minimum number of data points to perform a split.
min_samples_leaf : int
The minimum number of data points in a leaf.
max_depth : int
The maximum depth of a single tree.
eps_purity : float
The minimum difference between two target values to be considered
different
max_num_nodes : int
            The maximum total number of nodes in a tree
logged_y: bool
Indicates if the y data is transformed (i.e. put on logscale) or not
"""
super().__init__(configspace=configspace, types=types, bounds=bounds, seed=seed, **kwargs)
self.configspace = configspace
self.types = types
self.bounds = bounds
self.rng = regression.default_random_engine(seed)
self.rf_opts = regression.forest_opts()
self.rf_opts.num_trees = num_trees
self.rf_opts.do_bootstrapping = do_bootstrapping
max_features = 0 if ratio_features > 1.0 else \
max(1, int(types.shape[0] * ratio_features))
self.rf_opts.tree_opts.max_features = max_features
self.rf_opts.tree_opts.min_samples_to_split = min_samples_split
self.rf_opts.tree_opts.min_samples_in_leaf = min_samples_leaf
self.rf_opts.tree_opts.max_depth = max_depth
self.rf_opts.tree_opts.epsilon_purity = eps_purity
self.rf_opts.tree_opts.max_num_nodes = max_num_nodes
self.rf_opts.compute_law_of_total_variance = False # Always off. No need for this in our base EPM
self.n_points_per_tree = n_points_per_tree
self.rf = None # type: regression.binary_rss_forest
self.logged_y = logged_y
        # This list will be read out by save_iteration() in the solver
self.hypers = [num_trees, max_num_nodes, do_bootstrapping,
n_points_per_tree, ratio_features, min_samples_split,
min_samples_leaf, max_depth, eps_purity, seed]
self.seed = seed
self.impute_values = {}
self.logger = logging.getLogger(self.__module__ + "." +
self.__class__.__name__)
def _impute_inactive(self, X: np.ndarray) -> np.ndarray:
X = X.copy()
for idx, hp in enumerate(self.configspace.get_hyperparameters()):
if idx not in self.impute_values:
parents = self.configspace.get_parents_of(hp.name)
if len(parents) == 0:
self.impute_values[idx] = None
else:
if isinstance(hp, CategoricalHyperparameter):
self.impute_values[idx] = len(hp.choices)
elif isinstance(hp, (UniformFloatHyperparameter, UniformIntegerHyperparameter)):
self.impute_values[idx] = -1
elif isinstance(hp, Constant):
self.impute_values[idx] = 1
else:
raise ValueError
nonfinite_mask = ~np.isfinite(X[:, idx])
X[nonfinite_mask, idx] = self.impute_values[idx]
return X
def _train(self, X: np.ndarray, y: np.ndarray, **kwargs):
"""Trains the random forest on X and y.
Parameters
----------
X : np.ndarray [n_samples, n_features (config + instance features)]
Input data points.
Y : np.ndarray [n_samples, ]
The corresponding target values.
Returns
-------
self
"""
self.X = self._impute_inactive(X)
self.y = y.flatten()
if self.n_points_per_tree <= 0:
self.rf_opts.num_data_points_per_tree = self.X.shape[0]
else:
self.rf_opts.num_data_points_per_tree = self.n_points_per_tree
self.rf = regression.binary_rss_forest()
self.rf.options = self.rf_opts
data = self.__init_data_container(self.X, self.y)
self.rf.fit(data, rng=self.rng)
return self
def __init_data_container(self, X: np.ndarray, y: np.ndarray):
"""
        Biggest difference to SMAC3's EPM: we fit the forest on a transformation and predict the untransformed result.
        Fills a pyrfr default data container, s.t. the forest knows
        categoricals and bounds for continuous data
Parameters
----------
X : np.ndarray [n_samples, n_features]
Input data points
y : np.ndarray [n_samples, ]
Corresponding target values
Returns
-------
data : regression.default_data_container
The filled data container that pyrfr can interpret
"""
# retrieve the types and the bounds from the ConfigSpace
data = regression.default_data_container(X.shape[1])
if self.logged_y:
y = y.reshape((-1, 1))
y = np.hstack((y, np.power(10, y)))
for i, (mn, mx) in enumerate(self.bounds):
if np.isnan(mx):
data.set_type_of_feature(i, mn)
else:
data.set_bounds_of_feature(i, mn, mx)
for row_X, row_y in zip(X, y):
data.add_data_point(row_X, row_y)
return data
def _predict(self, X: np.ndarray, cov_return_type='diagonal_cov'):
"""Predict means and variances for given X.
Parameters
----------
X : np.ndarray of shape = [n_samples,
n_features (config + instance features)]
Returns
-------
means : np.ndarray of shape = [n_samples, 1]
Predictive mean
vars : np.ndarray of shape = [n_samples, 1]
Predictive variance
"""
if len(X.shape) != 2:
raise ValueError(
'Expected 2d array, got %dd array!' % len(X.shape))
if X.shape[1] != len(self._initial_types):
raise ValueError('Rows in X should have %d entries but have %d!' %
(len(self._initial_types), X.shape[1]))
if cov_return_type != 'diagonal_cov':
raise ValueError("'cov_return_type' can only take 'diagonal_cov' for this model")
means, vars_ = [], []
X = self._impute_inactive(X)
for row_X in X:
mean, var = self.rf.predict_mean_var(row_X)
means.append(mean)
vars_.append(var)
means = np.array(means)
vars_ = np.array(vars_)
return means.reshape((-1, 1)), vars_.reshape((-1, 1))
|
py
|
1a574809eb43e6080d8186d2ac28c935a2bc878e
|
import os
import sys
current_file = os.path.dirname(__file__)
path1 = os.path.abspath(os.path.join(current_file, '..'))
path2 = os.path.abspath(os.path.join(current_file, 'tests'))
sys.path.append(path1) # noqa
sys.path.append(path2) # noqa
|
py
|
1a57481b517eb5ee26ab64620d412fa57011eab5
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('training', '0003_training_published_mecc_url'),
]
operations = [
migrations.AddField(
model_name='training',
name='is_existing_rof',
field=models.BooleanField(verbose_name="Témoin d'existence dans ROF", default=True),
),
migrations.AddField(
model_name='training',
name='recup_atb_ens',
field=models.BooleanField(verbose_name='Témoin de récupération des responsables, coef. et notes seuil', default=False),
),
]
|
py
|
1a5748687899b86e374468a8f71ab5791dce7c76
|
"""
@Project: LibrarySystemGUIPy
@Author: loyio
@Date: 6/11/21
"""
|
py
|
1a5748f10621e8461c55ce99d060094e7658d52c
|
"""Computes the similarity of molecular scaffolds between two datasets."""
from itertools import product
import math
import os
import sys
from typing import List
from typing_extensions import Literal
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from tqdm import tqdm
from tap import Tap # pip install typed-argument-parser (https://github.com/swansonk14/typed-argument-parser)
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from chemprop.data import get_data, scaffold_to_smiles
class Args(Tap):
data_path_1: str # Path to first data CSV file
data_path_2: str # Path to second data CSV file
smiles_column_1: str = None # Name of the column containing SMILES strings for the first data. By default, uses the first column.
smiles_column_2: str = None # Name of the column containing SMILES strings for the second data. By default, uses the first column.
similarity_measure: Literal['scaffold', 'morgan'] # Similarity measure to use to compare the two datasets
radius: int = 3 # Radius of Morgan fingerprint
sample_rate: float = 1.0 # Rate at which to sample pairs of molecules for Morgan similarity (to reduce time)
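# Hedged CLI sketch (not part of the original file; the script filename and CSV
# paths below are placeholders):
#   python scaffold_similarity.py --data_path_1 set_a.csv --data_path_2 set_b.csv \
#       --similarity_measure scaffold
# For Morgan similarity, pass --similarity_measure morgan plus the optional
# --radius and --sample_rate arguments parsed by the Args class above.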
def scaffold_similarity(smiles_1: List[str], smiles_2: List[str]):
"""
Determines the similarity between the scaffolds of two lists of smiles strings.
:param smiles_1: A list of smiles strings.
:param smiles_2: A list of smiles strings.
"""
# Get scaffolds
scaffold_to_smiles_1 = scaffold_to_smiles(smiles_1)
scaffold_to_smiles_2 = scaffold_to_smiles(smiles_2)
scaffolds_1, smiles_sets_1 = zip(*scaffold_to_smiles_1.items())
scaffolds_2, smiles_sets_2 = zip(*scaffold_to_smiles_2.items())
smiles_to_scaffold = {smiles: scaffold for scaffold, smiles_set in scaffold_to_smiles_1.items() for smiles in smiles_set}
smiles_to_scaffold.update({smiles: scaffold for scaffold, smiles_set in scaffold_to_smiles_2.items() for smiles in smiles_set})
# Determine similarity
scaffolds_1, scaffolds_2 = set(scaffolds_1), set(scaffolds_2)
smiles_1, smiles_2 = set(smiles_1), set(smiles_2)
all_scaffolds = scaffolds_1 | scaffolds_2
all_smiles = smiles_1 | smiles_2
scaffolds_intersection = scaffolds_1 & scaffolds_2
# smiles_intersection is smiles with a scaffold that appears in both datasets
smiles_intersection = {smiles for smiles in all_smiles if smiles_to_scaffold[smiles] in scaffolds_intersection}
smiles_in_1_with_scaffold_in_2 = {smiles for smiles in smiles_1 if smiles_to_scaffold[smiles] in scaffolds_2}
smiles_in_2_with_scaffold_in_1 = {smiles for smiles in smiles_2 if smiles_to_scaffold[smiles] in scaffolds_1}
sizes_1 = np.array([len(smiles_set) for smiles_set in smiles_sets_1])
sizes_2 = np.array([len(smiles_set) for smiles_set in smiles_sets_2])
# Print results
print()
print(f'Number of molecules = {len(all_smiles):,}')
print(f'Number of scaffolds = {len(all_scaffolds):,}')
print()
print(f'Number of scaffolds in both datasets = {len(scaffolds_intersection):,}')
print(f'Scaffold intersection over union = {len(scaffolds_intersection) / len(all_scaffolds):.4f}')
print()
print(f'Number of molecules with scaffold in both datasets = {len(smiles_intersection):,}')
print(f'Molecule intersection over union = {len(smiles_intersection) / len(all_smiles):.4f}')
print()
print(f'Number of molecules in dataset 1 = {np.sum(sizes_1):,}')
print(f'Number of scaffolds in dataset 1 = {len(scaffolds_1):,}')
print()
print(f'Number of molecules in dataset 2 = {np.sum(sizes_2):,}')
print(f'Number of scaffolds in dataset 2 = {len(scaffolds_2):,}')
print()
print(f'Percent of scaffolds in dataset 1 which are also in dataset 2 = {100 * len(scaffolds_intersection) / len(scaffolds_1):.2f}%')
print(f'Percent of scaffolds in dataset 2 which are also in dataset 1 = {100 * len(scaffolds_intersection) / len(scaffolds_2):.2f}%')
print()
print(f'Number of molecules in dataset 1 with scaffolds in dataset 2 = {len(smiles_in_1_with_scaffold_in_2):,}')
print(f'Percent of molecules in dataset 1 with scaffolds in dataset 2 = {100 * len(smiles_in_1_with_scaffold_in_2) / len(smiles_1):.2f}%')
print()
print(f'Number of molecules in dataset 2 with scaffolds in dataset 1 = {len(smiles_in_2_with_scaffold_in_1):,}')
print(f'Percent of molecules in dataset 2 with scaffolds in dataset 1 = {100 * len(smiles_in_2_with_scaffold_in_1) / len(smiles_2):.2f}%')
print()
print(f'Average number of molecules per scaffold in dataset 1 = {np.mean(sizes_1):.4f} +/- {np.std(sizes_1):.4f}')
print('Percentiles for molecules per scaffold in dataset 1')
print(' | '.join([f'{i}% = {int(np.percentile(sizes_1, i)):,}' for i in range(0, 101, 10)]))
print()
print(f'Average number of molecules per scaffold in dataset 2 = {np.mean(sizes_2):.4f} +/- {np.std(sizes_2):.4f}')
print('Percentiles for molecules per scaffold in dataset 2')
print(' | '.join([f'{i}% = {int(np.percentile(sizes_2, i)):,}' for i in range(0, 101, 10)]))
def morgan_similarity(smiles_1: List[str], smiles_2: List[str], radius: int, sample_rate: float):
"""
Determines the similarity between the morgan fingerprints of two lists of smiles strings.
:param smiles_1: A list of smiles strings.
:param smiles_2: A list of smiles strings.
:param radius: The radius of the morgan fingerprints.
:param sample_rate: Rate at which to sample pairs of molecules for Morgan similarity (to reduce time).
"""
# Compute similarities
similarities = []
num_pairs = len(smiles_1) * len(smiles_2)
# Sample to improve speed
if sample_rate < 1.0:
sample_num_pairs = sample_rate * num_pairs
sample_size = math.ceil(math.sqrt(sample_num_pairs))
sample_smiles_1 = np.random.choice(smiles_1, size=sample_size, replace=True)
sample_smiles_2 = np.random.choice(smiles_2, size=sample_size, replace=True)
else:
sample_smiles_1, sample_smiles_2 = smiles_1, smiles_2
sample_num_pairs = len(sample_smiles_1) * len(sample_smiles_2)
for smile_1, smile_2 in tqdm(product(sample_smiles_1, sample_smiles_2), total=sample_num_pairs):
mol_1, mol_2 = Chem.MolFromSmiles(smile_1), Chem.MolFromSmiles(smile_2)
fp_1, fp_2 = AllChem.GetMorganFingerprint(mol_1, radius), AllChem.GetMorganFingerprint(mol_2, radius)
similarity = DataStructs.TanimotoSimilarity(fp_1, fp_2)
similarities.append(similarity)
similarities = np.array(similarities)
# Print results
print()
    print(f'Average Tanimoto similarity = {np.mean(similarities):.4f} +/- {np.std(similarities):.4f}')
    print(f'Minimum Tanimoto similarity = {np.min(similarities):.4f}')
    print(f'Maximum Tanimoto similarity = {np.max(similarities):.4f}')
    print()
    print('Percentiles for Tanimoto similarity')
print(' | '.join([f'{i}% = {np.percentile(similarities, i):.4f}' for i in range(0, 101, 10)]))
if __name__ == '__main__':
args = Args().parse_args()
data_1 = get_data(path=args.data_path_1, smiles_column=args.smiles_column_1)
data_2 = get_data(path=args.data_path_2, smiles_column=args.smiles_column_2)
if args.similarity_measure == 'scaffold':
scaffold_similarity(data_1.smiles(), data_2.smiles())
elif args.similarity_measure == 'morgan':
morgan_similarity(data_1.smiles(), data_2.smiles(), args.radius, args.sample_rate)
else:
raise ValueError(f'Similarity measure "{args.similarity_measure}" not supported.')
|
py
|
1a5749c940e200400646ea7c530adaf68f81ef23
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import os
import re
import requests
import socket
import urllib2
from collections import defaultdict, Counter, deque
from math import ceil
# project
from checks import AgentCheck
from config import _is_affirmative
from utils.dockerutil import DockerUtil, MountException
from utils.kubeutil import KubeUtil
from utils.platform import Platform
from utils.service_discovery.sd_backend import get_sd_backend
EVENT_TYPE = 'docker'
SERVICE_CHECK_NAME = 'docker.service_up'
SIZE_REFRESH_RATE = 5 # Collect container sizes every 5 iterations of the check
MAX_CGROUP_LISTING_RETRIES = 3
CONTAINER_ID_RE = re.compile('[0-9a-f]{64}')
GAUGE = AgentCheck.gauge
RATE = AgentCheck.rate
HISTORATE = AgentCheck.generate_historate_func(["container_name"])
HISTO = AgentCheck.generate_histogram_func(["container_name"])
FUNC_MAP = {
GAUGE: {True: HISTO, False: GAUGE},
RATE: {True: HISTORATE, False: RATE}
}
UNIT_MAP = {
'kb': 1000,
'mb': 1000000,
'gb': 1000000000,
'tb': 1000000000000
}
CGROUP_METRICS = [
{
"cgroup": "memory",
"file": "memory.stat",
"metrics": {
"cache": ("docker.mem.cache", GAUGE),
"rss": ("docker.mem.rss", GAUGE),
"swap": ("docker.mem.swap", GAUGE),
},
"to_compute": {
# We only get these metrics if they are properly set, i.e. they are a "reasonable" value
"docker.mem.limit": (["hierarchical_memory_limit"], lambda x: float(x) if float(x) < 2 ** 60 else None, GAUGE),
"docker.mem.sw_limit": (["hierarchical_memsw_limit"], lambda x: float(x) if float(x) < 2 ** 60 else None, GAUGE),
"docker.mem.in_use": (["rss", "hierarchical_memory_limit"], lambda x,y: float(x)/float(y) if float(y) < 2 ** 60 else None, GAUGE),
"docker.mem.sw_in_use": (["swap", "rss", "hierarchical_memsw_limit"], lambda x,y,z: float(x + y)/float(z) if float(z) < 2 ** 60 else None, GAUGE)
}
},
{
"cgroup": "cpuacct",
"file": "cpuacct.stat",
"metrics": {
"user": ("docker.cpu.user", RATE),
"system": ("docker.cpu.system", RATE),
},
},
{
"cgroup": "blkio",
"file": 'blkio.throttle.io_service_bytes',
"metrics": {
"io_read": ("docker.io.read_bytes", RATE),
"io_write": ("docker.io.write_bytes", RATE),
},
},
]
DEFAULT_CONTAINER_TAGS = [
"docker_image",
"image_name",
"image_tag",
]
DEFAULT_PERFORMANCE_TAGS = [
"container_name",
"docker_image",
"image_name",
"image_tag",
]
DEFAULT_IMAGE_TAGS = [
'image_name',
'image_tag'
]
TAG_EXTRACTORS = {
"docker_image": lambda c: [c["Image"]],
"image_name": lambda c: DockerUtil.image_tag_extractor(c, 0),
"image_tag": lambda c: DockerUtil.image_tag_extractor(c, 1),
"container_command": lambda c: [c["Command"]],
"container_name": DockerUtil.container_name_extractor,
"container_id": lambda c: [c["Id"]],
}
CONTAINER = "container"
PERFORMANCE = "performance"
FILTERED = "filtered"
IMAGE = "image"
def get_filters(include, exclude):
# The reasoning is to check exclude first, so we can skip if there is no exclude
if not exclude:
return
filtered_tag_names = []
exclude_patterns = []
include_patterns = []
# Compile regex
for rule in exclude:
exclude_patterns.append(re.compile(rule))
filtered_tag_names.append(rule.split(':')[0])
for rule in include:
include_patterns.append(re.compile(rule))
filtered_tag_names.append(rule.split(':')[0])
return set(exclude_patterns), set(include_patterns), set(filtered_tag_names)
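# Hedged illustration (not part of the original file): rules are "tag_name:regex"
# strings, so with
#   exclude = ['image_name:.*ubuntu.*'] and include = ['image_name:ubuntu:14.04']
# get_filters() returns the compiled exclude/include patterns plus the tag names
# (here 'image_name') whose values must be resolved before filtering containers.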
class DockerDaemon(AgentCheck):
"""Collect metrics and events from Docker API and cgroups."""
def __init__(self, name, init_config, agentConfig, instances=None):
if instances is not None and len(instances) > 1:
raise Exception("Docker check only supports one configured instance.")
AgentCheck.__init__(self, name, init_config,
agentConfig, instances=instances)
self.init_success = False
self.init()
self._service_discovery = agentConfig.get('service_discovery') and \
agentConfig.get('service_discovery_backend') == 'docker'
def is_k8s(self):
return 'KUBERNETES_PORT' in os.environ
def init(self):
try:
instance = self.instances[0]
# We configure the check with the right cgroup settings for this host
# Just needs to be done once
self.docker_util = DockerUtil()
self.docker_client = self.docker_util.client
if self.is_k8s():
self.kubeutil = KubeUtil()
self._mountpoints = self.docker_util.get_mountpoints(CGROUP_METRICS)
self.cgroup_listing_retries = 0
self._latest_size_query = 0
self._filtered_containers = set()
self._disable_net_metrics = False
# Set tagging options
self.custom_tags = instance.get("tags", [])
self.collect_labels_as_tags = instance.get("collect_labels_as_tags", [])
self.kube_labels = {}
self.use_histogram = _is_affirmative(instance.get('use_histogram', False))
performance_tags = instance.get("performance_tags", DEFAULT_PERFORMANCE_TAGS)
self.tag_names = {
CONTAINER: instance.get("container_tags", DEFAULT_CONTAINER_TAGS),
PERFORMANCE: performance_tags,
IMAGE: instance.get('image_tags', DEFAULT_IMAGE_TAGS)
}
# Set filtering settings
if not instance.get("exclude"):
self._filtering_enabled = False
if instance.get("include"):
self.log.warning("You must specify an exclude section to enable filtering")
else:
self._filtering_enabled = True
include = instance.get("include", [])
exclude = instance.get("exclude", [])
self._exclude_patterns, self._include_patterns, _filtered_tag_names = get_filters(include, exclude)
self.tag_names[FILTERED] = _filtered_tag_names
# Other options
self.collect_image_stats = _is_affirmative(instance.get('collect_images_stats', False))
self.collect_container_size = _is_affirmative(instance.get('collect_container_size', False))
self.collect_events = _is_affirmative(instance.get('collect_events', True))
self.collect_image_size = _is_affirmative(instance.get('collect_image_size', False))
self.collect_disk_stats = _is_affirmative(instance.get('collect_disk_stats', False))
self.collect_ecs_tags = _is_affirmative(instance.get('ecs_tags', True)) and Platform.is_ecs_instance()
self.ecs_tags = {}
except Exception, e:
self.log.critical(e)
self.warning("Initialization failed. Will retry at next iteration")
else:
self.init_success = True
def check(self, instance):
"""Run the Docker check for one instance."""
if not self.init_success:
# Initialization can fail if cgroups are not ready. So we retry if needed
# https://github.com/DataDog/dd-agent/issues/1896
self.init()
if not self.init_success:
# Initialization failed, will try later
return
# Report image metrics
if self.collect_image_stats:
self._count_and_weigh_images()
if self.collect_ecs_tags:
self.refresh_ecs_tags()
if self.is_k8s():
try:
self.kube_labels = self.kubeutil.get_kube_labels()
except Exception as e:
self.log.warning('Could not retrieve kubernetes labels: %s' % str(e))
self.kube_labels = {}
# Get the list of containers and the index of their names
containers_by_id = self._get_and_count_containers()
containers_by_id = self._crawl_container_pids(containers_by_id)
# Send events from Docker API
if self.collect_events or self._service_discovery:
self._process_events(containers_by_id)
# Report performance container metrics (cpu, mem, net, io)
self._report_performance_metrics(containers_by_id)
if self.collect_container_size:
self._report_container_size(containers_by_id)
# Collect disk stats from Docker info command
if self.collect_disk_stats:
self._report_disk_stats()
def _count_and_weigh_images(self):
try:
tags = self._get_tags()
active_images = self.docker_client.images(all=False)
active_images_len = len(active_images)
all_images_len = len(self.docker_client.images(quiet=True, all=True))
self.gauge("docker.images.available", active_images_len, tags=tags)
self.gauge("docker.images.intermediate", (all_images_len - active_images_len), tags=tags)
if self.collect_image_size:
self._report_image_size(active_images)
except Exception, e:
# It's not an important metric, keep going if it fails
self.warning("Failed to count Docker images. Exception: {0}".format(e))
def _get_and_count_containers(self):
"""List all the containers from the API, filter and count them."""
# Querying the size of containers is slow, we don't do it at each run
must_query_size = self.collect_container_size and self._latest_size_query == 0
self._latest_size_query = (self._latest_size_query + 1) % SIZE_REFRESH_RATE
running_containers_count = Counter()
all_containers_count = Counter()
try:
containers = self.docker_client.containers(all=True, size=must_query_size)
except Exception, e:
message = "Unable to list Docker containers: {0}".format(e)
self.service_check(SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
message=message)
raise Exception(message)
else:
self.service_check(SERVICE_CHECK_NAME, AgentCheck.OK)
# Filter containers according to the exclude/include rules
self._filter_containers(containers)
containers_by_id = {}
for container in containers:
container_name = DockerUtil.container_name_extractor(container)[0]
container_status_tags = self._get_tags(container, CONTAINER)
all_containers_count[tuple(sorted(container_status_tags))] += 1
if self._is_container_running(container):
running_containers_count[tuple(sorted(container_status_tags))] += 1
# Check if the container is included/excluded via its tags
if self._is_container_excluded(container):
self.log.debug("Container {0} is excluded".format(container_name))
continue
containers_by_id[container['Id']] = container
for tags, count in running_containers_count.iteritems():
self.gauge("docker.containers.running", count, tags=list(tags))
for tags, count in all_containers_count.iteritems():
stopped_count = count - running_containers_count[tags]
self.gauge("docker.containers.stopped", stopped_count, tags=list(tags))
return containers_by_id
def _is_container_running(self, container):
"""Tell if a container is running, according to its status.
There is no "nice" API field to figure it out. We just look at the "Status" field, knowing how it is generated.
See: https://github.com/docker/docker/blob/v1.6.2/daemon/state.go#L35
"""
return container["Status"].startswith("Up") or container["Status"].startswith("Restarting")
def _get_tags(self, entity=None, tag_type=None):
"""Generate the tags for a given entity (container or image) according to a list of tag names."""
# Start with custom tags
tags = list(self.custom_tags)
# Collect pod names as tags on kubernetes
if self.is_k8s() and KubeUtil.POD_NAME_LABEL not in self.collect_labels_as_tags:
self.collect_labels_as_tags.append(KubeUtil.POD_NAME_LABEL)
if entity is not None:
pod_name = None
# Get labels as tags
labels = entity.get("Labels")
if labels is not None:
for k in self.collect_labels_as_tags:
if k in labels:
v = labels[k]
if k == KubeUtil.POD_NAME_LABEL and self.is_k8s():
pod_name = v
k = "pod_name"
if "-" in pod_name:
replication_controller = "-".join(pod_name.split("-")[:-1])
if "/" in replication_controller: # k8s <= 1.1
namespace, replication_controller = replication_controller.split("/", 1)
elif KubeUtil.NAMESPACE_LABEL in labels: # k8s >= 1.2
namespace = labels[KubeUtil.NAMESPACE_LABEL]
pod_name = "{0}/{1}".format(namespace, pod_name)
tags.append("kube_namespace:%s" % namespace)
tags.append("kube_replication_controller:%s" % replication_controller)
tags.append("pod_name:%s" % pod_name)
elif not v:
tags.append(k)
else:
tags.append("%s:%s" % (k,v))
if k == KubeUtil.POD_NAME_LABEL and self.is_k8s() and k not in labels:
tags.append("pod_name:no_pod")
# Get entity specific tags
if tag_type is not None:
tag_names = self.tag_names[tag_type]
for tag_name in tag_names:
tag_value = self._extract_tag_value(entity, tag_name)
if tag_value is not None:
for t in tag_value:
tags.append('%s:%s' % (tag_name, str(t).strip()))
# Add ECS tags
if self.collect_ecs_tags:
entity_id = entity.get("Id")
if entity_id in self.ecs_tags:
ecs_tags = self.ecs_tags[entity_id]
tags.extend(ecs_tags)
# Add kube labels
if self.is_k8s():
kube_tags = self.kube_labels.get(pod_name)
if kube_tags:
tags.extend(list(kube_tags))
return tags
def _extract_tag_value(self, entity, tag_name):
"""Extra tag information from the API result (containers or images).
Cache extracted tags inside the entity object.
"""
if tag_name not in TAG_EXTRACTORS:
self.warning("{0} isn't a supported tag".format(tag_name))
return
# Check for already extracted tags
if "_tag_values" not in entity:
entity["_tag_values"] = {}
if tag_name not in entity["_tag_values"]:
entity["_tag_values"][tag_name] = TAG_EXTRACTORS[tag_name](entity)
return entity["_tag_values"][tag_name]
def refresh_ecs_tags(self):
ecs_config = self.docker_client.inspect_container('ecs-agent')
ip = ecs_config.get('NetworkSettings', {}).get('IPAddress')
ports = ecs_config.get('NetworkSettings', {}).get('Ports')
port = ports.keys()[0].split('/')[0] if ports else None
ecs_tags = {}
if ip and port:
tasks = requests.get('http://%s:%s/v1/tasks' % (ip, port)).json()
for task in tasks.get('Tasks', []):
for container in task.get('Containers', []):
tags = ['task_name:%s' % task['Family'], 'task_version:%s' % task['Version']]
ecs_tags[container['DockerId']] = tags
self.ecs_tags = ecs_tags
def _filter_containers(self, containers):
if not self._filtering_enabled:
return
self._filtered_containers = set()
for container in containers:
container_tags = self._get_tags(container, FILTERED)
if self._are_tags_filtered(container_tags):
container_name = DockerUtil.container_name_extractor(container)[0]
self._filtered_containers.add(container_name)
self.log.debug("Container {0} is filtered".format(container_name))
def _are_tags_filtered(self, tags):
if self._tags_match_patterns(tags, self._exclude_patterns):
if self._tags_match_patterns(tags, self._include_patterns):
return False
return True
return False
def _tags_match_patterns(self, tags, filters):
for rule in filters:
for tag in tags:
if re.match(rule, tag):
return True
return False
def _is_container_excluded(self, container):
"""Check if a container is excluded according to the filter rules.
Requires _filter_containers to run first.
"""
container_name = DockerUtil.container_name_extractor(container)[0]
return container_name in self._filtered_containers
def _report_container_size(self, containers_by_id):
for container in containers_by_id.itervalues():
if self._is_container_excluded(container):
continue
tags = self._get_tags(container, PERFORMANCE)
m_func = FUNC_MAP[GAUGE][self.use_histogram]
if "SizeRw" in container:
m_func(self, 'docker.container.size_rw', container['SizeRw'],
tags=tags)
if "SizeRootFs" in container:
m_func(
self, 'docker.container.size_rootfs', container['SizeRootFs'],
tags=tags)
def _report_image_size(self, images):
for image in images:
tags = self._get_tags(image, IMAGE)
if 'VirtualSize' in image:
self.gauge('docker.image.virtual_size', image['VirtualSize'], tags=tags)
if 'Size' in image:
self.gauge('docker.image.size', image['Size'], tags=tags)
# Performance metrics
def _report_performance_metrics(self, containers_by_id):
containers_without_proc_root = []
for container in containers_by_id.itervalues():
if self._is_container_excluded(container) or not self._is_container_running(container):
continue
tags = self._get_tags(container, PERFORMANCE)
self._report_cgroup_metrics(container, tags)
if "_proc_root" not in container:
containers_without_proc_root.append(DockerUtil.container_name_extractor(container)[0])
continue
self._report_net_metrics(container, tags)
if containers_without_proc_root:
message = "Couldn't find pid directory for containers: {0}. They'll be missing network metrics".format(
", ".join(containers_without_proc_root))
if not self.is_k8s():
self.warning(message)
else:
# On kubernetes, this is kind of expected. Network metrics will be collected by the kubernetes integration anyway
self.log.debug(message)
def _report_cgroup_metrics(self, container, tags):
try:
for cgroup in CGROUP_METRICS:
stat_file = self._get_cgroup_file(cgroup["cgroup"], container['Id'], cgroup['file'])
stats = self._parse_cgroup_file(stat_file)
if stats:
for key, (dd_key, metric_func) in cgroup['metrics'].iteritems():
metric_func = FUNC_MAP[metric_func][self.use_histogram]
if key in stats:
metric_func(self, dd_key, int(stats[key]), tags=tags)
# Computed metrics
for mname, (key_list, fct, metric_func) in cgroup.get('to_compute', {}).iteritems():
values = [stats[key] for key in key_list if key in stats]
if len(values) != len(key_list):
self.log.debug("Couldn't compute {0}, some keys were missing.".format(mname))
continue
value = fct(*values)
metric_func = FUNC_MAP[metric_func][self.use_histogram]
if value is not None:
metric_func(self, mname, value, tags=tags)
except MountException as ex:
if self.cgroup_listing_retries > MAX_CGROUP_LISTING_RETRIES:
raise ex
else:
self.warning("Couldn't find the cgroup files. Skipping the CGROUP_METRICS for now."
"Will retry {0} times before failing.".format(MAX_CGROUP_LISTING_RETRIES - self.cgroup_listing_retries))
self.cgroup_listing_retries += 1
else:
self.cgroup_listing_retries = 0
def _report_net_metrics(self, container, tags):
"""Find container network metrics by looking at /proc/$PID/net/dev of the container process."""
if self._disable_net_metrics:
self.log.debug("Network metrics are disabled. Skipping")
return
proc_net_file = os.path.join(container['_proc_root'], 'net/dev')
try:
with open(proc_net_file, 'r') as fp:
lines = fp.readlines()
"""Two first lines are headers:
Inter-| Receive | Transmit
face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
"""
for l in lines[2:]:
cols = l.split(':', 1)
interface_name = str(cols[0]).strip()
if interface_name == 'eth0':
x = cols[1].split()
m_func = FUNC_MAP[RATE][self.use_histogram]
m_func(self, "docker.net.bytes_rcvd", long(x[0]), tags)
m_func(self, "docker.net.bytes_sent", long(x[8]), tags)
break
except Exception, e:
# It is possible that the container got stopped between the API call and now
self.warning("Failed to report IO metrics from file {0}. Exception: {1}".format(proc_net_file, e))
def _process_events(self, containers_by_id):
if self.collect_events is False:
# Crawl events for service discovery only
self._get_events()
return
try:
api_events = self._get_events()
aggregated_events = self._pre_aggregate_events(api_events, containers_by_id)
events = self._format_events(aggregated_events, containers_by_id)
except (socket.timeout, urllib2.URLError):
self.warning('Timeout when collecting events. Events will be missing.')
return
except Exception, e:
self.warning("Unexpected exception when collecting events: {0}. "
"Events will be missing".format(e))
return
for ev in events:
self.log.debug("Creating event: %s" % ev['msg_title'])
self.event(ev)
def _get_events(self):
"""Get the list of events."""
events, should_reload_conf = self.docker_util.get_events()
if should_reload_conf and self._service_discovery:
get_sd_backend(self.agentConfig).reload_check_configs = True
return events
def _pre_aggregate_events(self, api_events, containers_by_id):
# Aggregate events, one per image. Put newer events first.
events = defaultdict(deque)
for event in api_events:
# Skip events related to filtered containers
container = containers_by_id.get(event.get('id'))
if container is not None and self._is_container_excluded(container):
self.log.debug("Excluded event: container {0} status changed to {1}".format(
event['id'], event['status']))
continue
# from may be missing (for network events for example)
if 'from' in event:
events[event['from']].appendleft(event)
return events
def _format_events(self, aggregated_events, containers_by_id):
events = []
for image_name, event_group in aggregated_events.iteritems():
max_timestamp = 0
status = defaultdict(int)
status_change = []
container_tags = set()
for event in event_group:
max_timestamp = max(max_timestamp, int(event['time']))
status[event['status']] += 1
container_name = event['id'][:11]
if event['id'] in containers_by_id:
cont = containers_by_id[event['id']]
container_name = DockerUtil.container_name_extractor(cont)[0]
container_tags.update(self._get_tags(cont, PERFORMANCE))
container_tags.add('container_name:%s' % container_name)
status_change.append([container_name, event['status']])
status_text = ", ".join(["%d %s" % (count, st) for st, count in status.iteritems()])
msg_title = "%s %s on %s" % (image_name, status_text, self.hostname)
msg_body = (
"%%%\n"
"{image_name} {status} on {hostname}\n"
"```\n{status_changes}\n```\n"
"%%%"
).format(
image_name=image_name,
status=status_text,
hostname=self.hostname,
status_changes="\n".join(
["%s \t%s" % (change[1].upper(), change[0]) for change in status_change])
)
events.append({
'timestamp': max_timestamp,
'host': self.hostname,
'event_type': EVENT_TYPE,
'msg_title': msg_title,
'msg_text': msg_body,
'source_type_name': EVENT_TYPE,
'event_object': 'docker:%s' % image_name,
'tags': list(container_tags)
})
return events
def _report_disk_stats(self):
"""Report metrics about the volume space usage"""
stats = {
'docker.data.used': None,
'docker.data.total': None,
'docker.data.free': None,
'docker.metadata.used': None,
'docker.metadata.total': None,
'docker.metadata.free': None
# these two are calculated by _calc_percent_disk_stats
# 'docker.data.percent': None,
# 'docker.metadata.percent': None
}
info = self.docker_client.info()
driver_status = info.get('DriverStatus', [])
if not driver_status:
self.log.warning('Disk metrics collection is enabled but docker info did not'
' report any. Your storage driver might not support them, skipping.')
return
for metric in driver_status:
# only consider metrics about disk space
if len(metric) == 2 and 'Space' in metric[0]:
# identify Data and Metadata metrics
mtype = 'data'
if 'Metadata' in metric[0]:
mtype = 'metadata'
if 'Used' in metric[0]:
stats['docker.{0}.used'.format(mtype)] = metric[1]
elif 'Space Total' in metric[0]:
stats['docker.{0}.total'.format(mtype)] = metric[1]
elif 'Space Available' in metric[0]:
stats['docker.{0}.free'.format(mtype)] = metric[1]
stats = self._format_disk_metrics(stats)
stats.update(self._calc_percent_disk_stats(stats))
tags = self._get_tags()
for name, val in stats.iteritems():
if val is not None:
self.gauge(name, val, tags)
def _format_disk_metrics(self, metrics):
"""Cast the disk stats to float and convert them to bytes"""
for name, raw_val in metrics.iteritems():
if raw_val:
val, unit = raw_val.split(' ')
# by default some are uppercased others lowercased. That's error prone.
unit = unit.lower()
try:
val = int(float(val) * UNIT_MAP[unit])
metrics[name] = val
except KeyError:
self.log.error('Unrecognized unit %s for disk metric %s. Dropping it.' % (unit, name))
metrics[name] = None
return metrics
def _calc_percent_disk_stats(self, stats):
"""Calculate a percentage of used disk space for data and metadata"""
mtypes = ['data', 'metadata']
percs = {}
for mtype in mtypes:
used = stats.get('docker.{0}.used'.format(mtype))
total = stats.get('docker.{0}.total'.format(mtype))
free = stats.get('docker.{0}.free'.format(mtype))
if used and total and free and ceil(total) < free + used:
self.log.error('used, free, and total disk metrics may be wrong, '
'unable to calculate percentage.')
return {}
try:
if isinstance(used, int):
percs['docker.{0}.percent'.format(mtype)] = round(100 * float(used) / float(total), 2)
elif isinstance(free, int):
percs['docker.{0}.percent'.format(mtype)] = round(100 * (1.0 - (float(free) / float(total))), 2)
except ZeroDivisionError:
self.log.error('docker.{0}.total is 0, calculating docker.{1}.percent'
' is not possible.'.format(mtype, mtype))
return percs
# Cgroups
def _get_cgroup_file(self, cgroup, container_id, filename):
"""Find a specific cgroup file, containing metrics to extract."""
params = {
"mountpoint": self._mountpoints[cgroup],
"id": container_id,
"file": filename,
}
return DockerUtil.find_cgroup_filename_pattern(self._mountpoints, container_id) % (params)
def _parse_cgroup_file(self, stat_file):
"""Parse a cgroup pseudo file for key/values."""
self.log.debug("Opening cgroup file: %s" % stat_file)
try:
with open(stat_file, 'r') as fp:
if 'blkio' in stat_file:
return self._parse_blkio_metrics(fp.read().splitlines())
else:
return dict(map(lambda x: x.split(' ', 1), fp.read().splitlines()))
except IOError:
# It is possible that the container got stopped between the API call and now
self.log.info("Can't open %s. Metrics for this container are skipped." % stat_file)
def _parse_blkio_metrics(self, stats):
"""Parse the blkio metrics."""
metrics = {
'io_read': 0,
'io_write': 0,
}
for line in stats:
if 'Read' in line:
metrics['io_read'] += int(line.split()[2])
if 'Write' in line:
metrics['io_write'] += int(line.split()[2])
return metrics
# proc files
def _crawl_container_pids(self, container_dict):
"""Crawl `/proc` to find container PIDs and add them to `containers_by_id`."""
proc_path = os.path.join(self.docker_util._docker_root, 'proc')
pid_dirs = [_dir for _dir in os.listdir(proc_path) if _dir.isdigit()]
if len(pid_dirs) == 0:
self.warning("Unable to find any pid directory in {0}. "
"If you are running the agent in a container, make sure to "
'share the volume properly: "/proc:/host/proc:ro". '
"See https://github.com/DataDog/docker-dd-agent/blob/master/README.md for more information. "
"Network metrics will be missing".format(proc_path))
self._disable_net_metrics = True
return container_dict
self._disable_net_metrics = False
for folder in pid_dirs:
try:
path = os.path.join(proc_path, folder, 'cgroup')
with open(path, 'r') as f:
content = [line.strip().split(':') for line in f.readlines()]
except IOError, e:
# Issue #2074
self.log.debug("Cannot read %s, "
"process likely raced to finish : %s" %
(path, str(e)))
except Exception, e:
self.warning("Cannot read %s : %s" % (path, str(e)))
continue
try:
for line in content:
if line[1] in ('cpu,cpuacct', 'cpuacct,cpu', 'cpuacct') and 'docker' in line[2]:
cpuacct = line[2]
break
else:
continue
matches = re.findall(CONTAINER_ID_RE, cpuacct)
if matches:
container_id = matches[-1]
if container_id not in container_dict:
self.log.debug("Container %s not in container_dict, it's likely excluded", container_id)
continue
container_dict[container_id]['_pid'] = folder
container_dict[container_id]['_proc_root'] = os.path.join(proc_path, folder)
except Exception, e:
self.warning("Cannot parse %s content: %s" % (path, str(e)))
continue
return container_dict
|
py
|
1a574aad8f88cb4129640ad3dfd41a6b2e3ff2c0
|
"""
Windowing processes for windowing over days
"""
import udatetime
import datetime
from numpy import argmin, abs, array
from sit2standpy.v2.base import _BaseProcess, PROC, DATA
__all__ = ['WindowDays']
class WindowDays(_BaseProcess):
def __init__(self, hours=[8, 20], **kwargs):
"""
Window data into days, with the default behaviour to take the hours of most likely wakefulness
Parameters
----------
hours : list-like of int
Hours to include in the windowed data. Default is 8 to 20, which excludes the night from
the detection of sit-to-stand transfers.
"""
super().__init__(**kwargs)
self._hours = hours
def _call(self):
utime = self.data['Sensors']['Lumbar']['Unix Time']
# get the first timepoint to know which day to start and end with
time_sdt = udatetime.utcfromtimestamp(utime[0])
time_edt = udatetime.utcfromtimestamp(utime[-1])
n_days = (time_edt.date() - time_sdt.date()).days
if time_edt.hour > self._hours[0]:
n_days += 1
# set the start and end hours for the first day
day_start = time_sdt.replace(hour=self._hours[0], minute=0, second=0, microsecond=0)
day_end = time_sdt.replace(hour=self._hours[1], minute=0, second=0, microsecond=0)
iend = 10 # set so can reference in the i=0 loop
for i in range(n_days):
istart = argmin(abs(utime[iend-10:] - day_start.timestamp())) + iend - 10
iend = argmin(abs(utime[istart:] - day_end.timestamp())) + istart + 1
self.data = (PROC.format(day_n=i+1, value='Indices'), array([istart, iend]))
day_start += datetime.timedelta(days=1)
day_end += datetime.timedelta(days=1)
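# Hedged usage sketch (not part of the original file): in a sit2standpy v2 sequence
# this step is typically run on a data dictionary containing
# data['Sensors']['Lumbar']['Unix Time'], e.g.
#   wd = WindowDays(hours=[8, 20])
#   wd.predict(data)   # assumed entry point inherited from _BaseProcess
# which stores per-day [start, stop] index pairs under the processed-data keys.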
|
py
|
1a574b00545b746fee5957d92b3671489570c48a
|
import logging
from PyQt5 import QtCore, QtGui, QtWidgets
from hackedit.app import settings
from hackedit.app.forms import dlg_preferences_ui
from hackedit.app.widgets import preference_pages
from hackedit.api import system
def _logger():
return logging.getLogger(__name__)
class DlgPreferences(QtWidgets.QDialog):
_dlg = None
closed = QtCore.pyqtSignal()
color_highlight_background = None
color_highlight_text = None
def __init__(self, parent, app):
super().__init__(parent)
if DlgPreferences.color_highlight_background is None:
DlgPreferences.color_highlight_background = \
self.palette().color(QtGui.QPalette.Highlight).name()
if DlgPreferences.color_highlight_text is None:
DlgPreferences.color_highlight_text = self.palette().color(
QtGui.QPalette.HighlightedText).name()
self.app = app
self._ui = dlg_preferences_ui.Ui_Dialog()
self._ui.setupUi(self)
self._connect_slots()
# force reload of settings
settings.load()
self._setup_builtin_pages()
self._setup_editor_pages()
self._setup_plugin_pages()
self._ui.categories.sortByColumn(0, QtCore.Qt.AscendingOrder)
self._ui.categories.expandAll()
self.restore_state()
btns = self._ui.buttons
btns.button(btns.Reset).setToolTip(
_('Reset changes made to the current page.'))
btns.button(btns.RestoreDefaults).setToolTip(
_('Restore factory defaults for the current page.'))
btns.button(btns.Apply).setToolTip(
_('Apply changes but keep dialog open.'))
btns.button(btns.Ok).setToolTip(
_('Apply changes and close dialog.'))
btns.button(btns.Cancel).setToolTip(
_('Close dialog and cancel any changes.'))
self._ui.pages.setContentsMargins(0, 0, 0, 0)
def closeEvent(self, event):
super().closeEvent(event)
self.closed.emit()
@staticmethod
def edit_preferences(parent, app):
QtWidgets.qApp.setOverrideCursor(QtCore.Qt.WaitCursor)
dlg = DlgPreferences(parent, app)
dlg.restore_state()
QtWidgets.qApp.restoreOverrideCursor()
if system.DARWIN:
dlg.showMaximized()
else:
dlg.show()
dlg.exec_()
def goto_page(self, page_name):
def get_page():
for i in range(self._ui.categories.topLevelItemCount()):
item = self._ui.categories.topLevelItem(i)
if item.text(0) == page_name:
return item
for j in range(item.childCount()):
child_item = item.child(j)
if child_item.text(0) == page_name:
return child_item
return None
item = get_page()
self._ui.categories.setCurrentItem(item)
def _find_item_by_index(self, index):
for i in range(self._ui.categories.topLevelItemCount()):
item = self._ui.categories.topLevelItem(i)
idx = item.data(0, QtCore.Qt.UserRole)
if idx == index:
return item
assert isinstance(item, QtWidgets.QTreeWidgetItem)
for j in range(item.childCount()):
child_item = item.child(j)
idx = child_item.data(0, QtCore.Qt.UserRole)
if idx == index:
return child_item
return None
def _on_item_activated(self, item):
index = item.data(0, QtCore.Qt.UserRole)
text = item.text(0)
if item.parent() is not None:
text = '%s - %s' % (item.parent().text(0), text)
self._ui.label_title.setText(text)
self._ui.label_title.setStyleSheet('''background-color: %s;
color: %s;
padding: 10px;
border-radius:3px;''' % (DlgPreferences.color_highlight_background,
DlgPreferences.color_highlight_text))
self._ui.pages.setCurrentIndex(index)
w = self._ui.pages.currentWidget()
buttons = self._ui.buttons
buttons.button(buttons.Reset).setVisible(w.can_reset)
buttons.button(buttons.RestoreDefaults).setVisible(
w.can_restore_defaults)
buttons.button(buttons.Apply).setVisible(w.can_apply)
def _reset(self):
self._ui.pages.currentWidget().reset()
def _restore_defaults(self):
QtWidgets.qApp.setOverrideCursor(QtCore.Qt.WaitCursor)
self._ui.pages.currentWidget().restore_defaults()
self._reset()
QtWidgets.qApp.restoreOverrideCursor()
for i in range(self._ui.pages.count()):
page = self._ui.pages.widget(i)
page.save()
def _apply(self):
# save all settings
QtWidgets.qApp.setOverrideCursor(QtCore.Qt.WaitCursor)
try:
for i in range(self._ui.pages.count()):
page = self._ui.pages.widget(i)
page.save()
self.app.apply_preferences()
for i in range(self._ui.pages.count()):
page = self._ui.pages.widget(i)
page.reset()
finally:
QtWidgets.qApp.restoreOverrideCursor()
def restore_state(self):
index = int(QtCore.QSettings().value(
'_cache/preferences_page_index', 0))
item = self._find_item_by_index(index)
self._ui.categories.setCurrentItem(item)
def _setup_builtin_pages(self):
env = preference_pages.Environment()
self._add_page(env)
self._add_page(preference_pages.Editor())
self._add_page(preference_pages.Behaviour())
colors = preference_pages.EditorColors()
env.colors = colors
self._add_page(colors)
self._add_page(preference_pages.EditorDisplay())
self._add_page(preference_pages.Indexing())
self._add_page(preference_pages.Mimetypes())
self._add_page(preference_pages.Notifications())
self._add_page(preference_pages.Shortcuts())
self._add_page(preference_pages.Templates())
self._add_page(preference_pages.Workspaces())
def _setup_plugin_pages(self):
pages = []
for p in self.app.plugin_manager.preferences_page_plugins:
pages.append(p.get_preferences_page())
for p in sorted(pages, key=lambda x: x.category is not None):
self._add_page(p)
pages[:] = []
for p in self.app.plugin_manager.workspace_plugins.values():
page = p.get_preferences_page()
if page:
pages.append(page)
for p in sorted(pages, key=lambda x: x.category is not None):
self._add_page(p)
def _setup_editor_pages(self):
for p in self.app.plugin_manager.editor_plugins:
p = p.get_specific_preferences_page()
if p:
p.category = _('Editor')
self._add_page(p)
def _connect_slots(self):
self._ui.categories.currentItemChanged.connect(
self._on_item_activated)
self._ui.buttons.button(self._ui.buttons.Reset).clicked.connect(
self._reset)
bt_restore_defaults = self._ui.buttons.button(
self._ui.buttons.RestoreDefaults)
bt_restore_defaults.clicked.connect(self._restore_defaults)
self._ui.buttons.button(self._ui.buttons.Apply).clicked.connect(
self._apply)
def accept(self):
self._apply()
QtCore.QSettings().setValue(
'_cache/preferences_page_index', self._ui.pages.currentIndex())
super().accept()
def reject(self):
QtCore.QSettings().setValue(
'_cache/preferences_page_index', self._ui.pages.currentIndex())
super().reject()
def _add_page(self, widget):
"""
Adds a settings page to the dialog
:param widget: page widget
:type widget: hackedit.api.widgets.PreferencePage
"""
if widget is None:
return
widget.setContentsMargins(0, 0, 0, 0)
index = self._ui.pages.count()
self._ui.pages.addWidget(widget)
item = QtWidgets.QTreeWidgetItem()
item.setText(0, widget.name)
if widget.icon is not None:
item.setIcon(0, widget.icon)
item.setData(0, QtCore.Qt.UserRole, index)
parent = None
if widget.category:
parent = self._ui.categories.findItems(
widget.category, QtCore.Qt.MatchExactly, 0)
if parent:
parent = parent[0]
else:
print('parent not found', widget.category)
if parent:
parent.addChild(item)
else:
self._ui.categories.addTopLevelItem(item)
widget.app = self.app
widget.reset()
def keyPressEvent(self, ev):
if ev.key() == QtCore.Qt.Key_Enter or \
ev.key() == QtCore.Qt.Key_Return:
return
super().keyPressEvent(ev)
|
py
|
1a574b186f548bf09a5e09610ddc0984fcf3c600
|
import urllib.request as urlr
import urllib.parse as urlp
import urllib.error as urle
import http.cookiejar as ck
import pickle as pk
import re
import zlib
import time
import random as r
from datetime import datetime, timedelta
import os
from lxml import etree
import tkinter.messagebox
from tkinter import *
def get_Pkls(path):
if os.path.exists(path+'userdata.pkl'):
with open(path+'userdata.pkl', 'rb') as f1:
user_data = pk.load(f1)
else:
user_data = dict()
with open(path+'headers.pkl', 'rb') as f2:
head = pk.load(f2)
with open(path+'urls.pkl', 'rb') as f3:
url = pk.load(f3)
return (user_data, head, url)
def gzip_decode(rsp,code='utf-8'):
if rsp != None:
content = rsp.read()
gzipped = rsp.headers.get('Content-Encoding')
if gzipped:
html = zlib.decompress(content, zlib.MAX_WBITS | 32).decode(code,errors='ignore')
else:
html = content.decode(code)
return html
return ''
def get_FormHash(html):
tgt = re.search(r'name="formhash" value="(.+?)"',html)
if tgt:
return tgt.group(1)
return ''
def get_loginhash(html):
tgt = re.search(r'<div id="main_messaqge_(.+?)">',html)
if tgt:
return tgt.group(1)
return ''
def set_pgvs():
curMs = datetime.utcnow().second
pgv_ssid = "s" + str( (round(r.random() * 2147483647) * curMs) % 10000000000 )
pgv_pvi = (round(r.random() * 2147483647) * curMs) % 10000000000
return (pgv_ssid, pgv_pvi)
class WebService:
def __init__(self, path):
self.cookie_name = 'cookie.txt'
self.code = 'utf-8'
self.path = path
self.userdata, self.headers, self.urls = get_Pkls(self.path)
self.init_userdata()
self.init_cookie()
self.new_opener()
self.error = False
self.get_saylist()
def init_userdata(self):
if self.userdata.get('mission') == None:
self.userdata['mission'] = True
if self.userdata.get('autologin') == None:
self.userdata['autologin'] = True
def init_cookie(self):
self.cookie = ck.LWPCookieJar(self.path + self.cookie_name)
try:
self.cookie.load(self.path + self.cookie_name, True, True)
except FileNotFoundError:
self.cookie.save(self.path + self.cookie_name, True, True)
def save_cookie(self):
self.cookie.save(self.path + self.cookie_name, True, True)
def new_opener(self):
self.opener = urlr.build_opener(urlr.HTTPCookieProcessor(self.cookie))
def get_prelogin_data(self):
self.data = {}
self.data['username'] = self.userdata['username']
self.data['password'] = self.userdata['password']
self.data['loginfield'] = 'username'
self.data['cookietime'] = '2592000'
self.data['quickforward'] = 'yes'
self.data['handlekey'] = 'ls'
self.post_data = urlp.urlencode(self.data).encode(self.code)
def get_response(self, url, data=None, headers=None):
temp_headers = self.headers if headers==None else headers
if data:
req = urlr.Request(url, data, temp_headers)
else:
req = urlr.Request(url, headers=temp_headers)
try:
response = self.opener.open(req)
except urle.URLError as e:
if hasattr(e,'reason'):
print('We failed to reach a server.')
print('Reason: ', e.reason)
prompt = 'We failed to reach a server.\nReason: ' + str(e.reason)
elif hasattr(e, 'code'):
print('The server couldn\'t fulfill the request.')
print('Error code: ', e.code)
prompt = 'The server couldn\'t fulfill the request.\nError code: ' + str(e.code)
tkinter.messagebox.showerror('出错啦!', prompt)
response = None
except ConnectionResetError as e:
print(e)
tkinter.messagebox.showerror('出错啦!', e)
response = None
finally:
return response
def get_Verify_img(self):
os.chdir(self.path)
response = self.get_response(self.urls['code_url'])
if response:
img = response.read()
with open('code.png', 'wb') as f:
f.write(img)
os.chdir(os.pardir)
def get_login_data(self, code):
self.data.pop('quickforward')
self.data.pop('handlekey')
self.data['formhash'] = get_FormHash(self.html)
self.data['tsdm_verify'] = code  # value read from the verification-code Entry widget
self.data['answer'] = ''
self.data['questionid'] = '0'
self.data['referer'] = 'https://www.tsdm39.net/forum.php'
self.post_data = urlp.urlencode(self.data).encode(self.code)
def add_cookie(self, name, value):
temp_cookie = ck.Cookie(
version=0,
name=name,
value=value,
port=None,
port_specified=False,
domain=".tsdm39.net",
domain_specified=True,
domain_initial_dot=False,
path="/",
path_specified=True,
secure=False,
expires=None,
discard=False,
comment=None,
comment_url=None,
rest={}
)
self.cookie.set_cookie(temp_cookie)
def get_pgv_cookie(self):
os.chdir(self.path)
if os.path.exists('pgv.txt'):
with open('pgv.txt') as f:
pgv_str = f.read()
tgt = re.search(r'pgv_pvi=(.+); pgv_info=(.+)', pgv_str)
self.add_cookie('pgv_pvi', tgt.group(1))
self.add_cookie('pgv_info', tgt.group(2))
si = re.search(r'ssi=(.+)', tgt.group(2)).group(1)
r3 = int(r.random() * 450) + 350
url = 'http://pingtcss.qq.com/pingd?dm=www.tsdm39.net&url=/forum.php&arg=-&rdm=-&rurl=-&adt=-&rarg=-&pvi=' + tgt.group(1) +'&si=' + si + '&ui=0&ty=1&rt=forum&pn=1&qq=000&r2=8480046&scr=1366x768&scl=24-bit&lg=zh-cn&jv=0&pf=Win32&tz=-8&fl=26.0%20r0&ct=-&ext=bc=0;adid=&r3=' + str(r3)
self.get_response(url)
else:
pgv_ssid, pgv_pvi = set_pgvs()
r3 = int(r.random() * 450) + 350
url = 'http://pingtcss.qq.com/pingd?dm=www.tsdm39.net&url=/forum.php&arg=-&rdm=-&rurl=-&adt=-&rarg=-&pvi=' + str(pgv_pvi) +'&si=' + str(pgv_ssid) + '&ui=0&ty=1&rt=forum&pn=1&qq=000&r2=8480046&scr=1366x768&scl=24-bit&lg=zh-cn&jv=0&pf=Win32&tz=-8&fl=26.0%20r0&ct=-&ext=bc=0;adid=&r3=' + str(r3)
self.get_response(url)
pgv_str = 'pgv_pvi=' + str(pgv_pvi) +'; pgv_info=ssi=' + str(pgv_ssid)
with open('pgv.txt', 'wt') as f:
f.write(pgv_str)
self.add_cookie('pgv_pvi',str(pgv_pvi))
self.add_cookie('pgv_info','ssi=' + str(pgv_ssid))
os.chdir(os.pardir)
def autoLogin(self):
response = self.get_response(self.urls['first_url'])
self.html = gzip_decode(response)
return re.search(r'title="访问我的空间">(.+)</a>',self.html)
def is_login(self, html):
return re.search(self.userdata['username'], html)
def get_enter_url(self, account, passwd):
self.userdata['username'] = account
self.userdata['password'] = passwd
self.get_prelogin_data()
response = self.get_response(self.urls['login_url'], self.post_data)
self.save_cookie()
self.html = gzip_decode(response)
self.urls['enter_url'] = self.urls['enter_url_start'] + get_loginhash(self.html) + self.urls['enter_url_end']
self.get_Verify_img()
def save_userdata(self):
with open(self.path + 'userdata.pkl', 'wb') as f:
pk.dump(self.userdata, f)
def get_author_image(self):
html = etree.HTML(self.html)
src = html.xpath('//div[@id="um"]/div[@class="avt y"]/a/img/@data-original')[0]
rsp = self.get_response(src)
if rsp:
img = rsp.read()
with open(self.path+'author.jpg','wb') as f:
f.write(img)
def get_saylist(self):
if os.path.exists(self.path+'saylist.txt'):
self.saylist = []
with open(self.path+'saylist.txt') as f:
for each in f:
each = each.strip()
if each:
self.saylist.append(each)
else:
prompt = '天使奏赛高!!!\n真白赛高~~~\n日常签到。。。\n'
self.saylist = ['天使奏赛高!!!','真白赛高~~~','日常签到。。。']
with open(self.path+'saylist.txt' ,'wt') as f:
f.write(prompt)
def get_sign_data(self):
rsp = self.get_response(self.urls['to_sign_url'])
self.html = gzip_decode(rsp)
sign_data = {}
sign_data['todaysay'] = r.choice(self.saylist)
sign_data['qdxq'] = 'kx'
sign_data['qdmode'] = '1'
sign_data['formhash'] = get_FormHash(self.html)
sign_data['fastreply'] = '1'
self.post_data = urlp.urlencode(sign_data).encode(self.code)
def do_sign(self):
rsp = self.get_response(self.urls['sign_url'], self.post_data)
self.save_cookie()
self.html = gzip_decode(rsp)
rand_money = re.findall(r'恭喜你签到成功!(.+?)</div>', self.html)
signed = re.search(r'您今日已经签到', self.html)
if rand_money:
return ('签到成功!%s' % rand_money[0])
elif signed:
return '您今日已经签到,请明天再来!'
else:
return None
def pre_mission(self):
rsp = self.get_response(self.urls['to_mission_url'])
self.html = gzip_decode(rsp)
return self.is_login(self.html)
def do_mission(self):
mission_data = {'act': 'clickad'}
self.post_data = urlp.urlencode(mission_data).encode(self.code)
rsp = self.get_response(self.urls['mission_url'], self.post_data)
self.html = gzip_decode(rsp)
wait = re.search(r'您需要等待(.+)后即可进行。',self.html)
time.sleep(r.randint(2,5))
if wait:
return wait.group(1)
else:
for i in range(5):
rsp = self.get_response(self.urls['mission_url'], self.post_data)
time.sleep(r.randint(2,5))
mission_data['act'] = 'getcre'
self.post_data = urlp.urlencode(mission_data).encode(self.code)
rsp = self.get_response(self.urls['mission_url'],self.post_data)
self.save_cookie()
self.html = gzip_decode(rsp)
self.mission_money = re.search(r'恭喜,您已经(成功领取了奖励天使币.+)<br />(每间隔.+可进行一次)。',self.html)
fail = re.search(r'不要作弊哦,重新进行游戏吧!',self.html)
if fail:
return 'fail'
return None
class Logs:
def __init__(self, path=os.curdir, filename='logs.pkl'):
self.logname = filename
self.path = path
self.init_logs()
def init_logs(self):
if os.path.exists(self.path + self.logname):
with open(self.path + self.logname, 'rb') as f:
self.logs = pk.load(f)
else:
self.logs = dict()
def save_logs(self):
with open(self.path + self.logname, 'wb') as f:
pk.dump(self.logs, f)
def log2file(self, content):
prompt = self.date2str(self.now())+content+'\n'
if os.path.exists(self.path+'logs.txt'):
with open(self.path+'logs.txt', 'at') as f:
f.write(prompt)
else:
with open(self.path+'logs.txt', 'wt') as f:
f.write(prompt)
def datelist2str(self, datelist):
return (str(datelist[0])+'年'+str(datelist[1])+'月'+str(datelist[2])+'日<-'+str(datelist[3])+':'+str(datelist[4])+':'+str(datelist[5])+'->:')
def date2str(self, date):
return (str(date.year)+'年'+str(date.month)+'月'+str(date.day)+'日<-'+str(date.hour)+':'+str(date.minute)+':'+str(date.second)+'->:')
def update_log(self, name, time=datetime.now(), save=True):
self.logs[name] = self.date2list(time)
if save:
self.save_logs()
def now(self):
return datetime.now()
def dt_list2sec(self, datelist2, datelist1):
dt = self.list2date(datelist2) - self.list2date(datelist1)
return dt.seconds
def date2list(self, date):
datelist = []
datelist.append(date.year)
datelist.append(date.month)
datelist.append(date.day)
datelist.append(date.hour)
datelist.append(date.minute)
datelist.append(date.second)
return datelist
def list2date(self, datelist):
return datetime(datelist[0],datelist[1],datelist[2],datelist[3],datelist[4],datelist[5])
def sign_avaliable(self):
if self.logs.get('sign'):
dt = self.now() - self.list2date(self.logs['sign'])
if (self.now().day - self.logs['sign'][2] >= 1) or (dt.seconds > 24*60*60):
return True
else:
return False
return True
def mission_avaliable(self):
if self.logs.get('mission'):
delta = self.now() - self.list2date(self.logs['mission'])
dt = 6*60*60 - delta.seconds
if dt > 0:
return dt
return True
def get_missionedtime(self, dtlist):
dt = timedelta(hours=6) - timedelta(hours=dtlist[0], minutes=dtlist[1], seconds=dtlist[2])
self.update_log('mission', self.now() - dt)
|
py
|
1a574b305daa17849310a325f8bf6168cade16b3
|
# Generated by Django 2.0.9 on 2018-11-13 20:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20181113_2026'),
]
operations = [
migrations.AddField(
model_name='user',
name='motto',
field=models.TextField(default='', max_length=100),
preserve_default=False,
),
]
|
bzl
|
1a574c0448883645954a23c88d383b716de70751
|
"""Provides macros for working with yoga library."""
YOGA_ROOTS = ["//..."]
JAVA_TARGET = "//java:java"
INFER_ANNOTATIONS_TARGET = "//lib/infer-annotations:infer-annotations"
JSR_305_TARGET = "//lib/jsr-305:jsr-305"
JUNIT_TARGET = "//lib/junit:junit"
PROGRUARD_ANNOTATIONS_TARGET = "//java/proguard-annotations/src/main/java/com/facebook/proguard/annotations:annotations"
SOLOADER_TARGET = "//lib/soloader:soloader"
GTEST_TARGET = "//lib/gtest:gtest"
JNI_TARGET = "//lib/jni:jni"
FBJNI_TARGET = "//lib/fb:fbjni"
FBJNI_JAVA_TARGET = "//lib/fb/src/main/java/com/facebook/jni:jni"
APPCOMPAT_TARGET = "//lib/appcompat:appcompat"
APPLE = ""
ANDROID = ""
ANDROID_SUPPORT_TARGET = "//lib/android-support:android-support"
ANDROID_TARGET = "//android:android"
ANDROID_JAVA_TARGET = "//android/src/main/java/com/facebook/yoga/android:android"
ANDROID_RES_TARGET = "//android:res"
ANDROID_SAMPLE_JAVA_TARGET = "//android/sample/java/com/facebook/samples/yoga:yoga"
ANDROID_SAMPLE_RES_TARGET = "//android/sample:res"
CXX_LIBRARY_WHITELIST = [
"//:yoga",
"//lib/fb:fbjni",
"//java:jni",
]
BASE_COMPILER_FLAGS = [
"-fno-omit-frame-pointer",
"-fexceptions",
"-Wall",
"-Werror",
"-O3",
"-ffast-math",
]
LIBRARY_COMPILER_FLAGS = BASE_COMPILER_FLAGS + [
"-fPIC",
]
def _paths_join(path, *others):
"""Joins one or more path components."""
result = path
for p in others:
if p.startswith("/"): # absolute
result = p
elif not result or result.endswith("/"):
result += p
else:
result += "/" + p
return result
def subdir_glob(glob_specs, exclude = None, prefix = ""):
"""Returns a dict of sub-directory relative paths to full paths.
The subdir_glob() function is useful for defining header maps for C/C++
libraries which should be relative the given sub-directory.
Given a list of tuples, the form of (relative-sub-directory, glob-pattern),
it returns a dict of sub-directory relative paths to full paths.
Please refer to native.glob() for explanations and examples of the pattern.
Args:
glob_specs: The array of tuples in form of
(relative-sub-directory, glob-pattern inside relative-sub-directory).
type: List[Tuple[str, str]]
exclude: A list of patterns to identify files that should be removed
from the set specified by the first argument. Defaults to [].
type: Optional[List[str]]
prefix: If is not None, prepends it to each key in the dictionary.
Defaults to None.
type: Optional[str]
Returns:
A dict of sub-directory relative paths to full paths.
"""
if exclude == None:
exclude = []
results = []
for dirpath, glob_pattern in glob_specs:
results.append(
_single_subdir_glob(dirpath, glob_pattern, exclude, prefix),
)
return _merge_maps(*results)
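# Illustrative usage sketch (hypothetical layout, not referenced by this build
# file): for headers under "lib/fb/include", a call such as
#   subdir_glob([("lib/fb/include", "**/*.h")], prefix = "fb")
# would return a dict mapping e.g. "fb/fbjni.h" -> "lib/fb/include/fbjni.h",
# which is the shape expected for an exported_headers map.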
def _merge_maps(*file_maps):
result = {}
for file_map in file_maps:
for key in file_map:
if key in result and result[key] != file_map[key]:
fail(
"Conflicting files in file search paths. " +
"\"%s\" maps to both \"%s\" and \"%s\"." %
(key, result[key], file_map[key]),
)
result[key] = file_map[key]
return result
def _single_subdir_glob(dirpath, glob_pattern, exclude = None, prefix = None):
if exclude == None:
exclude = []
results = {}
files = native.glob([_paths_join(dirpath, glob_pattern)], exclude = exclude)
for f in files:
if dirpath:
key = f[len(dirpath) + 1:]
else:
key = f
if prefix:
key = _paths_join(prefix, key)
results[key] = f
return results
def yoga_dep(dep):
return "//" + dep
def yoga_android_aar(*args, **kwargs):
native.android_aar(*args, **kwargs)
def yoga_android_binary(*args, **kwargs):
native.android_binary(*args, **kwargs)
def yoga_android_library(*args, **kwargs):
native.android_library(*args, **kwargs)
def yoga_android_resource(*args, **kwargs):
native.android_resource(*args, **kwargs)
def yoga_apple_library(*args, **kwargs):
native.apple_library(*args, **kwargs)
def yoga_apple_test(*args, **kwargs):
native.apple_test(*args, **kwargs)
def yoga_cxx_binary(*args, **kwargs):
kwargs.pop("platforms", None)
native.cxx_binary(*args, **kwargs)
def yoga_cxx_library(*args, **kwargs):
# Currently unused
kwargs.pop("platforms", None)
native.cxx_library(*args, **kwargs)
def yoga_cxx_test(*args, **kwargs):
native.cxx_test(*args, **kwargs)
def yoga_java_binary(*args, **kwargs):
native.java_binary(*args, **kwargs)
def yoga_java_library(*args, **kwargs):
native.java_library(*args, **kwargs)
def yoga_java_test(*args, **kwargs):
native.java_test(*args, **kwargs)
def yoga_prebuilt_cxx_library(*args, **kwargs):
native.prebuilt_cxx_library(*args, **kwargs)
def yoga_prebuilt_jar(*args, **kwargs):
native.prebuilt_jar(*args, **kwargs)
def is_apple_platform():
return True
def yoga_apple_binary():
if is_apple_platform():
yoganet_ios_srcs = []
for arch in [
"iphonesimulator-x86_64",
"iphoneos-arm64",
]:
name = "yoganet-" + arch
yoganet_ios_srcs.append(":" + name)
native.genrule(
name = name,
srcs = [
yoga_dep(":yogaApple#%s,static" % arch),
yoga_dep("YogaKit:YogaKitApple#%s,static" % arch),
yoga_dep("csharp:yoganetApple#%s,static" % arch),
],
out = "libyoga-%s.a" % arch,
cmd = "libtool -static -o $OUT $SRCS",
visibility = [yoga_dep("csharp:yoganet-ios")],
)
native.genrule(
name = "yoganet-ios",
srcs = yoganet_ios_srcs,
out = "libyoga.a",
cmd = "lipo $SRCS -create -output $OUT",
visibility = ["PUBLIC"],
)
yoganet_macosx_target = "csharp:yoganetAppleMac#macosx-%s,dynamic"
native.genrule(
name = "yoganet-macosx",
srcs = [
yoga_dep(yoganet_macosx_target % "x86_64"),
],
out = "libyoga.dylib",
cmd = "lipo $SRCS -create -output $OUT",
visibility = ["PUBLIC"],
)
|
py
|
1a574e44c9115c097ac45fdb5de0f20322fbe332
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.core import CardFactory, MessageFactory
from botbuilder.dialogs import (
ComponentDialog,
DialogSet,
DialogTurnStatus,
WaterfallDialog,
WaterfallStepContext,
)
from botbuilder.dialogs.prompts import TextPrompt, PromptOptions
from botbuilder.schema import (
ActionTypes,
Attachment,
AnimationCard,
AudioCard,
HeroCard,
VideoCard,
ReceiptCard,
SigninCard,
ThumbnailCard,
MediaUrl,
CardAction,
CardImage,
ThumbnailUrl,
Fact,
ReceiptItem,
)
from .resources.adaptive_card_example import ADAPTIVE_CARD_CONTENT
from helpers.activity_helper import create_activity_reply
MAIN_WATERFALL_DIALOG = "mainWaterfallDialog"
class MainDialog(ComponentDialog):
def __init__(self):
super().__init__("MainDialog")
# Define the main dialog and its related components.
self.add_dialog(TextPrompt("TextPrompt"))
self.add_dialog(
WaterfallDialog(
MAIN_WATERFALL_DIALOG, [self.choice_card_step, self.show_card_step]
)
)
# The initial child Dialog to run.
self.initial_dialog_id = MAIN_WATERFALL_DIALOG
"""
1. Prompts the user if the user is not in the middle of a dialog.
2. Re-prompts the user when an invalid input is received.
"""
async def choice_card_step(self, step_context: WaterfallStepContext):
menu_text = (
"Which card would you like to see?\n"
"(1) Adaptive Card\n"
"(2) Animation Card\n"
"(3) Audio Card\n"
"(4) Hero Card\n"
"(5) Receipt Card\n"
"(6) Signin Card\n"
"(7) Thumbnail Card\n"
"(8) Video Card\n"
"(9) All Cards"
)
# Prompt the user with the configured PromptOptions.
return await step_context.prompt(
"TextPrompt", PromptOptions(prompt=MessageFactory.text(menu_text))
)
"""
Send a Rich Card response to the user based on their choice.
This method is only called when a valid prompt response is parsed from the user's response to the ChoicePrompt.
"""
async def show_card_step(self, step_context: WaterfallStepContext):
response = step_context.result.lower().strip()
choice_dict = {
"1": [self.create_adaptive_card],
"adaptive card": [self.create_adaptive_card],
"2": [self.create_animation_card],
"animation card": [self.create_animation_card],
"3": [self.create_audio_card],
"audio card": [self.create_audio_card],
"4": [self.create_hero_card],
"hero card": [self.create_hero_card],
"5": [self.create_receipt_card],
"receipt card": [self.create_receipt_card],
"6": [self.create_signin_card],
"signin card": [self.create_signin_card],
"7": [self.create_thumbnail_card],
"thumbnail card": [self.create_thumbnail_card],
"8": [self.create_video_card],
"video card": [self.create_video_card],
"9": [
self.create_adaptive_card,
self.create_animation_card,
self.create_audio_card,
self.create_hero_card,
self.create_receipt_card,
self.create_signin_card,
self.create_thumbnail_card,
self.create_video_card,
],
"all cards": [
self.create_adaptive_card,
self.create_animation_card,
self.create_audio_card,
self.create_hero_card,
self.create_receipt_card,
self.create_signin_card,
self.create_thumbnail_card,
self.create_video_card,
],
}
# Get the functions that will generate the card(s) for our response
# If the stripped response from the user is not found in our choice_dict, default to None
choice = choice_dict.get(response, None)
# If the user's choice was not found, respond saying the bot didn't understand the user's response.
if not choice:
not_found = create_activity_reply(
step_context.context.activity, "Sorry, I didn't understand that. :("
)
await step_context.context.send_activity(not_found)
else:
for func in choice:
card = func()
response = create_activity_reply(
step_context.context.activity, "", "", [card]
)
await step_context.context.send_activity(response)
# Give the user instructions about what to do next
await step_context.context.send_activity("Type anything to see another card.")
return await step_context.end_dialog()
"""
======================================
Helper functions used to create cards.
======================================
"""
# Methods to generate cards
def create_adaptive_card(self) -> Attachment:
return CardFactory.adaptive_card(ADAPTIVE_CARD_CONTENT)
def create_animation_card(self) -> Attachment:
card = AnimationCard(
media=[MediaUrl(url="http://i.giphy.com/Ki55RUbOV5njy.gif")],
title="Microsoft Bot Framework",
subtitle="Animation Card",
)
return CardFactory.animation_card(card)
def create_audio_card(self) -> Attachment:
card = AudioCard(
media=[MediaUrl(url="http://www.wavlist.com/movies/004/father.wav")],
title="I am your father",
subtitle="Star Wars: Episode V - The Empire Strikes Back",
text="The Empire Strikes Back (also known as Star Wars: Episode V – The Empire Strikes "
"Back) is a 1980 American epic space opera film directed by Irvin Kershner. Leigh "
"Brackett and Lawrence Kasdan wrote the screenplay, with George Lucas writing the "
"film's story and serving as executive producer. The second installment in the "
"original Star Wars trilogy, it was produced by Gary Kurtz for Lucasfilm Ltd. and "
"stars Mark Hamill, Harrison Ford, Carrie Fisher, Billy Dee Williams, Anthony "
"Daniels, David Prowse, Kenny Baker, Peter Mayhew and Frank Oz.",
image=ThumbnailUrl(
url="https://upload.wikimedia.org/wikipedia/en/3/3c/SW_-_Empire_Strikes_Back.jpg"
),
buttons=[
CardAction(
type=ActionTypes.open_url,
title="Read more",
value="https://en.wikipedia.org/wiki/The_Empire_Strikes_Back",
)
],
)
return CardFactory.audio_card(card)
def create_hero_card(self) -> Attachment:
card = HeroCard(
title="",
images=[
CardImage(
url="https://sec.ch9.ms/ch9/7ff5/e07cfef0-aa3b-40bb-9baa-7c9ef8ff7ff5/buildreactionbotframework_960.jpg"
)
],
buttons=[
CardAction(
type=ActionTypes.open_url,
title="Get Started",
value="https://docs.microsoft.com/en-us/azure/bot-service/",
)
],
)
return CardFactory.hero_card(card)
def create_video_card(self) -> Attachment:
card = VideoCard(
title="Big Buck Bunny",
subtitle="by the Blender Institute",
text="Big Buck Bunny (code-named Peach) is a short computer-animated comedy film by the Blender "
"Institute, part of the Blender Foundation. Like the foundation's previous film Elephants "
"Dream, the film was made using Blender, a free software application for animation made by "
"the same foundation. It was released as an open-source film under Creative Commons License "
"Attribution 3.0.",
media=[
MediaUrl(
url="http://download.blender.org/peach/bigbuckbunny_movies/"
"BigBuckBunny_320x180.mp4"
)
],
buttons=[
CardAction(
type=ActionTypes.open_url,
title="Learn More",
value="https://peach.blender.org/",
)
],
)
return CardFactory.video_card(card)
def create_receipt_card(self) -> Attachment:
card = ReceiptCard(
title="John Doe",
facts=[
Fact(key="Order Number", value="1234"),
Fact(key="Payment Method", value="VISA 5555-****"),
],
items=[
ReceiptItem(
title="Data Transfer",
price="$38.45",
quantity="368",
image=CardImage(
url="https://github.com/amido/azure-vector-icons/raw/master/"
"renders/traffic-manager.png"
),
),
ReceiptItem(
title="App Service",
price="$45.00",
quantity="720",
image=CardImage(
url="https://github.com/amido/azure-vector-icons/raw/master/"
"renders/cloud-service.png"
),
),
],
tax="$7.50",
total="90.95",
buttons=[
CardAction(
type=ActionTypes.open_url,
title="More Information",
value="https://azure.microsoft.com/en-us/pricing/details/bot-service/",
)
],
)
return CardFactory.receipt_card(card)
def create_signin_card(self) -> Attachment:
card = SigninCard(
text="BotFramework Sign-in Card",
buttons=[
CardAction(
type=ActionTypes.signin,
title="Sign-in",
value="https://login.microsoftonline.com",
)
],
)
return CardFactory.signin_card(card)
def create_thumbnail_card(self) -> Attachment:
card = ThumbnailCard(
title="BotFramework Thumbnail Card",
subtitle="Your bots — wherever your users are talking",
text="Build and connect intelligent bots to interact with your users naturally wherever"
" they are, from text/sms to Skype, Slack, Office 365 mail and other popular services.",
images=[
CardImage(
url="https://sec.ch9.ms/ch9/7ff5/"
"e07cfef0-aa3b-40bb-9baa-7c9ef8ff7ff5/"
"buildreactionbotframework_960.jpg"
)
],
buttons=[
CardAction(
type=ActionTypes.open_url,
title="Get Started",
value="https://docs.microsoft.com/en-us/azure/bot-service/",
)
],
)
return CardFactory.thumbnail_card(card)
|
py
|
1a574e4db5fd5bef82ccb9e217bbe06ea2501ace
|
# ParallelContext transfer functionality tests.
# This uses nonsense models and values to verify transfer takes place correctly.
import sys
from io import StringIO
from neuron import h
#h.nrnmpi_init()
pc = h.ParallelContext()
rank = pc.id()
nhost = pc.nhost()
if nhost > 1:
if rank == 0:
print("nhost > 1 so calls to expect_error will return without testing.")
def expect_error(callable, args, sec=None):
"""
Execute callable(args) and assert that it generated an error.
If sec is not None, executes callable(args, sec=sec)
Skips if nhost > 1 as all hoc_execerror calls end in MPI_ABORT.
Does not work well with nrniv launch since hoc_execerror messages do not
pass through sys.stderr.
"""
if nhost > 1:
return
old_stderr = sys.stderr
sys.stderr = my_stderr = StringIO()
err = 0
try:
if sec:
callable(*args, sec=sec)
else:
callable(*args)
except:
err=1
errmes = my_stderr.getvalue()
sys.stderr = old_stderr
if errmes:
errmes = errmes.splitlines()[0]
errmes = errmes[(errmes.find(':')+2):]
print("expect_error: %s" % errmes)
if err == 0:
print("expect_error: no err for %s%s" % (str(callable), str(args)))
assert(err)
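# Minimal usage sketch (hypothetical section name `soma`; the real calls appear
# in test_partrans below): pass the callable and its positional args separately,
# and the helper asserts that the call raised an error, e.g.
#   expect_error(pc.source_var, (soma(.5)._ref_v, -1), sec=soma)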
# HGap POINT_PROCESS via ChannelBuilder.
# Cannot use with extracellular.
ks = h.KSChan(1)
ks.name("HGap")
ks.iv_type(0)
ks.gmax(0)
ks.erev(0)
# Cell with enough nonsense stuff to exercise transfer possibilities.
class Cell():
def __init__(self):
self.soma = h.Section(name="soma", cell=self)
self.soma.diam = 10.
self.soma.L = 10.
self.soma.insert("na_ion") # can use nai as transfer source
# can use POINT_PROCESS range variable as targets
self.ic = h.IClamp(self.soma(.5))
self.vc = h.SEClamp(self.soma(.5))
self.vc.rs = 1e9 # no voltage clamp current
self.hgap = [None for _ in range(2)] # filled by mkgaps
def run():
pc.setup_transfer()
h.finitialize()
h.fadvance()
model = None # Global allows teardown of model
def teardown():
"""
destroy model
"""
global model
pc.gid_clear()
model = None
def mkmodel(ncell):
"""
Destroy existing model and re-create with ncell Cells.
"""
global model
if model:
teardown()
cells = {}
for gid in range(rank, ncell, nhost):
cells[gid] = Cell()
pc.set_gid2node(gid, rank)
pc.cell(gid, h.NetCon(cells[gid].soma(.5)._ref_v, None, sec=cells[gid].soma))
model = (cells, ncell)
def mkgaps(gids):
''' For list of gids, full gap, right to left '''
gidset = set()
for gid in gids:
g = [gid, (gid + 1)%model[1]]
sids = [i + 1000 for i in g]
for i, j in enumerate([1,0]):
if pc.gid_exists(g[i]):
cell = model[0][g[i]]
if g[i] not in gidset: # source var sid cannot be used twice
pc.source_var(cell.soma(.5)._ref_v, sids[i], sec=cell.soma)
gidset.add(g[i])
assert(cell.hgap[j] is None)
cell.hgap[j] = h.HGap(cell.soma(.5))
pc.target_var(cell.hgap[j], cell.hgap[j]._ref_e, sids[j])
cell.hgap[j].gmax = 0.0001
def transfer1(amp1=True):
"""
round robin transfer v to ic.amp and vc.amp1, nai to vc.amp2
"""
ncell = model[1]
for gid, cell in model[0].items():
s = cell.soma
srcsid = gid
tarsid = (gid+1)%ncell
pc.source_var(s(.5)._ref_v, srcsid, sec=s)
pc.source_var(s(.5)._ref_nai, srcsid+ncell, sec=s)
pc.target_var(cell.ic, cell.ic._ref_amp, tarsid)
if amp1:
pc.target_var(cell.vc, cell.vc._ref_amp1, tarsid)
pc.target_var(cell.vc, cell.vc._ref_amp2, tarsid+ncell)
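# Illustrative wiring for ncell == 3 (assumed size): cell 0's ic.amp and vc.amp1
# receive cell 1's v and its vc.amp2 receives cell 1's nai; cell 1 reads from
# cell 2, and cell 2 wraps around to cell 0, which is what check_values() checks.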
def init_values():
"""
Initialize sources to their sid values and targets to 0
This allows substantive test that source values make it to targets.
"""
ncell = model[1]
for gid, c in model[0].items():
c.soma(.5).v = gid
c.soma(.5).nai = gid+ncell
c.ic.amp = 0
c.vc.amp1 = 0
c.vc.amp2 = 0
def check_values():
"""
Verify that target values are equal to source values.
"""
values = {}
for gid, c in model[0].items():
vi = c.soma(.5).v
if (h.ismembrane("extracellular", sec = c.soma)):
vi += c.soma(.5).vext[0]
values[gid] = {'v':vi, 'nai':c.soma(.5).nai, 'amp':c.ic.amp, 'amp1':c.vc.amp1, 'amp2':c.vc.amp2}
x = pc.py_gather(values, 0)
if rank == 0:
values = {}
for v in x:
values.update(v)
ncell = len(values)
for gid in values:
v1 = values[gid]
v2 = values[(gid+ncell-1)%ncell]
assert(v1['v'] == v2['amp'])
assert(v1['v'] == v2['amp1'])
assert(v1['nai'] == v2['amp2'])
def test_partrans():
# no transfer targets or sources.
mkmodel(4)
run()
# invalid source or target sid.
if 0 in model[0]:
cell = model[0][0]
s = cell.soma
expect_error(pc.source_var, (s(.5)._ref_v, -1), sec=s)
expect_error(pc.target_var, (cell.ic, cell.ic._ref_amp, -1))
# target with no source.
if pc.gid_exists(1):
cell = pc.gid2cell(1)
pc.target_var(cell.ic, cell.ic._ref_amp, 1)
expect_error(run, ())
mkmodel(4)
# source with no target (not an error).
if pc.gid_exists(1):
cell = pc.gid2cell(1)
pc.source_var(cell.soma(.5)._ref_v, 1, sec=cell.soma)
run()
# No point process for target
if pc.gid_exists(1):
cell = pc.gid2cell(1)
pc.target_var(cell.vc._ref_amp3, 1)
run() # ok
pc.nthread(2)
expect_error(run, ()) # Do not know the POINT_PROCESS target
pc.nthread(1)
# Wrong sec for source ref and wrong point process for target ref.
mkmodel(1)
if pc.gid_exists(0):
cell = pc.gid2cell(0)
sec = h.Section(name="dend")
expect_error(pc.source_var, (cell.soma(.5)._ref_v, 1), sec=sec)
expect_error(pc.source_var, (cell.soma(.5)._ref_nai, 2), sec=sec)
del sec
expect_error(pc.target_var,(cell.ic, cell.vc._ref_amp3, 1))
# source sid already in use
expect_error(pc.source_var, (cell.soma(.5)._ref_nai, 1), sec=cell.soma)
# partrans update: could not find parameter index
# pv2node checks the parent
mkmodel(1)
s1 = h.Section(name="dend")
s2 = h.Section(name="soma")
ic = h.IClamp(s1(.5))
pc.source_var(s1(0)._ref_v, rank, sec=s1)
pc.target_var(ic, ic._ref_amp, rank)
run()
assert(s1(0).v == ic.amp)
'''
# but following changes the source node and things get screwed up
# because of continuing to use a freed Node*. The solution is
# beyond the scope of this pull request and would involve replacing
# description in terms of Node* with (Section*, arc_position)
s1.connect(s2(.5))
run()
print(s1(0).v, ic.amp)
assert(s1(0).v == ic.amp)
'''
# non_vsrc_update property disappears from Node*
s1.insert("pas") # not allowed to uninsert ions :(
pc.source_var(s1(.5)._ref_e_pas, rank+10, sec=s1)
pc.target_var(ic, ic._ref_delay, rank+10)
run()
assert(s1(.5).e_pas == ic.delay)
s1.uninsert("pas")
expect_error(run, ())
teardown()
del ic, s1, s2
# missing setup_transfer
mkmodel(4)
transfer1()
expect_error(h.finitialize, (-65,))
# round robin transfer v to ic.amp and vc.amp1, nai to vc.amp2
ncell = 5
mkmodel(ncell)
transfer1()
init_values()
run()
check_values()
# nrnmpi_int_alltoallv_sparse
h.nrn_sparse_partrans = 1
mkmodel(5)
transfer1()
init_values()
run()
check_values()
h.nrn_sparse_partrans = 0
# impedance error (number of gap junctions not equal to number of pc.transfer_var calls)
imp = h.Impedance()
if 0 in model[0]:
imp.loc(model[0][0].soma(.5))
expect_error(imp.compute, (1, 1))
del imp
# For impedance, pc.target_var requires that its first arg be a reference to the POINT_PROCESS
mkmodel(2)
if pc.gid_exists(0):
cell = pc.gid2cell(0)
pc.source_var(cell.soma(.5)._ref_v, 1000, sec=cell.soma)
cell.hgap[1] = h.HGap(cell.soma(.5))
pc.target_var(cell.hgap[1]._ref_e, 1001)
if pc.gid_exists(1):
cell = pc.gid2cell(1)
pc.source_var(cell.soma(.5)._ref_v, 1001, sec=cell.soma)
cell.hgap[0] = h.HGap(cell.soma(.5))
pc.target_var(cell.hgap[0]._ref_e, 1000)
pc.setup_transfer()
imp = h.Impedance()
h.finitialize(-65)
if pc.gid_exists(0):
imp.loc(pc.gid2cell(0).soma(.5))
expect_error(imp.compute, (10, 1, 100))
del imp, cell
# impedance
ncell = 5
mkmodel(ncell)
mkgaps(list(range(ncell-1)))
pc.setup_transfer()
imp = h.Impedance()
h.finitialize(-65)
if 0 in model[0]:
imp.loc(model[0][0].soma(.5))
niter=imp.compute(10, 1, 100)
if rank == 0:
print("impedance iterations=%d"%niter)
# tickle execution of target_ptr_update for one more line of coverage.
if 0 in model[0]:
model[0][0].hgap[1].loc(model[0][0].soma(0))
model[0][0].hgap[1].loc(model[0][0].soma(.5))
niter=imp.compute(10, 1, 100)
del imp
#CoreNEURON gap file generation
mkmodel(ncell)
transfer1()
# following is a bit tricky and needs some user help in the docs.
# cannot be cache_efficient if general sparse matrix solver in effect.
cvode = h.CVode()
assert(cvode.use_mxb(0) == 0)
assert(cvode.cache_efficient(1) == 1)
pc.setup_transfer()
h.finitialize(-65)
pc.nrncore_write("tmp")
# CoreNEURON: one thread empty of gaps
mkmodel(1)
transfer1()
s = h.Section("dend")
pc.set_gid2node(rank+10, rank)
pc.cell(rank+10, h.NetCon(s(.5)._ref_v, None, sec=s))
pc.nthread(2)
pc.setup_transfer()
h.finitialize(-65)
pc.nrncore_write("tmp")
pc.nthread(1)
teardown()
del s
# There are single thread circumstances where target POINT_PROCESS is needed
s = h.Section("dend")
pc.set_gid2node(rank, rank)
pc.cell(rank, h.NetCon(s(.5)._ref_v, None, sec=s))
pc.source_var(s(.5)._ref_v, rank, sec=s)
ic=h.IClamp(s(.5))
pc.target_var(ic._ref_amp, rank)
pc.setup_transfer()
expect_error(h.finitialize, (-65,))
teardown()
del ic, s
# threads
mkmodel(ncell)
transfer1()
pc.nthread(2)
init_values()
run()
check_values()
pc.nthread(1)
# extracellular means use v = vm+vext[0]
for cell in model[0].values():
cell.soma.insert("extracellular")
init_values()
run()
check_values()
teardown()
if __name__ == "__main__":
test_partrans()
pc.barrier()
h.quit()
|
py
|
1a574e727d1f4d865bd356daf5d30ac0fa756904
|
import argparse
import sys
sys.setrecursionlimit(100000)
from interpreter import executeFunctions as executionFunctions
from interpreter import imageFunctions as imageWrapper
from GUI import main as GUIMain
parser = argparse.ArgumentParser(description='Interprets a piet image')
parser.add_argument("-f", "--file", required=True, type=str, help="complete filepath to a .png or .gif image")
parser.add_argument("-v", "--verbose", action="store_true", help="Outputs number of steps to STDOUT")
parser.add_argument("-g", "--graphical", action="store_true", help="Opens GUI with the file loaded")
args = parser.parse_args()
if not args.graphical:
executionFunctions.interpret(imageWrapper.getImage(args.file))
if args.verbose:
print("\nTotal steps: {}".format(executionFunctions.takeStep.counter))
else:
app = GUIMain.GUI()
app.setFileText(args.file)
app.loadFile()
app.run()
|
py
|
1a574eb9ab4bcee0bde6111285cc6d48089f819b
|
import math
from django.db.models.expressions import Func
from django.db.models.fields import FloatField, IntegerField
from django.db.models.functions import Cast
from django.db.models.functions.mixins import (
FixDecimalInputMixin, NumericOutputFieldMixin,
)
from django.db.models.lookups import Transform
class Abs(Transform):
function = 'ABS'
lookup_name = 'abs'
class ACos(NumericOutputFieldMixin, Transform):
function = 'ACOS'
lookup_name = 'acos'
class ASin(NumericOutputFieldMixin, Transform):
function = 'ASIN'
lookup_name = 'asin'
class ATan(NumericOutputFieldMixin, Transform):
function = 'ATAN'
lookup_name = 'atan'
class ATan2(NumericOutputFieldMixin, Func):
function = 'ATAN2'
arity = 2
def as_sqlite(self, compiler, connection, **extra_context):
if not getattr(connection.ops, 'spatialite', False) or connection.ops.spatial_version >= (5, 0, 0):
return self.as_sql(compiler, connection)
# This function is usually ATan2(y, x), returning the inverse tangent
# of y / x, but it's ATan2(x, y) on SpatiaLite < 5.0.0.
# Cast integers to float to avoid inconsistent/buggy behavior if the
# arguments are mixed between integer and float or decimal.
# https://www.gaia-gis.it/fossil/libspatialite/tktview?name=0f72cca3a2
clone = self.copy()
clone.set_source_expressions([
Cast(expression, FloatField()) if isinstance(expression.output_field, IntegerField)
else expression for expression in self.get_source_expressions()[::-1]
])
return clone.as_sql(compiler, connection, **extra_context)
class Ceil(Transform):
function = 'CEILING'
lookup_name = 'ceil'
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='CEIL', **extra_context)
class Cos(NumericOutputFieldMixin, Transform):
function = 'COS'
lookup_name = 'cos'
class Cot(NumericOutputFieldMixin, Transform):
function = 'COT'
lookup_name = 'cot'
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, template='(1 / TAN(%(expressions)s))', **extra_context)
class Degrees(NumericOutputFieldMixin, Transform):
function = 'DEGREES'
lookup_name = 'degrees'
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(
compiler, connection,
template='((%%(expressions)s) * 180 / %s)' % math.pi,
**extra_context
)
class Exp(NumericOutputFieldMixin, Transform):
function = 'EXP'
lookup_name = 'exp'
class Floor(Transform):
function = 'FLOOR'
lookup_name = 'floor'
class Ln(NumericOutputFieldMixin, Transform):
function = 'LN'
lookup_name = 'ln'
class Log(FixDecimalInputMixin, NumericOutputFieldMixin, Func):
function = 'LOG'
arity = 2
def as_sqlite(self, compiler, connection, **extra_context):
if not getattr(connection.ops, 'spatialite', False):
return self.as_sql(compiler, connection)
# This function is usually Log(b, x) returning the logarithm of x to
# the base b, but on SpatiaLite it's Log(x, b).
clone = self.copy()
clone.set_source_expressions(self.get_source_expressions()[::-1])
return clone.as_sql(compiler, connection, **extra_context)
class Mod(FixDecimalInputMixin, NumericOutputFieldMixin, Func):
function = 'MOD'
arity = 2
class Pi(NumericOutputFieldMixin, Func):
function = 'PI'
arity = 0
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, template=str(math.pi), **extra_context)
class Power(NumericOutputFieldMixin, Func):
function = 'POWER'
arity = 2
class Radians(NumericOutputFieldMixin, Transform):
function = 'RADIANS'
lookup_name = 'radians'
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(
compiler, connection,
template='((%%(expressions)s) * %s / 180)' % math.pi,
**extra_context
)
class Round(Transform):
function = 'ROUND'
lookup_name = 'round'
class Sin(NumericOutputFieldMixin, Transform):
function = 'SIN'
lookup_name = 'sin'
class Sqrt(NumericOutputFieldMixin, Transform):
function = 'SQRT'
lookup_name = 'sqrt'
class Tan(NumericOutputFieldMixin, Transform):
function = 'TAN'
lookup_name = 'tan'
|
py
|
1a574fd356fe6b49c97bd56abf48a360357c4909
|
from View.ui_main import Ui_MainWindow
from Model.detect import detect_video
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
import sys
import os
class main_window(QtWidgets.QMainWindow):
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.video_path = './'
self.save_dir = './'
self.video_name = None
self.outputfile = None
self.ui.video_pushButton.clicked.connect(self.get_file_name)
self.ui.savepath_pushButton.clicked.connect(self.get_dir_name)
self.ui.checkout_pushButton.setEnabled(False)
self.ui.checkout_pushButton.clicked.connect(self.open_output_file)
self.ui.run_buttom.clicked.connect(self.run)
def run(self):
# detect function
self.ui.log_field.setText("working on {} ...".format(self.video_name))
counts=0
counts = detect_video(self.ui.videopath_browser.toPlainText(), self.ui.unit_comboBox.currentIndex() + 1,
self.ui.savepath_browser.toPlainText(),
skipped=int(self.ui.skipped_browser.toPlainText()))
self.ui.checkout_pushButton.setEnabled(True)
self.set_total_count(counts)
self.set_save_path()
self.ui.log_field.setText("working on {} ... \ndone!".format(self.video_name))
def get_file_name(self):
# QFileDialog.getOpenFileName()
fileName1, filetype = QFileDialog.getOpenFileName(self, 'Open video file',
self.video_path) # a file-extension filter could be set here
self.video_path = fileName1
try:
self.video_name = fileName1.split('/')[-1][:-4]
except:
self.video_name = fileName1
print(fileName1, "=========", filetype)
self.ui.videopath_browser.append(fileName1)
self.ui.savepath_browser.append(fileName1[:-4] + ".csv")
def get_dir_name(self):
# QFileDialog.getOpenFileName()
dirName1 = QFileDialog.getExistingDirectory(self, 'Open saving directory',
self.save_dir) # a file-extension filter could be set here
self.save_dir = dirName1
self.outputfile = os.path.join(dirName1, self.video_name + ".csv")
print(dirName1, "=========")
self.ui.savepath_browser.append(self.outputfile)
# update the label showing how many numbers were extracted in total
def set_total_count(self, n):
self.ui.data_count.setText("{}个数字".format(n))
def set_save_path(self):
self.ui.saved2path.setText("保存至{}".format(self.outputfile))
def open_output_file(self):
assert self.outputfile and os.path.exists(self.outputfile), "path not initialized"
QtGui.QDesktopServices.openUrl(QtCore.QUrl.fromLocalFile(self.outputfile))
# TODO: work in progress
def update_logs(self):
while (self):
if os.path.exists(self.outputfile):
output_line = 10
with open(self.outputfile, 'r') as opfile:
lines = opfile.readlines()
log = ""
start = -output_line if len(lines) > output_line else 0
for line in lines[start:-1]:
log += line + "\n"
self.ui.log_field.setText(log)
else:
pass
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = main_window()
window.show()
sys.exit(app.exec_())
|
py
|
1a574fdb3bbe38296ffb63a1593fe5a2dea15784
|
# encoding: UTF-8
from generate_data_type import pre_process
import sec_struct
STRUCT_DICT = sec_struct.__dict__
API_NAME = 'TdApi'
FILE_PREFIX = 'sec_td_'
ERROR_STRUCT = 'DFITCSECRspInfoField'
#----------------------------------------------------------------------
def process_callback(cpp_line):
'''Handle a callback function declaration'''
# pre-processing
new_line = cpp_line.replace(' virtual void ', '')
# 获取回调函数名
cb_name = new_line[:new_line.index('(')]
# 获取回调参数
args_line = new_line[(new_line.index('(')+1):new_line.index(')')]
args_line = args_line.replace('struct ', '')
args_line = args_line.replace('*', '')
if not args_line:
return
args_list = args_line.split(',') # 参数列表
args_type_list = [] # 参数类型
args_name_list = [] # 参数名
for arg in args_list:
l = arg.split(' ')
l = [i for i in l if i]
args_type_list.append(l[0])
args_name_list.append(l[1])
create_task(cb_name, args_type_list, args_name_list, cpp_line)
create_process(cb_name, args_type_list, args_name_list)
create_callback(cb_name, args_type_list)
#----------------------------------------------------------------------
def process_function(cpp_line):
'''Handle a request (active call) function declaration'''
new_line = cpp_line.replace(' virtual int ', '')
new_line = new_line.replace(') = 0;\n', '')
# extract the request function name
fc_name = new_line[:new_line.index('(')]
# extract the request arguments
args_line = new_line[(new_line.index('(')+1):new_line.index(')')]
args_line = args_line.replace('*', '')
args_line = args_line.replace('struct', '')
if not args_line:
return
args_list = args_line.split(',') # argument list
args_type_list = [] # argument types
args_name_list = [] # argument names
for arg in args_list:
l = arg.split(' ')
l = [i for i in l if i]
args_type_list.append(l[0])
args_name_list.append(l[1])
print args_type_list
if args_type_list and args_type_list[0] in STRUCT_DICT:
create_function(fc_name, args_type_list, args_name_list)
#----------------------------------------------------------------------
def create_task(cb_name, args_type_list, args_name_list, cpp_line):
'''Generate the task-creation section'''
# build a Task object from the callback arguments and push it onto the queue
new_cb_line = cpp_line.replace(' virtual void ', 'void ' + API_NAME + '::')
new_cb_line = new_cb_line.replace('{}', '')
source_task_f.write(new_cb_line + '\n')
source_task_f.write('{\n')
source_task_f.write('\tTask *task = new Task();\n')
source_task_f.write('\ttask->task_name = %s;\n' %cb_name.upper())
for i, arg_type in enumerate(args_type_list):
arg_name = args_name_list[i]
if arg_type == 'bool':
source_task_f.write('\ttask->task_last = %s;\n' %arg_name)
elif ERROR_STRUCT in arg_type:
source_task_f.write('\n')
source_task_f.write('\tif (%s)\n' %arg_name)
source_task_f.write('\t{\n')
source_task_f.write('\t\t%s *task_error = new %s();\n' %(arg_type, arg_type))
source_task_f.write('\t\t *task_error = *%s;\n' %arg_name)
source_task_f.write('\t\ttask->task_error = task_error;\n')
source_task_f.write('\t}\n')
source_task_f.write('\n')
else:
source_task_f.write('\n')
source_task_f.write('\tif (%s)\n' %arg_name)
source_task_f.write('\t{\n')
source_task_f.write('\t\t%s *task_data = new %s();\n' %(arg_type, arg_type))
source_task_f.write('\t\t *task_data = *%s;\n' %arg_name)
source_task_f.write('\t\ttask->task_data = task_data;\n')
source_task_f.write('\t}\n')
source_task_f.write('\tthis->task_queue.push(task);\n')
source_task_f.write('};\n')
source_task_f.write('\n')
# #define constants
global define_count
define_count += 1
header_define_f.write('#define %s %s\n' %(cb_name.upper(), define_count))
# code for the switch block
source_switch_f.write('case %s:\n' %cb_name.upper())
source_switch_f.write('{\n')
source_switch_f.write('\tthis->%s(task);\n' %cb_name.replace('On', 'process'))
source_switch_f.write('\tbreak;\n')
source_switch_f.write('}\n')
source_switch_f.write('\n')
#----------------------------------------------------------------------
def create_process(cb_name, args_type_list, args_name_list):
'''Generate the task-processing section'''
source_process_f.write('void %s::%s(Task *task)\n' %(API_NAME, cb_name.replace('On', 'process')))
source_process_f.write('{\n')
source_process_f.write('\tPyLock lock;\n')
new_args_list = []
for i, arg_type in enumerate(args_type_list):
if ERROR_STRUCT in arg_type:
source_process_f.write('\tdict error;\n')
source_process_f.write('\tif (task->task_error)\n')
source_process_f.write('\t{\n')
source_process_f.write('\t\t%s *task_error = (%s*)task->task_error;\n' %(arg_type, arg_type))
struct = STRUCT_DICT[arg_type]
for key in struct.keys():
source_process_f.write('\t\terror[\"%s\"] = task_error->%s;\n' %(key, key))
source_process_f.write('\t\tdelete task->task_error;\n')
source_process_f.write('\t}\n')
source_process_f.write('\n')
new_args_list.append('error')
elif arg_type in STRUCT_DICT:
source_process_f.write('\tdict data;\n')
source_process_f.write('\tif (task->task_data)\n')
source_process_f.write('\t{\n')
source_process_f.write('\t\t%s *task_data = (%s*)task->task_data;\n' %(arg_type, arg_type))
struct = STRUCT_DICT[arg_type]
for key in struct.keys():
source_process_f.write('\t\tdata[\"%s\"] = task_data->%s;\n' %(key, key))
source_process_f.write('\t\tdelete task->task_data;\n')
source_process_f.write('\t}\n')
source_process_f.write('\n')
new_args_list.append('data')
elif arg_type == 'bool':
new_args_list.append('task->task_last')
new_args = ', '.join(new_args_list)
source_process_f.write('\tthis->%s(%s);\n' %(cb_name.replace('On', 'on'), new_args))
source_process_f.write('\tdelete task;\n')
source_process_f.write('};\n')
source_process_f.write('\n')
# generate the process declarations for the .h file
process_line = 'void %s (Task *task);\n' %(cb_name.replace('On', 'process'))
header_process_f.write(process_line)
header_process_f.write('\n')
#----------------------------------------------------------------------
def create_callback(cb_name, args_type_list):
'''Generate the callback section'''
# generate the on* declarations for the .h file
new_args_list = []
new_names_list = []
for arg_type in args_type_list:
if arg_type == 'bool':
new_args_list.append('bool flag')
new_names_list.append('flag')
elif arg_type == ERROR_STRUCT:
new_args_list.append('dict error')
new_names_list.append('error')
elif arg_type in STRUCT_DICT:
new_args_list.append('dict data')
new_names_list.append('data')
new_args_line = ', '.join(new_args_list)
new_line = 'virtual void %s(%s){};\n' %(cb_name.replace('On', 'on'), new_args_line)
header_on_f.write(new_line)
header_on_f.write('\n')
# generate the wrapper section for the .cpp file
override_line = '("%s")(%s)' %(cb_name.replace('On', 'on'), ', '.join(new_names_list))
source_wrap_f.write(new_line.replace('{};', ''))
source_wrap_f.write('{\n')
source_wrap_f.write('\ttry\n')
source_wrap_f.write('\t{\n')
source_wrap_f.write('\t\tthis->get_override%s;\n' %override_line)
source_wrap_f.write('\t}\n')
source_wrap_f.write('\tcatch (error_already_set const &)\n')
source_wrap_f.write('\t{\n')
source_wrap_f.write('\t\tPyErr_Print();\n')
source_wrap_f.write('\t}\n')
source_wrap_f.write('};\n')
source_wrap_f.write('\n')
#----------------------------------------------------------------------
def create_function(fc_name, args_type_list, args_name_list):
'''Generate the request-function section'''
# generate the request functions for the .cpp file
arg_type = args_type_list[0]
struct = STRUCT_DICT[arg_type]
source_function_f.write('int %s::%s(dict req)\n' %(API_NAME, fc_name.replace('Req', 'req')))
source_function_f.write('{\n')
source_function_f.write('\t%s myreq = %s();\n' %(arg_type, arg_type))
source_function_f.write('\tmemset(&myreq, 0, sizeof(myreq));\n')
for key, value in struct.items():
if value == 'string':
line = '\tgetString(req, "%s", myreq.%s);\n' %(key, key)
elif value == 'int':
line = '\tgetInt(req, "%s", &myreq.%s);\n' %(key, key)
elif value == 'long':
line = '\tgetLong(req, "%s", &myreq.%s);\n' %(key, key)
elif value == 'double':
line = '\tgetDouble(req, "%s", &myreq.%s);\n' %(key, key)
source_function_f.write(line)
source_function_f.write('\tint i = this->api->%s(&myreq);\n' %fc_name)
source_function_f.write('\treturn i;\n')
source_function_f.write('};\n')
source_function_f.write('\n')
# generate the request-function declarations for the .h file
if 'Req' in fc_name:
req_line = 'int %s(dict req);\n' %fc_name.replace('Req', 'req')
header_function_f.write(req_line)
header_function_f.write('\n')
# open files
cpp_f = open('DFITCSECTraderApi.h', 'r')
source_task_f = open(FILE_PREFIX + 'task.cpp', 'w')
source_process_f = open(FILE_PREFIX + 'process.cpp', 'w')
source_function_f = open(FILE_PREFIX + 'function.cpp', 'w')
source_switch_f = open(FILE_PREFIX + 'switch.cpp', 'w')
source_wrap_f = open(FILE_PREFIX + 'wrap.cpp', 'w')
header_define_f = open(FILE_PREFIX + 'define.h', 'w')
header_process_f = open(FILE_PREFIX + 'header_process.h', 'w')
header_on_f = open(FILE_PREFIX + 'header_on.h', 'w')
header_function_f = open(FILE_PREFIX + 'header_function.h', 'w')
# counter for the generated #define constants
define_count = 0
# iterate over the header and process each line
for n, cpp_line in enumerate(cpp_f):
cpp_line = pre_process(cpp_line)
if 'virtual void On' in cpp_line:
process_callback(cpp_line)
elif 'virtual int' in cpp_line:
process_function(cpp_line)
# close files
cpp_f.close()
source_task_f.close()
source_process_f.close()
source_function_f.close()
source_switch_f.close()
source_wrap_f.close()
header_define_f.close()
header_process_f.close()
header_on_f.close()
header_function_f.close()
print API_NAME + u'处理完成'
|
py
|
1a5750218bfdbd19ccfd096df631056e9718d2e4
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from enum import auto
from typing import Any, Callable, Dict, List
import regex
from bkuser_core.categories.constants import CategoryType
from bkuser_core.categories.models import ProfileCategory
from bkuser_core.common.enum import AutoLowerEnum
from bkuser_core.departments.models import Department
from bkuser_core.profiles.models import DynamicFieldInfo, Profile
from django.utils.translation import ugettext_lazy as _
class IAMCallbackMethods(AutoLowerEnum):
LIST_ATTR = auto()
LIST_ATTR_VALUE = auto()
LIST_INSTANCE = auto()
FETCH_INSTANCE_INFO = auto()
LIST_INSTANCE_BY_POLICY = auto()
SEARCH_INSTANCE = auto()
_choices_labels = (
(LIST_ATTR, "查询某个资源类型可用于配置权限的属性列表"),
(LIST_ATTR_VALUE, "获取一个资源类型某个属性的值列表"),
(LIST_INSTANCE, "根据过滤条件查询实例"),
(FETCH_INSTANCE_INFO, "批量获取资源实例详情"),
(LIST_INSTANCE_BY_POLICY, "根据策略表达式查询资源实例"),
(SEARCH_INSTANCE, "搜索资源实例"),
)
class PrincipalTypeEnum(AutoLowerEnum):
USER = auto()
class IAMAction(AutoLowerEnum):
# user fields
MANAGE_FIELD = auto()
VIEW_FIELD = auto()
# audit
VIEW_AUDIT = auto()
# categories
CREATE_LOCAL_CATEGORY = auto()
CREATE_LDAP_CATEGORY = auto()
CREATE_MAD_CATEGORY = auto()
CREATE_CUSTOM_CATEGORY = auto()
MANAGE_CATEGORY = auto()
VIEW_CATEGORY = auto()
# departments
CREATE_ROOT_DEPARTMENT = auto()
MANAGE_DEPARTMENT = auto()
VIEW_DEPARTMENT = auto()
@classmethod
def get_choice_label(cls, action_id: "IAMAction") -> str:
return {
cls.MANAGE_FIELD: "用户字段管理",
cls.VIEW_FIELD: "查看字段",
cls.VIEW_AUDIT: "审计信息查看",
cls.CREATE_LOCAL_CATEGORY: "本地用户目录新建",
cls.CREATE_LDAP_CATEGORY: "LDAP目录新建",
cls.CREATE_MAD_CATEGORY: "MAD目录新建",
cls.CREATE_CUSTOM_CATEGORY: "自定义目录新建",
cls.MANAGE_CATEGORY: "目录管理",
cls.VIEW_CATEGORY: "查看目录",
cls.CREATE_ROOT_DEPARTMENT: "根组织新建",
cls.MANAGE_DEPARTMENT: "组织和成员管理",
cls.VIEW_DEPARTMENT: "组织和成员查看",
}[action_id]
@classmethod
def get_global_actions(cls) -> tuple:
"""不需要和任何资源绑定,只需要判断某人是否有某个操作的权限"""
return (
cls.VIEW_AUDIT,
cls.VIEW_FIELD,
cls.MANAGE_FIELD,
cls.CREATE_MAD_CATEGORY,
cls.CREATE_LDAP_CATEGORY,
cls.CREATE_LOCAL_CATEGORY,
cls.CREATE_CUSTOM_CATEGORY,
)
@classmethod
def get_action_by_category_type(cls, category_type: str) -> "IAMAction":
return { # type: ignore
CategoryType.LOCAL.value: cls.CREATE_LOCAL_CATEGORY,
CategoryType.LDAP.value: cls.CREATE_LDAP_CATEGORY,
CategoryType.MAD.value: cls.CREATE_MAD_CATEGORY,
}[category_type]
@classmethod
def is_global_action(cls, action_id: "IAMAction") -> bool:
for i in cls.get_global_actions():
if action_id == i:
return True
return False
@classmethod
def get_related_resource_types(cls, action_id: "IAMAction") -> list:
return {
cls.MANAGE_CATEGORY: [ResourceType.CATEGORY],
cls.VIEW_CATEGORY: [ResourceType.CATEGORY],
cls.VIEW_DEPARTMENT: [ResourceType.DEPARTMENT],
cls.MANAGE_DEPARTMENT: [ResourceType.DEPARTMENT],
cls.CREATE_ROOT_DEPARTMENT: [ResourceType.CATEGORY],
}[action_id]
class ResourceType(AutoLowerEnum):
FIELD = auto()
CATEGORY = auto()
DEPARTMENT = auto()
PROFILE = auto()
@classmethod
def get_type_name(cls, resource_type: "ResourceType") -> str:
return {
cls.FIELD: _("用户字段"),
cls.CATEGORY: _("用户目录"),
cls.DEPARTMENT: _("组织"),
cls.PROFILE: _("用户"),
}[resource_type]
@classmethod
def get_by_model(cls, instance) -> "ResourceType":
return { # type: ignore
Department: cls.DEPARTMENT,
ProfileCategory: cls.CATEGORY,
DynamicFieldInfo: cls.FIELD,
Profile: cls.PROFILE,
}[type(instance)]
@classmethod
def get_attr_by_model(cls, instance, index: int) -> str:
"""通过 model instance 获取"""
type_ = cls.get_by_model(instance)
id_name_pair = cls.get_id_name_pair(type_)
return getattr(instance, id_name_pair[index])
@classmethod
def get_attributes_mapping(cls, instance) -> dict:
"""获取模型和权限中心属性对应"""
def get_department_path_attribute(obj):
start = f"/category,{obj.category_id}/"
ancestor_ids = obj.get_ancestors(include_self=True).values_list("id", flat=True)
for ancestor_id in ancestor_ids:
start += f"department,{ancestor_id}/"
return {"_bk_iam_path_": start}
_map: Dict[Any, Callable] = {
cls.DEPARTMENT: get_department_path_attribute,
}
try:
return _map[cls.get_by_model(instance)](instance)
except KeyError:
return {}
@classmethod
def get_key_mapping(cls, resource_type: "ResourceType") -> dict:
def parse_department_path(data):
"""解析 department path"""
value = data["value"]
field_map = {"department": "parent_id", "category": "category_id"}
value_pattern = r"^\/((?P<resource_type>\w+),(?P<resource_id>\d+)\/)+"
r = regex.match(value_pattern, value).capturesdict()
r = list(zip(r["resource_type"], r["resource_id"]))
the_last_of_path = r[-1]
            # For non-leaf-node policies, return the last id in the path directly as the resource id
if "node_type" in data and data["node_type"] == "non-leaf":
field_map["department"] = "id"
return field_map[the_last_of_path[0]], int(the_last_of_path[1])
_map: Dict[Any, dict] = {
cls.DEPARTMENT: {
"department.id": "id",
"department._bk_iam_path_": parse_department_path,
},
cls.CATEGORY: {"category.id": "id"},
cls.FIELD: {"field.id": "name"},
cls.PROFILE: {},
}
return _map[resource_type]
@classmethod
def get_id_name_pair(cls, resource_type: "ResourceType") -> tuple:
"""获取 id name 对"""
_map: Dict[Any, tuple] = {
cls.DEPARTMENT: ("id", "name"),
cls.CATEGORY: ("id", "display_name"),
cls.FIELD: ("id", "display_name"),
cls.PROFILE: ("id", "username"),
}
return _map[resource_type]
@classmethod
def get_instance_resource_nodes(cls, instance: Any) -> list:
"""通过数据库实例获取依赖授权路径"""
if not instance:
return []
def get_parent_nodes(i: Department) -> List[dict]:
"""获取父路径的 resource nodes"""
# 请求 callback 需要完整的资源路径
parents = i.get_ancestors(include_self=True)
d_nodes = [{"type": cls.get_by_model(d).value, "id": d.pk, "name": d.name} for d in parents]
category = ProfileCategory.objects.get(id=i.category_id)
return [
{"type": cls.CATEGORY.value, "id": category.id, "name": category.display_name},
*d_nodes,
]
special_map: Dict[Any, Callable] = {
cls.DEPARTMENT: get_parent_nodes,
}
try:
return special_map[cls.get_by_model(instance)](instance)
except KeyError:
return [
{
"type": cls.get_by_model(instance).value,
"id": instance.pk,
"name": getattr(
instance,
cls.get_constants_by_model(instance, "get_id_name_pair")[1],
),
}
]
@classmethod
def get_constants_by_model(cls, instance, target: str) -> Any:
"""通过数据模型实例来获取配置常量
:param instance: 数据模型实例
:param target: 目标方法
"""
return getattr(cls, target)(cls.get_by_model(instance))
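# --- Illustrative sketch (editor's addition, not part of bkuser_core) ---
# A minimal, standalone re-implementation of the `_bk_iam_path_` parsing done in
# ResourceType.get_key_mapping above, using the stdlib `re` module instead of
# `regex`, to show the expected input/output shape. The helper name and the
# sample path values are hypothetical and exist only for this demonstration.
if __name__ == "__main__":
    import re

    def _parse_iam_path_demo(value: str, non_leaf: bool = False) -> tuple:
        # "/category,5/department,3/department,8/" -> the last (type, id) pair wins
        pairs = re.findall(r"(\w+),(\d+)", value)
        resource_type, resource_id = pairs[-1]
        field_map = {"department": "id" if non_leaf else "parent_id", "category": "category_id"}
        return field_map[resource_type], int(resource_id)

    # Leaf-node policy: filter departments by their parent id
    assert _parse_iam_path_demo("/category,5/department,3/department,8/") == ("parent_id", 8)
    # Non-leaf-node policy: filter by the department id itself
    assert _parse_iam_path_demo("/category,5/department,8/", non_leaf=True) == ("id", 8)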
|
py
|
1a575047216c8a2b8344477fddfe39e69cab1d04
|
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind
sock.bind(("0.0.0.0", 5566))
# listen
sock.listen(20)
# accept an incoming connection
conn, addr = sock.accept()
print("address is:", addr)
data = conn.recv(1024)
print(data)
msg = """HTTP/1.1 200 OK
Content-Type:text/html
<meta charset='utf-8'>
<h1>人生苦短,我用python</h1>
"""
# send the response on the connected socket (use send, not sendto, for TCP)
conn.send(msg.encode("utf-8"))
conn.close()
sock.close()
|
py
|
1a5750d5753f7540109aae1bac5f80a8d190caf8
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: [email protected]
"""
"""
from wapi.common import functions
from wapi.common import constants
super_function = functions.super_function
def test_random_int():
res = super_function.random_int(2)
assert len(res) == 2
def test_get_current_space_name():
res = super_function.get_current_space_name()
assert res == constants.DEFAULT_SPACE_NAME
def test_run_shell():
res, err = functions.run_shell('ls te')
assert err == b'ls: te: No such file or directory\n'
res, err = functions.run_shell('ls tests/common/test_functions.py')
assert res == b'tests/common/test_functions.py\n'
|
py
|
1a5751e49c5510720d369748b89cc4312bd4875f
|
import cv2
import numpy as np
import threading
class Webcam2rgb():
def start(self, callback, cameraNumber=0, width = None, height = None, fps = None, directShow = False):
self.callback = callback
try:
self.cam = cv2.VideoCapture(cameraNumber + cv2.CAP_DSHOW if directShow else cv2.CAP_ANY)
if not self.cam.isOpened():
print('opening camera')
self.cam.open(0)
if width:
self.cam.set(cv2.CAP_PROP_FRAME_WIDTH,width)
if height:
self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT,height)
if fps:
self.cam.set(cv2.CAP_PROP_FPS, fps)
self.running = True
self.thread = threading.Thread(target = self.calc_BRG)
self.thread.start()
self.ret_val = True
except:
self.running = False
self.ret_val = False
def stop(self):
self.running = False
self.thread.join()
def calc_BRG(self):
while self.running:
try:
self.ret_val = False
self.ret_val, img = self.cam.read()
h, w, c = img.shape
brg = img[int(h/2),int(w/2)]
self.callback(self.ret_val,brg)
except:
self.running = False
def cameraFs(self):
return self.cam.get(cv2.CAP_PROP_FPS)
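# --- Usage sketch (editor's addition, not part of the original module) ---
# Drives the Webcam2rgb class above for a couple of seconds and prints the
# centre-pixel BGR value passed to the callback. Camera index 0, 30 fps and
# the 2-second duration are assumptions made for this demo only.
if __name__ == "__main__":
    import time

    def print_bgr(ret_val, bgr):
        # bgr is the centre pixel of the current frame as a numpy array [B, G, R]
        if ret_val:
            print("centre pixel BGR:", bgr)

    cam = Webcam2rgb()
    cam.start(callback=print_bgr, cameraNumber=0, fps=30)
    if cam.ret_val:
        print("camera fps:", cam.cameraFs())
        time.sleep(2)
        cam.stop()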
|
py
|
1a5751ff8b3384e27c0b9d8c80f6f1c487861435
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-14 05:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bots', '0002_bot_mashup'),
('content', '0018_auto_20170113_1954'),
]
operations = [
migrations.RemoveField(
model_name='mashup',
name='bot',
),
migrations.AddField(
model_name='mashup',
name='bot',
field=models.ManyToManyField(null=True, related_name='mashups', to='bots.Bot'),
),
]
|
py
|
1a575201979af70fded4a170d70af06c5139de0c
|
import sys, math, pygame
from mob import *
from bolt import *
from boltPower import *
from healthUp import *
from speedBoost import *
from Coin import *
#https://opengameart.org/content/space-man-space-bot-rework
class Player(Mob):
def __init__(self, speed=6, startPos=[0,0], powers=[], playerLives=6):
Mob.__init__(self, "PNG/Player/spaceman.png", [0,0], startPos)
self.notMoving = [pygame.image.load("PNG/Player/spaceman.png"),
pygame.image.load("PNG/Player/spaceman.png"),
pygame.image.load("PNG/Player/spaceman.png"),
]
self.upImages = [pygame.image.load("PNG/Player/spacemanFoward1.png"),
pygame.image.load("PNG/Player/spacemanFoward2.png"),
pygame.image.load("PNG/Player/spacemanFoward3.png"),
]
self.downImages = [pygame.image.load("PNG/Player/spaceman.png"),
pygame.image.load("PNG/Player/spaceman2.png"),
pygame.image.load("PNG/Player/spaceman3.png"),
]
self.leftImages = [pygame.image.load("PNG/Player/spaceman-left-1.png"),
pygame.image.load("PNG/Player/spaceman-left-2.png"),
pygame.image.load("PNG/Player/spaceman-left-3.png"),
]
self.rightImages = [pygame.image.load("PNG/Player/spaceman-right-1.png"),
pygame.image.load("PNG/Player/spaceman-right-2.png"),
pygame.image.load("PNG/Player/spaceman-right-3.png"),
]
self.images = self.downImages
self.y = "down"
        self.frame = 0
self.maxFrame = len(self.images)-1
self.aniTimer = 0
self.aniTimerMax = 60/15
self.maxSpeed = speed
self.faceKeys = []
self.keys = []
self.goal = [0,0]
self.kind = "player"
self.lives = playerLives
self.didBounceX = False
self.didBounceY = False
self.fireTimer = 0
self.fireTimerMax = 60/3
self.bullets = []
self.firing = False
# ~ self.facing = "down"
self.alive = True
self.invincible = False
self.invincTimer = 0
self.invincTimerMax = 60
def go(self, d):
mode, direction = d.split(" ")
if mode == "go":
self.keys += [direction]
elif mode == "s":
try:
self.keys.remove(direction)
if direction == "left" or "right":
self.speedx = 0
if direction == "up" or "down":
self.speedy = 0
except:
return
if self.keys:
if self.keys[-1] == "left":
self.speedx = -self.maxSpeed
elif self.keys[-1] == "right":
self.speedx = self.maxSpeed
elif self.keys[-1] == "up":
self.speedy = -self.maxSpeed
elif self.keys[-1] == "down":
self.speedy = self.maxSpeed
def face(self, y):
mode, direction = y.split(" ")
if mode == "face":
self.faceKeys += [direction]
elif mode == "stop":
try:
self.faceKeys.remove(direction)
except:
return
if self.faceKeys:
if self.faceKeys[-1] == "left":
self.images = self.leftImages
self.rect = self.image.get_rect(center = self.rect.center)
elif self.faceKeys[-1] == "right":
self.images = self.rightImages
self.rect = self.image.get_rect(center = self.rect.center)
elif self.faceKeys[-1] == "up":
self.images = self.upImages
self.rect = self.image.get_rect(center = self.rect.center)
elif self.faceKeys[-1] == "down":
self.images = self.downImages
self.rect = self.image.get_rect(center = self.rect.center)
def collide(self, other):
if other.kind == "warp":
pass
if not self.invincible and (other.kind == "enemy" or other.kind == "greenie" or other.kind == "imposter"): #get hurt
self.lives += -1
print "gets hurt"
self.invincible = True
if self.lives == 0:
self.alive = False
if not (other.kind == "warp" or other.kind == "Coin"):
Mob.collide(self, other)
return True
def facingDirection(self):
if self.images == self.downImages or self.images == self.notMoving:
self.y = "down"
if self.images == self.upImages:
self.y = "up"
if self.images == self.leftImages:
self.y = "left"
if self.images == self.rightImages:
self.y = "right"
def shoot(self, testingFire=True):
if testingFire and self.firing:
if self.fireTimer < self.fireTimerMax:
self.fireTimer += 1
else:
self.fireTimer = 0
self.firing = False
else:
self.firing = True
if self.y == "down":
speed = [0,10]
image = "PNG/Bolt/spacemanbolt-down.png"
if self.y == "up":
speed = [0,-10]
image = "PNG/Bolt/spacemanbolt-up.png"
if self.y == "left":
speed = [-10,0]
image = "PNG/Bolt/spacemanbolt-left.png"
if self.y == "right":
speed = [10,0]
image = "PNG/Bolt/spacemanbolt-right.png"
return Bolt(image, speed, self.rect.center)
def update(*args):
self = args[0]
size = args[1]
self.didBounceX = False
self.didBounceY = False
self.move()
self.live(self.lives)
self.bounceWall(size)
self.animate()
self.facingDirection()
if len(self.faceKeys) == 0:
if self.speedx < 0:
self.images = self.leftImages
if self.speedx > 0:
self.images = self.rightImages
if self.speedx == 0:
if self.speedy < 0:
self.images = self.upImages
if self.speedy >= 0:
self.images = self.downImages
if self.firing:
if self.fireTimer < self.fireTimerMax:
self.fireTimer += 1
else:
self.fireTimer = 0
self.firing = False
if self.invincible:
if self.invincTimer < self.invincTimerMax:
self.invincTimer += 1
else:
self.invincTimer = 0
self.invincible = False
if self.lives < 0:
self.alive = False
|
py
|
1a57521e65646e4269e9eccadcf81ee59f7602fd
|
import random
from raiden.constants import ABSENT_SECRET
from raiden.settings import DEFAULT_WAIT_BEFORE_LOCK_REMOVAL
from raiden.transfer import channel, routes
from raiden.transfer.architecture import Event, TransitionResult
from raiden.transfer.events import EventPaymentSentFailed, EventPaymentSentSuccess
from raiden.transfer.identifiers import CANONICAL_IDENTIFIER_GLOBAL_QUEUE
from raiden.transfer.mediated_transfer.events import (
EventRouteFailed,
EventUnlockFailed,
EventUnlockSuccess,
SendLockedTransfer,
SendSecretReveal,
)
from raiden.transfer.mediated_transfer.state import (
InitiatorTransferState,
TransferDescriptionWithSecretState,
)
from raiden.transfer.mediated_transfer.state_change import (
ReceiveSecretRequest,
ReceiveSecretReveal,
)
from raiden.transfer.state import (
ChannelState,
NettingChannelState,
RouteState,
message_identifier_from_prng,
)
from raiden.transfer.state_change import Block, ContractReceiveSecretReveal, StateChange
from raiden.transfer.utils import is_valid_secret_reveal
from raiden.utils.typing import (
MYPY_ANNOTATION,
Address,
BlockExpiration,
BlockNumber,
ChannelID,
Dict,
List,
MessageID,
NodeNetworkStateMap,
Optional,
PaymentWithFeeAmount,
Secret,
SecretHash,
)
def events_for_unlock_lock(
initiator_state: InitiatorTransferState,
channel_state: NettingChannelState,
secret: Secret,
secrethash: SecretHash,
pseudo_random_generator: random.Random,
) -> List[Event]:
""" Unlocks the lock offchain, and emits the events for the successful payment. """
# next hop learned the secret, unlock the token locally and send the
# lock claim message to next hop
transfer_description = initiator_state.transfer_description
message_identifier = message_identifier_from_prng(pseudo_random_generator)
unlock_lock = channel.send_unlock(
channel_state=channel_state,
message_identifier=message_identifier,
payment_identifier=transfer_description.payment_identifier,
secret=secret,
secrethash=secrethash,
)
payment_sent_success = EventPaymentSentSuccess(
token_network_registry_address=channel_state.token_network_registry_address,
token_network_address=channel_state.token_network_address,
identifier=transfer_description.payment_identifier,
amount=transfer_description.amount,
target=transfer_description.target,
secret=secret,
route=initiator_state.route.route,
)
unlock_success = EventUnlockSuccess(
transfer_description.payment_identifier, transfer_description.secrethash
)
return [unlock_lock, payment_sent_success, unlock_success]
def handle_block(
initiator_state: InitiatorTransferState,
state_change: Block,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[Optional[InitiatorTransferState]]:
""" Checks if the lock has expired, and if it has sends a remove expired
lock and emits the failing events.
"""
secrethash = initiator_state.transfer.lock.secrethash
locked_lock = channel_state.our_state.secrethashes_to_lockedlocks.get(secrethash)
if not locked_lock:
if channel_state.partner_state.secrethashes_to_lockedlocks.get(secrethash):
return TransitionResult(initiator_state, list())
else:
# if lock is not in our or our partner's locked locks then the
# task can go
return TransitionResult(None, list())
lock_expiration_threshold = BlockExpiration(
locked_lock.expiration + DEFAULT_WAIT_BEFORE_LOCK_REMOVAL
)
lock_has_expired = channel.is_lock_expired(
end_state=channel_state.our_state,
lock=locked_lock,
block_number=state_change.block_number,
lock_expiration_threshold=lock_expiration_threshold,
)
events: List[Event] = list()
if lock_has_expired and initiator_state.transfer_state != "transfer_expired":
is_channel_open = channel.get_status(channel_state) == ChannelState.STATE_OPENED
if is_channel_open:
expired_lock_events = channel.send_lock_expired(
channel_state=channel_state,
locked_lock=locked_lock,
pseudo_random_generator=pseudo_random_generator,
)
events.extend(expired_lock_events)
if initiator_state.received_secret_request:
reason = "bad secret request message from target"
else:
reason = "lock expired"
transfer_description = initiator_state.transfer_description
payment_identifier = transfer_description.payment_identifier
# TODO: When we introduce multiple transfers per payment this needs to be
# reconsidered. As we would want to try other routes once a route
# has failed, and a transfer failing does not mean the entire payment
# would have to fail.
# Related issue: https://github.com/raiden-network/raiden/issues/2329
payment_failed = EventPaymentSentFailed(
token_network_registry_address=transfer_description.token_network_registry_address,
token_network_address=transfer_description.token_network_address,
identifier=payment_identifier,
target=transfer_description.target,
reason=reason,
)
route_failed = EventRouteFailed(
secrethash=secrethash,
route=initiator_state.route.route,
token_network_address=transfer_description.token_network_address,
)
unlock_failed = EventUnlockFailed(
identifier=payment_identifier,
secrethash=initiator_state.transfer_description.secrethash,
reason=reason,
)
lock_exists = channel.lock_exists_in_either_channel_side(
channel_state=channel_state, secrethash=secrethash
)
initiator_state.transfer_state = "transfer_expired"
return TransitionResult(
# If the lock is either in our state or partner state we keep the
# task around to wait for the LockExpired messages to sync.
# Check https://github.com/raiden-network/raiden/issues/3183
initiator_state if lock_exists else None,
events + [payment_failed, route_failed, unlock_failed],
)
else:
return TransitionResult(initiator_state, events)
def try_new_route(
channelidentifiers_to_channels: Dict[ChannelID, NettingChannelState],
nodeaddresses_to_networkstates: NodeNetworkStateMap,
candidate_route_states: List[RouteState],
transfer_description: TransferDescriptionWithSecretState,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
) -> TransitionResult[Optional[InitiatorTransferState]]:
initiator_state = None
events: List[Event] = list()
amount_with_fee: PaymentWithFeeAmount = PaymentWithFeeAmount(
transfer_description.amount + transfer_description.allocated_fee
)
channel_state = None
route_state = None
reachable_route_states = routes.filter_reachable_routes(
candidate_route_states, nodeaddresses_to_networkstates
)
for reachable_route_state in reachable_route_states:
forward_channel_id = reachable_route_state.forward_channel_id
candidate_channel_state = forward_channel_id and channelidentifiers_to_channels.get(
forward_channel_id
)
assert isinstance(candidate_channel_state, NettingChannelState)
is_channel_usable = channel.is_channel_usable_for_new_transfer(
channel_state=candidate_channel_state,
transfer_amount=amount_with_fee,
lock_timeout=transfer_description.locktimeout,
)
if is_channel_usable:
channel_state = candidate_channel_state
route_state = reachable_route_state
break
if route_state is None:
if not reachable_route_states:
reason = "there is no route available"
else:
reason = "none of the available routes could be used"
transfer_failed = EventPaymentSentFailed(
token_network_registry_address=transfer_description.token_network_registry_address,
token_network_address=transfer_description.token_network_address,
identifier=transfer_description.payment_identifier,
target=transfer_description.target,
reason=reason,
)
events.append(transfer_failed)
initiator_state = None
else:
assert channel_state is not None
message_identifier = message_identifier_from_prng(pseudo_random_generator)
lockedtransfer_event = send_lockedtransfer(
transfer_description=transfer_description,
channel_state=channel_state,
message_identifier=message_identifier,
block_number=block_number,
route_state=route_state,
route_states=reachable_route_states,
)
assert lockedtransfer_event
initiator_state = InitiatorTransferState(
route=route_state,
transfer_description=transfer_description,
channel_identifier=channel_state.identifier,
transfer=lockedtransfer_event.transfer,
)
events.append(lockedtransfer_event)
return TransitionResult(initiator_state, events)
def send_lockedtransfer(
transfer_description: TransferDescriptionWithSecretState,
channel_state: NettingChannelState,
message_identifier: MessageID,
block_number: BlockNumber,
route_state: RouteState,
route_states: List[RouteState],
) -> SendLockedTransfer:
""" Create a mediated transfer using channel. """
assert channel_state.token_network_address == transfer_description.token_network_address
lock_expiration = channel.get_safe_initial_expiration(
block_number, channel_state.reveal_timeout, transfer_description.locktimeout
)
# The payment amount and the fee amount must be included in the locked
# amount, as a guarantee to the mediator that the fee will be claimable
# on-chain.
total_amount = PaymentWithFeeAmount(
transfer_description.amount + transfer_description.allocated_fee
)
lockedtransfer_event = channel.send_lockedtransfer(
channel_state=channel_state,
initiator=transfer_description.initiator,
target=transfer_description.target,
amount=total_amount,
message_identifier=message_identifier,
payment_identifier=transfer_description.payment_identifier,
expiration=lock_expiration,
secrethash=transfer_description.secrethash,
route_states=routes.prune_route_table(
route_states=route_states, selected_route=route_state
),
)
return lockedtransfer_event
def handle_secretrequest(
initiator_state: InitiatorTransferState,
state_change: ReceiveSecretRequest,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorTransferState]:
is_message_from_target = (
state_change.sender == initiator_state.transfer_description.target
and state_change.secrethash == initiator_state.transfer_description.secrethash
and state_change.payment_identifier
== initiator_state.transfer_description.payment_identifier
)
lock = channel.get_lock(
channel_state.our_state, initiator_state.transfer_description.secrethash
)
# This should not ever happen. This task clears itself when the lock is
# removed.
    assert lock is not None, "channel does not have the transfer's lock"
already_received_secret_request = initiator_state.received_secret_request
# lock.amount includes the fees, transfer_description.amount is the actual
# payment amount, for the transfer to be valid and the unlock allowed the
# target must receive an amount between these values.
is_valid_secretrequest = (
state_change.amount <= lock.amount
and state_change.amount >= initiator_state.transfer_description.amount
and state_change.expiration == lock.expiration
and initiator_state.transfer_description.secret != ABSENT_SECRET
)
if already_received_secret_request and is_message_from_target:
# A secret request was received earlier, all subsequent are ignored
# as it might be an attack
iteration = TransitionResult(initiator_state, list())
elif is_valid_secretrequest and is_message_from_target:
# Reveal the secret to the target node and wait for its confirmation.
# At this point the transfer is not cancellable anymore as either the lock
# timeouts or a secret reveal is received.
#
# Note: The target might be the first hop
#
message_identifier = message_identifier_from_prng(pseudo_random_generator)
transfer_description = initiator_state.transfer_description
recipient = transfer_description.target
revealsecret = SendSecretReveal(
recipient=Address(recipient),
message_identifier=message_identifier,
secret=transfer_description.secret,
canonical_identifier=CANONICAL_IDENTIFIER_GLOBAL_QUEUE,
)
initiator_state.transfer_state = "transfer_secret_revealed"
initiator_state.received_secret_request = True
iteration = TransitionResult(initiator_state, [revealsecret])
elif not is_valid_secretrequest and is_message_from_target:
initiator_state.received_secret_request = True
iteration = TransitionResult(initiator_state, list())
else:
iteration = TransitionResult(initiator_state, list())
return iteration
def handle_offchain_secretreveal(
initiator_state: InitiatorTransferState,
state_change: ReceiveSecretReveal,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[Optional[InitiatorTransferState]]:
""" Once the next hop proves it knows the secret, the initiator can unlock
the mediated transfer.
This will validate the secret, and if valid a new balance proof is sent to
the next hop with the current lock removed from the pending locks and the
transferred amount updated.
"""
iteration: TransitionResult[Optional[InitiatorTransferState]]
valid_reveal = is_valid_secret_reveal(
state_change=state_change,
transfer_secrethash=initiator_state.transfer_description.secrethash,
)
sent_by_partner = state_change.sender == channel_state.partner_state.address
is_channel_open = channel.get_status(channel_state) == ChannelState.STATE_OPENED
if valid_reveal and is_channel_open and sent_by_partner:
events = events_for_unlock_lock(
initiator_state=initiator_state,
channel_state=channel_state,
secret=state_change.secret,
secrethash=state_change.secrethash,
pseudo_random_generator=pseudo_random_generator,
)
iteration = TransitionResult(None, events)
else:
events = list()
iteration = TransitionResult(initiator_state, events)
return iteration
def handle_onchain_secretreveal(
initiator_state: InitiatorTransferState,
state_change: ContractReceiveSecretReveal,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[Optional[InitiatorTransferState]]:
""" When a secret is revealed on-chain all nodes learn the secret.
This check the on-chain secret corresponds to the one used by the
initiator, and if valid a new balance proof is sent to the next hop with
the current lock removed from the pending locks and the transferred amount
updated.
"""
iteration: TransitionResult[Optional[InitiatorTransferState]]
secret = state_change.secret
secrethash = initiator_state.transfer_description.secrethash
is_valid_secret = is_valid_secret_reveal(
state_change=state_change, transfer_secrethash=secrethash
)
is_channel_open = channel.get_status(channel_state) == ChannelState.STATE_OPENED
is_lock_expired = state_change.block_number > initiator_state.transfer.lock.expiration
is_lock_unlocked = is_valid_secret and not is_lock_expired
if is_lock_unlocked:
channel.register_onchain_secret(
channel_state=channel_state,
secret=secret,
secrethash=secrethash,
secret_reveal_block_number=state_change.block_number,
)
if is_lock_unlocked and is_channel_open:
events = events_for_unlock_lock(
initiator_state,
channel_state,
state_change.secret,
state_change.secrethash,
pseudo_random_generator,
)
iteration = TransitionResult(None, events)
else:
events = list()
iteration = TransitionResult(initiator_state, events)
return iteration
def state_transition(
initiator_state: InitiatorTransferState,
state_change: StateChange,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[Optional[InitiatorTransferState]]:
if type(state_change) == Block:
assert isinstance(state_change, Block), MYPY_ANNOTATION
iteration = handle_block(
initiator_state, state_change, channel_state, pseudo_random_generator
)
elif type(state_change) == ReceiveSecretRequest:
assert isinstance(state_change, ReceiveSecretRequest), MYPY_ANNOTATION
iteration = handle_secretrequest(
initiator_state, state_change, channel_state, pseudo_random_generator
)
elif type(state_change) == ReceiveSecretReveal:
assert isinstance(state_change, ReceiveSecretReveal), MYPY_ANNOTATION
iteration = handle_offchain_secretreveal(
initiator_state, state_change, channel_state, pseudo_random_generator
)
elif type(state_change) == ContractReceiveSecretReveal:
assert isinstance(state_change, ContractReceiveSecretReveal), MYPY_ANNOTATION
iteration = handle_onchain_secretreveal(
initiator_state, state_change, channel_state, pseudo_random_generator
)
else:
iteration = TransitionResult(initiator_state, list())
return iteration
|
py
|
1a575263023782470633a858c8de186950c7baa7
|
"""Tests for asyncio/sslproto.py."""
import logging
import socket
from test import support
import unittest
import weakref
from unittest import mock
try:
import ssl
except ImportError:
ssl = None
import asyncio
from asyncio import log
from asyncio import protocols
from asyncio import sslproto
from test import support
from test.test_asyncio import utils as test_utils
from test.test_asyncio import functional as func_tests
def tearDownModule():
asyncio.set_event_loop_policy(None)
@unittest.skipIf(ssl is None, 'No ssl module')
class SslProtoHandshakeTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def ssl_protocol(self, *, waiter=None, proto=None):
sslcontext = test_utils.dummy_ssl_context()
if proto is None: # app protocol
proto = asyncio.Protocol()
ssl_proto = sslproto.SSLProtocol(self.loop, proto, sslcontext, waiter,
ssl_handshake_timeout=0.1)
self.assertIs(ssl_proto._app_transport.get_protocol(), proto)
self.addCleanup(ssl_proto._app_transport.close)
return ssl_proto
def connection_made(self, ssl_proto, *, do_handshake=None):
transport = mock.Mock()
sslpipe = mock.Mock()
sslpipe.shutdown.return_value = b''
if do_handshake:
sslpipe.do_handshake.side_effect = do_handshake
else:
def mock_handshake(callback):
return []
sslpipe.do_handshake.side_effect = mock_handshake
with mock.patch('asyncio.sslproto._SSLPipe', return_value=sslpipe):
ssl_proto.connection_made(transport)
return transport
def test_handshake_timeout_zero(self):
sslcontext = test_utils.dummy_ssl_context()
app_proto = mock.Mock()
waiter = mock.Mock()
with self.assertRaisesRegex(ValueError, 'a positive number'):
sslproto.SSLProtocol(self.loop, app_proto, sslcontext, waiter,
ssl_handshake_timeout=0)
def test_handshake_timeout_negative(self):
sslcontext = test_utils.dummy_ssl_context()
app_proto = mock.Mock()
waiter = mock.Mock()
with self.assertRaisesRegex(ValueError, 'a positive number'):
sslproto.SSLProtocol(self.loop, app_proto, sslcontext, waiter,
ssl_handshake_timeout=-10)
def test_eof_received_waiter(self):
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
self.connection_made(ssl_proto)
ssl_proto.eof_received()
test_utils.run_briefly(self.loop)
self.assertIsInstance(waiter.exception(), ConnectionResetError)
def test_fatal_error_no_name_error(self):
# From issue #363.
# _fatal_error() generates a NameError if sslproto.py
# does not import base_events.
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
# Temporarily turn off error logging so as not to spoil test output.
log_level = log.logger.getEffectiveLevel()
log.logger.setLevel(logging.FATAL)
try:
ssl_proto._fatal_error(None)
finally:
# Restore error logging.
log.logger.setLevel(log_level)
def test_connection_lost(self):
# From issue #472.
# yield from waiter hang if lost_connection was called.
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
self.connection_made(ssl_proto)
ssl_proto.connection_lost(ConnectionAbortedError)
test_utils.run_briefly(self.loop)
self.assertIsInstance(waiter.exception(), ConnectionAbortedError)
def test_close_during_handshake(self):
# bpo-29743 Closing transport during handshake process leaks socket
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
transport = self.connection_made(ssl_proto)
test_utils.run_briefly(self.loop)
ssl_proto._app_transport.close()
self.assertTrue(transport.abort.called)
def test_get_extra_info_on_closed_connection(self):
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
self.assertIsNone(ssl_proto._get_extra_info('socket'))
default = object()
self.assertIs(ssl_proto._get_extra_info('socket', default), default)
self.connection_made(ssl_proto)
self.assertIsNotNone(ssl_proto._get_extra_info('socket'))
ssl_proto.connection_lost(None)
self.assertIsNone(ssl_proto._get_extra_info('socket'))
def test_set_new_app_protocol(self):
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
new_app_proto = asyncio.Protocol()
ssl_proto._app_transport.set_protocol(new_app_proto)
self.assertIs(ssl_proto._app_transport.get_protocol(), new_app_proto)
self.assertIs(ssl_proto._app_protocol, new_app_proto)
def test_data_received_after_closing(self):
ssl_proto = self.ssl_protocol()
self.connection_made(ssl_proto)
transp = ssl_proto._app_transport
transp.close()
# should not raise
self.assertIsNone(ssl_proto.data_received(b'data'))
def test_write_after_closing(self):
ssl_proto = self.ssl_protocol()
self.connection_made(ssl_proto)
transp = ssl_proto._app_transport
transp.close()
# should not raise
self.assertIsNone(transp.write(b'data'))
##############################################################################
# Start TLS Tests
##############################################################################
class BaseStartTLS(func_tests.FunctionalTestCaseMixin):
PAYLOAD_SIZE = 1024 * 100
TIMEOUT = support.LONG_TIMEOUT
def new_loop(self):
raise NotImplementedError
def test_buf_feed_data(self):
class Proto(asyncio.BufferedProtocol):
def __init__(self, bufsize, usemv):
self.buf = bytearray(bufsize)
self.mv = memoryview(self.buf)
self.data = b''
self.usemv = usemv
def get_buffer(self, sizehint):
if self.usemv:
return self.mv
else:
return self.buf
def buffer_updated(self, nsize):
if self.usemv:
self.data += self.mv[:nsize]
else:
self.data += self.buf[:nsize]
for usemv in [False, True]:
proto = Proto(1, usemv)
protocols._feed_data_to_buffered_proto(proto, b'12345')
self.assertEqual(proto.data, b'12345')
proto = Proto(2, usemv)
protocols._feed_data_to_buffered_proto(proto, b'12345')
self.assertEqual(proto.data, b'12345')
proto = Proto(2, usemv)
protocols._feed_data_to_buffered_proto(proto, b'1234')
self.assertEqual(proto.data, b'1234')
proto = Proto(4, usemv)
protocols._feed_data_to_buffered_proto(proto, b'1234')
self.assertEqual(proto.data, b'1234')
proto = Proto(100, usemv)
protocols._feed_data_to_buffered_proto(proto, b'12345')
self.assertEqual(proto.data, b'12345')
proto = Proto(0, usemv)
with self.assertRaisesRegex(RuntimeError, 'empty buffer'):
protocols._feed_data_to_buffered_proto(proto, b'12345')
def test_start_tls_client_reg_proto_1(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
def serve(sock):
sock.settimeout(self.TIMEOUT)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.start_tls(server_context, server_side=True)
sock.sendall(b'O')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.shutdown(socket.SHUT_RDWR)
sock.close()
class ClientProto(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(proto, tr):
proto.con_made_cnt += 1
# Ensure connection_made gets called only once.
self.assertEqual(proto.con_made_cnt, 1)
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProto(on_data, on_eof), *addr)
tr.write(HELLO_MSG)
new_tr = await self.loop.start_tls(tr, proto, client_context)
self.assertEqual(await on_data, b'O')
new_tr.write(HELLO_MSG)
await on_eof
new_tr.close()
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr),
timeout=support.SHORT_TIMEOUT))
# No garbage is left if SSL is closed uncleanly
client_context = weakref.ref(client_context)
self.assertIsNone(client_context())
def test_create_connection_memory_leak(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
def serve(sock):
sock.settimeout(self.TIMEOUT)
sock.start_tls(server_context, server_side=True)
sock.sendall(b'O')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.shutdown(socket.SHUT_RDWR)
sock.close()
class ClientProto(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(proto, tr):
# XXX: We assume user stores the transport in protocol
proto.tr = tr
proto.con_made_cnt += 1
# Ensure connection_made gets called only once.
self.assertEqual(proto.con_made_cnt, 1)
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProto(on_data, on_eof), *addr,
ssl=client_context)
self.assertEqual(await on_data, b'O')
tr.write(HELLO_MSG)
await on_eof
tr.close()
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr),
timeout=support.SHORT_TIMEOUT))
# No garbage is left for SSL client from loop.create_connection, even
# if user stores the SSLTransport in corresponding protocol instance
client_context = weakref.ref(client_context)
self.assertIsNone(client_context())
def test_start_tls_client_buf_proto_1(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
client_con_made_calls = 0
def serve(sock):
sock.settimeout(self.TIMEOUT)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.start_tls(server_context, server_side=True)
sock.sendall(b'O')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.sendall(b'2')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.shutdown(socket.SHUT_RDWR)
sock.close()
class ClientProtoFirst(asyncio.BufferedProtocol):
def __init__(self, on_data):
self.on_data = on_data
self.buf = bytearray(1)
def connection_made(self, tr):
nonlocal client_con_made_calls
client_con_made_calls += 1
def get_buffer(self, sizehint):
return self.buf
def buffer_updated(self, nsize):
assert nsize == 1
self.on_data.set_result(bytes(self.buf[:nsize]))
class ClientProtoSecond(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(self, tr):
nonlocal client_con_made_calls
client_con_made_calls += 1
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data1 = self.loop.create_future()
on_data2 = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProtoFirst(on_data1), *addr)
tr.write(HELLO_MSG)
new_tr = await self.loop.start_tls(tr, proto, client_context)
self.assertEqual(await on_data1, b'O')
new_tr.write(HELLO_MSG)
new_tr.set_protocol(ClientProtoSecond(on_data2, on_eof))
self.assertEqual(await on_data2, b'2')
new_tr.write(HELLO_MSG)
await on_eof
new_tr.close()
# connection_made() should be called only once -- when
# we establish connection for the first time. Start TLS
# doesn't call connection_made() on application protocols.
self.assertEqual(client_con_made_calls, 1)
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr),
timeout=self.TIMEOUT))
def test_start_tls_slow_client_cancel(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
client_context = test_utils.simple_client_sslcontext()
server_waits_on_handshake = self.loop.create_future()
def serve(sock):
sock.settimeout(self.TIMEOUT)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
try:
self.loop.call_soon_threadsafe(
server_waits_on_handshake.set_result, None)
data = sock.recv_all(1024 * 1024)
except ConnectionAbortedError:
pass
finally:
sock.close()
class ClientProto(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(proto, tr):
proto.con_made_cnt += 1
# Ensure connection_made gets called only once.
self.assertEqual(proto.con_made_cnt, 1)
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProto(on_data, on_eof), *addr)
tr.write(HELLO_MSG)
await server_waits_on_handshake
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(
self.loop.start_tls(tr, proto, client_context),
0.5)
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr),
timeout=support.SHORT_TIMEOUT))
def test_start_tls_server_1(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
ANSWER = b'answer'
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
answer = None
def client(sock, addr):
nonlocal answer
sock.settimeout(self.TIMEOUT)
sock.connect(addr)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.start_tls(client_context)
sock.sendall(HELLO_MSG)
answer = sock.recv_all(len(ANSWER))
sock.close()
class ServerProto(asyncio.Protocol):
def __init__(self, on_con, on_con_lost, on_got_hello):
self.on_con = on_con
self.on_con_lost = on_con_lost
self.on_got_hello = on_got_hello
self.data = b''
self.transport = None
def connection_made(self, tr):
self.transport = tr
self.on_con.set_result(tr)
def replace_transport(self, tr):
self.transport = tr
def data_received(self, data):
self.data += data
if len(self.data) >= len(HELLO_MSG):
self.on_got_hello.set_result(None)
def connection_lost(self, exc):
self.transport = None
if exc is None:
self.on_con_lost.set_result(None)
else:
self.on_con_lost.set_exception(exc)
async def main(proto, on_con, on_con_lost, on_got_hello):
tr = await on_con
tr.write(HELLO_MSG)
self.assertEqual(proto.data, b'')
new_tr = await self.loop.start_tls(
tr, proto, server_context,
server_side=True,
ssl_handshake_timeout=self.TIMEOUT)
proto.replace_transport(new_tr)
await on_got_hello
new_tr.write(ANSWER)
await on_con_lost
self.assertEqual(proto.data, HELLO_MSG)
new_tr.close()
async def run_main():
on_con = self.loop.create_future()
on_con_lost = self.loop.create_future()
on_got_hello = self.loop.create_future()
proto = ServerProto(on_con, on_con_lost, on_got_hello)
server = await self.loop.create_server(
lambda: proto, '127.0.0.1', 0)
addr = server.sockets[0].getsockname()
with self.tcp_client(lambda sock: client(sock, addr),
timeout=self.TIMEOUT):
await asyncio.wait_for(
main(proto, on_con, on_con_lost, on_got_hello),
timeout=self.TIMEOUT)
server.close()
await server.wait_closed()
self.assertEqual(answer, ANSWER)
self.loop.run_until_complete(run_main())
def test_start_tls_wrong_args(self):
async def main():
with self.assertRaisesRegex(TypeError, 'SSLContext, got'):
await self.loop.start_tls(None, None, None)
sslctx = test_utils.simple_server_sslcontext()
with self.assertRaisesRegex(TypeError, 'is not supported'):
await self.loop.start_tls(None, None, sslctx)
self.loop.run_until_complete(main())
def test_handshake_timeout(self):
# bpo-29970: Check that a connection is aborted if handshake is not
# completed in timeout period, instead of remaining open indefinitely
client_sslctx = test_utils.simple_client_sslcontext()
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
server_side_aborted = False
def server(sock):
nonlocal server_side_aborted
try:
sock.recv_all(1024 * 1024)
except ConnectionAbortedError:
server_side_aborted = True
finally:
sock.close()
async def client(addr):
await asyncio.wait_for(
self.loop.create_connection(
asyncio.Protocol,
*addr,
ssl=client_sslctx,
server_hostname='',
ssl_handshake_timeout=support.SHORT_TIMEOUT),
0.5)
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(client(srv.addr))
self.assertTrue(server_side_aborted)
# Python issue #23197: cancelling a handshake must not raise an
# exception or log an error, even if the handshake failed
self.assertEqual(messages, [])
# The 10s handshake timeout should be cancelled to free related
# objects without really waiting for 10s
client_sslctx = weakref.ref(client_sslctx)
self.assertIsNone(client_sslctx())
def test_create_connection_ssl_slow_handshake(self):
client_sslctx = test_utils.simple_client_sslcontext()
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
def server(sock):
try:
sock.recv_all(1024 * 1024)
except ConnectionAbortedError:
pass
finally:
sock.close()
async def client(addr):
with self.assertWarns(DeprecationWarning):
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
loop=self.loop,
ssl_handshake_timeout=1.0)
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
with self.assertRaisesRegex(
ConnectionAbortedError,
r'SSL handshake.*is taking longer'):
self.loop.run_until_complete(client(srv.addr))
self.assertEqual(messages, [])
def test_create_connection_ssl_failed_certificate(self):
self.loop.set_exception_handler(lambda loop, ctx: None)
sslctx = test_utils.simple_server_sslcontext()
client_sslctx = test_utils.simple_client_sslcontext(
disable_verify=False)
def server(sock):
try:
sock.start_tls(
sslctx,
server_side=True)
except ssl.SSLError:
pass
except OSError:
pass
finally:
sock.close()
async def client(addr):
with self.assertWarns(DeprecationWarning):
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
loop=self.loop,
ssl_handshake_timeout=support.LOOPBACK_TIMEOUT)
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
with self.assertRaises(ssl.SSLCertVerificationError):
self.loop.run_until_complete(client(srv.addr))
def test_start_tls_client_corrupted_ssl(self):
self.loop.set_exception_handler(lambda loop, ctx: None)
sslctx = test_utils.simple_server_sslcontext()
client_sslctx = test_utils.simple_client_sslcontext()
def server(sock):
orig_sock = sock.dup()
try:
sock.start_tls(
sslctx,
server_side=True)
sock.sendall(b'A\n')
sock.recv_all(1)
orig_sock.send(b'please corrupt the SSL connection')
except ssl.SSLError:
pass
finally:
orig_sock.close()
sock.close()
async def client(addr):
with self.assertWarns(DeprecationWarning):
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
loop=self.loop)
self.assertEqual(await reader.readline(), b'A\n')
writer.write(b'B')
with self.assertRaises(ssl.SSLError):
await reader.readline()
writer.close()
return 'OK'
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
res = self.loop.run_until_complete(client(srv.addr))
self.assertEqual(res, 'OK')
@unittest.skipIf(ssl is None, 'No ssl module')
class SelectorStartTLSTests(BaseStartTLS, unittest.TestCase):
def new_loop(self):
return asyncio.SelectorEventLoop()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(asyncio, 'ProactorEventLoop'), 'Windows only')
class ProactorStartTLSTests(BaseStartTLS, unittest.TestCase):
def new_loop(self):
return asyncio.ProactorEventLoop()
if __name__ == '__main__':
unittest.main()
|
py
|
1a5752a299f8bb32cb45b47851f53fe9b7dc9a05
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
try:
from ipywidgets.widgets import DOMWidget, register
from traitlets import Unicode, Int, Bool
except Exception as exp:
# Init dummy objects needed to import this module without errors.
# These are all overwritten with imports from IPython (on success)
DOMWidget = object
def _noop(x):
return x
register = _noop
class _MockTraitlet(object):
def __init__(self, *args, **kwargs):
pass
def tag(self, *args, **kwargs):
pass
Unicode = Int = Float = Bool = _MockTraitlet
available, testable, why_not, which = False, False, str(exp), None
else:
available, testable, why_not, which = True, False, None, None
from ....app.backends._ipynb_util import create_glir_message
from ....app import Timer
# ---------------------------------------------------------- IPython Widget ---
def _stop_timers(canvas):
"""Stop all timers in a canvas."""
for attr in dir(canvas):
try:
attr_obj = getattr(canvas, attr)
except NotImplementedError:
# This try/except is needed because canvas.position raises
# an error (it is not implemented in this backend).
attr_obj = None
if isinstance(attr_obj, Timer):
attr_obj.stop()
@register
class VispyWidget(DOMWidget):
_view_name = Unicode("VispyView").tag(sync=True)
_view_module = Unicode('vispy').tag(sync=True)
_view_module_version = Unicode('~0.2.0').tag(sync=True)
_model_name = Unicode('VispyModel').tag(sync=True)
_model_module = Unicode('vispy').tag(sync=True)
_model_module_version = Unicode('~0.2.0').tag(sync=True)
    # height/width of the widget is managed by IPython.
    # It's a string and can be anything valid in CSS.
    # Here we only manage the size of the viewport.
width = Int().tag(sync=True)
height = Int().tag(sync=True)
resizable = Bool(value=True).tag(sync=True)
def __init__(self, **kwargs):
if DOMWidget is object:
raise ImportError("'ipywidgets' must be installed to use the notebook backend.")
super(VispyWidget, self).__init__(**kwargs)
self.on_msg(self.events_received)
self.canvas = None
self.canvas_backend = None
self.gen_event = None
def set_canvas(self, canvas):
self.width, self.height = canvas._backend._default_size
self.canvas = canvas
self.canvas_backend = self.canvas._backend
self.canvas_backend.set_widget(self)
self.gen_event = self.canvas_backend._gen_event
#setup the backend widget then.
def events_received(self, widget, content, buffers):
if content['msg_type'] == 'init':
self.canvas_backend._reinit_widget()
elif content['msg_type'] == 'events':
events = content['contents']
for ev in events:
self.gen_event(ev)
elif content['msg_type'] == 'status':
if content['contents'] == 'removed':
# Stop all timers associated to the widget.
_stop_timers(self.canvas_backend._vispy_canvas)
def send_glir_commands(self, commands):
# older versions of ipython (<3.0) use base64
# array_serialization = 'base64'
array_serialization = 'binary'
msg = create_glir_message(commands, array_serialization)
msg['array_serialization'] = array_serialization
if array_serialization == 'base64':
self.send(msg)
elif array_serialization == 'binary':
# Remove the buffers from the JSON message: they will be sent
# independently via binary WebSocket.
self.send(msg, buffers=msg.pop('buffers', None))
|
py
|
1a5753f0e75407b9e39d57debd992c0ddb2f40d9
|
"""
owtf.http.transaction
~~~~~~~~~~~~~~~~~~~~~
HTTP_Transaction is a container of useful HTTP Transaction information to
simplify code both in the framework and the plugins.
"""
import cgi
import logging
import io
import gzip
import zlib
import json
try:
from http.client import responses as response_messages
except ImportError:
from httplib import responses as response_messages
from cookies import Cookie, InvalidCookieError
from owtf.lib.general import derive_http_method
class HTTP_Transaction(object):
def __init__(self, timer):
self.timer = timer
self.new = False
def scope_str(self):
"""Get the scope in a string format
:return: Scope
:rtype: `str`
"""
return str(self.is_in_scope)[0]
def in_scope(self):
"""Check if the transaction is in scope
:return: True if in scope, else False
:rtype: `bool`
"""
return self.is_in_scope()
def start(self, url, data, method, is_in_scope):
"""Get attributes for a new transaction
:param url: transaction url
:type url: `str`
:param data: transaction data
:type data:
:param method:
:type method:
:param is_in_scope:
:type is_in_scope:
:return:
:rtype:
"""
self.is_in_scope = is_in_scope
self.start_request()
self.url = url
self.init_data(data)
self.method = derive_http_method(method, data)
self.found = None
self.raw_request = ''
self.response_headers = []
self.response_size = ''
self.status = ''
self.id = ''
self.html_link_id = ''
self.new = True # Flag new transaction.
def init_data(self, data):
"""Sets the data for the transaction
:param data: Data to set
:type data: `str`
:return: None
:rtype: None
"""
self.data = data
if self.data is None:
# This simplifies other code later, no need to cast to str if None, etc.
self.data = ''
def start_request(self):
"""Start timer for the request
:return: None
:rtype: None
"""
self.timer.start_timer('Request')
self.time = self.time_human = ''
def end_request(self):
"""End timer for the request
:return: None
:rtype: None
"""
self.time = self.timer.get_elapsed_time_as_str('Request')
self.time_human = self.time
self.local_timestamp = self.timer.get_current_date_time()
def set_transaction(self, found, request, response):
"""Response can be "Response" for 200 OK or "Error" for everything else, we don't care here.
:param found:
:type found:
:param request:
:type request:
:param response:
:type response:
:return:
:rtype:
"""
if self.url != response.url:
if response.code not in [302, 301]: # No way, error in hook.
# Mark as a redirect, dirty but more accurate than 200 :P
self.status = "%s Found" % str(302)
self.status += " --Redirect--> %s " % str(response.code)
self.status += response.msg
# Redirect differs in schema (i.e. https instead of http).
if self.url.split(':')[0] != response.url.split(':')[0]:
pass
self.url = response.url
else:
self.status = "%s %s" % (str(response.code), response.msg)
self.raw_request = request
self.found = found
self.response_headers = response.headers
self.response_contents = response.read()
self.check_if_compressed(response, self.response_contents)
self.end_request()
def set_transaction_from_db(self, id, url, method, status, time, time_human, local_timestamp, request_data,
raw_request, response_headers, response_size, response_body):
"""Set the transaction from the DB
:param id:
:type id:
:param url:
:type url:
:param method:
:type method:
:param status:
:type status:
:param time:
:type time:
:param time_human:
:type time_human:
:param local_timestamp:
:type local_timestamp:
:param request_data:
:type request_data:
:param raw_request:
:type raw_request:
:param response_headers:
:type response_headers:
:param response_size:
:type response_size:
:param response_body:
:type response_body:
:return:
:rtype:
"""
self.id = id
self.new = False # Flag NOT new transaction.
self.url = url
self.method = method
self.status = status
self.found = (self.status == "200 OK")
self.time = time
self.time_human = time_human
self.local_timestamp = local_timestamp
self.data = request_data
self.raw_request = raw_request
self.response_headers = response_headers
self.response_size = response_size
self.response_contents = response_body
def get_session_tokens(self):
"""Get a JSON blob of all captured cookies
:return:
:rtype:
"""
cookies = []
try: # parsing may sometimes fail
for cookie in self.cookies_list:
cookies.append(Cookie.from_string(cookie).to_dict())
except InvalidCookieError:
logging.debug("Cannot not parse the cookies")
return cookies
def set_error(self, error_message):
"""Set the error message for a transaction
:param error_message: Message to set
:type error_message: `str`
:return: None
:rtype: None
"""
# Only called for unknown errors, 404 and other HTTP stuff handled on self.SetResponse.
self.response_contents = error_message
self.end_request()
def get_id(self):
"""Get transaction ID
:return: transaction id
:rtype: `int`
"""
return self.id
def set_id(self, id, html_link_to_id):
"""Sets the transaction id and format an HTML link
:param id: transaction id
:type id: `int`
:param html_link_to_id: HTML link for the id
:type html_link_to_id: `str`
:return: None
:rtype: None
"""
self.id = id
self.html_link_id = html_link_to_id
# Only for new transactions, not when retrieved from DB, etc.
if self.new:
log = logging.getLogger('general')
log.info("New OWTF HTTP Transaction: %s",
" - ".join([self.id, self.time_human, self.status, self.method, self.url]))
def get_html_link(self, link_name=''):
"""Get the HTML link to the transaction ID
:param link_name: Name of the link
:type link_name: `str`
:return: Formatted HTML link
:rtype: `str`
"""
if '' == link_name:
link_name = "Transaction %s" % self.id
return self.html_link_id.replace('@@@PLACE_HOLDER@@@', link_name)
def get_html_link_time(self, link_name=''):
"""Get the HTML link to the transaction ID
:param link_name: Name of the link
:type link_name: `str`
:return: Formatted HTML link
:rtype: `str`
"""
return "%s (%s)" % (self.get_html_link(link_name), self.time_human)
def get_raw_escaped(self):
"""Get escaped request and response
:return: None
:rtype: None
"""
return "<pre>%s</pre>" % cgi.escape(self.get_raw())
def get_raw(self):
"""Get raw transaction request and response
:return: Raw string with response and request
:rtype: `str`
"""
return "%s\n\n%s" % (self.get_raw_request(), self.get_raw_response())
def get_raw_request(self):
"""Return raw request
:return: Raw request
:rtype: `str`
"""
return self.raw_request
def get_status(self):
"""Get status for transaction response
:return: Status
:rtype: `str`
"""
return self.status
def get_response_headers(self):
"""Get response headers for the transaction
:return:
:rtype:
"""
return self.response_headers
def get_raw_response(self, with_status=True):
"""Get the complete raw response
:param with_status: Want status?
:type with_status: `bool`
:return: Raw reponse
:rtype: `str`
"""
try:
return "%s\r\n%s\n\n%s" % (self.get_status(), str(self.response_headers), self.response_contents)
except UnicodeDecodeError:
return "%s\r\n%s\n\n[Binary Content]" % (self.get_status(), str(self.response_headers))
def get_raw_response_headers(self, with_status=True):
"""Get raw response headers for the transaction
:param with_status: Want status?
:type with_status: `bool`
:return: Raw response headers as a string
:rtype: `str`
"""
return "%s\r\n%s" % (self.get_status(), str(self.response_headers))
def get_raw_response_body(self):
"""Return raw response content
:return: Raw response body
:rtype: `str`
"""
return self.response_contents
def import_proxy_req_resp(self, request, response):
"""Import proxy request and response
:param request:
:type request:
:param response:
:type response:
:return:
:rtype:
"""
self.is_in_scope = request.in_scope
self.url = request.url
self.init_data(request.body)
self.method = request.method
try:
self.status = "%s %s" % (str(response.code), response_messages[int(response.code)])
except KeyError:
self.status = "%s Unknown Error" % str(response.code)
self.raw_request = request.raw_request
self.response_headers = response.header_string
self.response_contents = response.body
self.response_size = len(self.response_contents)
self.time = str(response.request_time)
self.time_human = self.timer.get_time_human(self.time)
self.local_timestamp = request.local_timestamp
self.found = (self.status == "200 OK")
self.cookies_list = response.cookies
self.new = True
self.id = ''
self.html_link_id = ''
def get_decode_response(self):
return self.decoded_content
def check_if_compressed(self, response, content):
if response.info().get('Content-Encoding') == 'gzip': # check for gzip compression
            compressed_file = io.BytesIO()  # gzip operates on a bytes buffer, not text
compressed_file.write(content)
compressed_file.seek(0)
f = gzip.GzipFile(fileobj=compressed_file, mode='rb')
self.decoded_content = f.read()
elif response.info().get('Content-Encoding') == 'deflate': # check for deflate compression
self.decoded_content = zlib.decompress(content)
else:
self.decoded_content = content # else the no compression
|
py
|
1a575566934a947034d6347087c268803e3beb1e
|
from PyQt4 import QtGui
from models.experiment import Experiment
__author__ = 'daniel'
class ExperimentComboBox(QtGui.QComboBox):
def __init__(self, session = None, parent = None):
super(ExperimentComboBox, self).__init__(parent)
self.session = session
self.refresh_experiments()
def refresh_experiments(self):
self.clear()
self.experiments = self.session.query(Experiment).all()
for e in self.experiments:
self.addItem(e.name)
def currentItem(self):
try:
val = self.experiments[self.currentIndex()]
except Exception as e:
print(e)
return None
return val
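# --- Usage sketch (assumption: not part of the original module) ---
# The widget needs a SQLAlchemy session able to query Experiment rows, e.g.:
#
#   app = QtGui.QApplication(sys.argv)
#   combo = ExperimentComboBox(session=session)   # 'session' is built elsewhere
#   combo.show()
#   selected = combo.currentItem()                # Experiment instance or None
#   app.exec_()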
|
py
|
1a575608f4b9e8dd6cd72661df8537c91e293435
|
# -*- coding: utf-8 -*-
# Copyright 2016 Matt Martz
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from flask import Flask
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from werkzeug.wrappers import BaseRequest
__version__ = '0.0.4'
def make_environ(event):
environ = {}
for hdr_name, hdr_value in event['headers'].items():
hdr_name = hdr_name.replace('-', '_').upper()
if hdr_name in ['CONTENT_TYPE', 'CONTENT_LENGTH']:
environ[hdr_name] = hdr_value
continue
http_hdr_name = 'HTTP_%s' % hdr_name
environ[http_hdr_name] = hdr_value
qs = event['queryStringParameters']
environ['REQUEST_METHOD'] = event['httpMethod']
environ['PATH_INFO'] = event['path']
environ['QUERY_STRING'] = urlencode(qs) if qs else ''
environ['REMOTE_ADDR'] = event['requestContext']['identity']['sourceIp']
environ['HOST'] = '%(HTTP_HOST)s:%(HTTP_X_FORWARDED_PORT)s' % environ
environ['SCRIPT_NAME'] = ''
environ['SERVER_PORT'] = environ['HTTP_X_FORWARDED_PORT']
environ['SERVER_PROTOCOL'] = 'HTTP/1.1'
environ['CONTENT_LENGTH'] = str(
len(event['body']) if event['body'] else ''
)
environ['wsgi.url_scheme'] = environ['HTTP_X_FORWARDED_PROTO']
environ['wsgi.input'] = StringIO(event['body'] or '')
environ['wsgi.version'] = (1, 0)
environ['wsgi.errors'] = sys.stderr
environ['wsgi.multithread'] = False
environ['wsgi.run_once'] = True
environ['wsgi.multiprocess'] = False
BaseRequest(environ)
return environ
class LambdaResponse(object):
def __init__(self):
self.status = None
self.response_headers = None
def start_response(self, status, response_headers, exc_info=None):
self.status = int(status[:3])
self.response_headers = dict(response_headers)
class FlaskLambda(Flask):
def __call__(self, event, context):
if 'httpMethod' not in event:
# In this "context" `event` is `environ` and
# `context` is `start_response`, meaning the request didn't
# occur via API Gateway and Lambda
return super(FlaskLambda, self).__call__(event, context)
response = LambdaResponse()
body = next(self.wsgi_app(
make_environ(event),
response.start_response
))
return {
'statusCode': response.status,
'headers': response.response_headers,
'body': body
}
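# --- Usage sketch (assumption: not part of the original flask-lambda module) ---
# FlaskLambda behaves like a normal Flask app under a WSGI server and like a
# Lambda handler when invoked by API Gateway; the route below is illustrative.
if __name__ == '__main__':
    demo_app = FlaskLambda(__name__)
    @demo_app.route('/ping')
    def ping():
        # Plain Flask view; __call__ above decides at runtime whether the
        # invocation came from WSGI or from API Gateway/Lambda.
        return 'pong'
    # Locally this starts the Flask development server; when deployed, the
    # Lambda handler would be set to "<module>.demo_app" instead.
    demo_app.run(debug=True)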
|
py
|
1a57566f8575e22ba06230924ed0b907bf34a78c
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"""
c/c++ configuration routines
"""
import os, imp, sys, shlex, shutil
from Utils import md5
import Build, Utils, Configure, Task, Options, Logs, TaskGen
from Constants import *
from Configure import conf, conftest
cfg_ver = {
'atleast-version': '>=',
'exact-version': '==',
'max-version': '<=',
}
SNIP1 = '''
int main() {
void *p;
p=(void*)(%s);
return 0;
}
'''
SNIP2 = '''
int main() {
if ((%(type_name)s *) 0) return 0;
if (sizeof (%(type_name)s)) return 0;
}
'''
SNIP3 = '''
int main() {
return 0;
}
'''
def parse_flags(line, uselib, env):
"""pkg-config still has bugs on some platforms, and there are many -config programs, parsing flags is necessary :-/"""
lst = shlex.split(line)
while lst:
x = lst.pop(0)
st = x[:2]
ot = x[2:]
if st == '-I' or st == '/I':
if not ot: ot = lst.pop(0)
env.append_unique('CPPPATH_' + uselib, ot)
elif st == '-D':
if not ot: ot = lst.pop(0)
env.append_unique('CXXDEFINES_' + uselib, ot)
env.append_unique('CCDEFINES_' + uselib, ot)
elif st == '-l':
if not ot: ot = lst.pop(0)
env.append_unique('LIB_' + uselib, ot)
elif st == '-L':
if not ot: ot = lst.pop(0)
env.append_unique('LIBPATH_' + uselib, ot)
elif x == '-pthread' or x.startswith('+'):
env.append_unique('CCFLAGS_' + uselib, x)
env.append_unique('CXXFLAGS_' + uselib, x)
env.append_unique('LINKFLAGS_' + uselib, x)
elif x == '-framework':
env.append_unique('FRAMEWORK_' + uselib, lst.pop(0))
elif x.startswith('-F'):
env.append_unique('FRAMEWORKPATH_' + uselib, x[2:])
elif x.startswith('-std'):
env.append_unique('CCFLAGS_' + uselib, x)
env.append_unique('LINKFLAGS_' + uselib, x)
elif x.startswith('-Wl'):
env.append_unique('LINKFLAGS_' + uselib, x)
elif x.startswith('-m') or x.startswith('-f'):
env.append_unique('CCFLAGS_' + uselib, x)
env.append_unique('CXXFLAGS_' + uselib, x)
@conf
def ret_msg(self, f, kw):
"""execute a function, when provided"""
if isinstance(f, str):
return f
return f(kw)
@conf
def validate_cfg(self, kw):
if not 'path' in kw:
kw['path'] = 'pkg-config --errors-to-stdout --print-errors'
# pkg-config version
if 'atleast_pkgconfig_version' in kw:
if not 'msg' in kw:
kw['msg'] = 'Checking for pkg-config version >= %s' % kw['atleast_pkgconfig_version']
return
# pkg-config --modversion
if 'modversion' in kw:
return
if 'variables' in kw:
if not 'msg' in kw:
kw['msg'] = 'Checking for %s variables' % kw['package']
return
# checking for the version of a module, for the moment, one thing at a time
for x in cfg_ver.keys():
y = x.replace('-', '_')
if y in kw:
if not 'package' in kw:
raise ValueError('%s requires a package' % x)
if not 'msg' in kw:
kw['msg'] = 'Checking for %s %s %s' % (kw['package'], cfg_ver[x], kw[y])
return
if not 'msg' in kw:
kw['msg'] = 'Checking for %s' % (kw['package'] or kw['path'])
if not 'okmsg' in kw:
kw['okmsg'] = 'ok'
if not 'errmsg' in kw:
kw['errmsg'] = 'not found'
@conf
def cmd_and_log(self, cmd, kw):
Logs.debug('runner: %s\n' % cmd)
if self.log:
self.log.write('%s\n' % cmd)
try:
p = Utils.pproc.Popen(cmd, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE, shell=True)
(out, err) = p.communicate()
except OSError, e:
self.log.write('error %r' % e)
self.fatal(str(e))
out = str(out)
err = str(err)
if self.log:
self.log.write(out)
self.log.write(err)
if p.returncode:
if not kw.get('errmsg', ''):
if kw.get('mandatory', False):
kw['errmsg'] = out.strip()
else:
kw['errmsg'] = 'fail'
self.fatal('fail')
return out
@conf
def exec_cfg(self, kw):
# pkg-config version
if 'atleast_pkgconfig_version' in kw:
cmd = '%s --atleast-pkgconfig-version=%s' % (kw['path'], kw['atleast_pkgconfig_version'])
self.cmd_and_log(cmd, kw)
if not 'okmsg' in kw:
kw['okmsg'] = 'ok'
return
# checking for the version of a module
for x in cfg_ver:
y = x.replace('-', '_')
if y in kw:
self.cmd_and_log('%s --%s=%s %s' % (kw['path'], x, kw[y], kw['package']), kw)
if not 'okmsg' in kw:
kw['okmsg'] = 'ok'
self.define(self.have_define(kw.get('uselib_store', kw['package'])), 1, 0)
break
# retrieving the version of a module
if 'modversion' in kw:
version = self.cmd_and_log('%s --modversion %s' % (kw['path'], kw['modversion']), kw).strip()
self.define('%s_VERSION' % Utils.quote_define_name(kw.get('uselib_store', kw['modversion'])), version)
return version
# retrieving variables of a module
if 'variables' in kw:
env = kw.get('env', self.env)
uselib = kw.get('uselib_store', kw['package'].upper())
vars = Utils.to_list(kw['variables'])
for v in vars:
val = self.cmd_and_log('%s --variable=%s %s' % (kw['path'], v, kw['package']), kw).strip()
var = '%s_%s' % (uselib, v)
env[var] = val
if not 'okmsg' in kw:
kw['okmsg'] = 'ok'
return
lst = [kw['path']]
for key, val in kw.get('define_variable', {}).iteritems():
lst.append('--define-variable=%s=%s' % (key, val))
lst.append(kw.get('args', ''))
lst.append(kw['package'])
# so we assume the command-line will output flags to be parsed afterwards
cmd = ' '.join(lst)
ret = self.cmd_and_log(cmd, kw)
if not 'okmsg' in kw:
kw['okmsg'] = 'ok'
self.define(self.have_define(kw.get('uselib_store', kw['package'])), 1, 0)
parse_flags(ret, kw.get('uselib_store', kw['package'].upper()), kw.get('env', self.env))
return ret
@conf
def check_cfg(self, *k, **kw):
"""
for pkg-config mostly, but also all the -config tools
conf.check_cfg(path='mpicc', args='--showme:compile --showme:link', package='', uselib_store='OPEN_MPI')
conf.check_cfg(package='dbus-1', variables='system_bus_default_address session_bus_services_dir')
"""
self.validate_cfg(kw)
if 'msg' in kw:
self.check_message_1(kw['msg'])
ret = None
try:
ret = self.exec_cfg(kw)
except Configure.ConfigurationError, e:
if 'errmsg' in kw:
self.check_message_2(kw['errmsg'], 'YELLOW')
if 'mandatory' in kw and kw['mandatory']:
if Logs.verbose > 1:
raise
else:
self.fatal('the configuration failed (see %r)' % self.log.name)
else:
kw['success'] = ret
if 'okmsg' in kw:
self.check_message_2(self.ret_msg(kw['okmsg'], kw))
return ret
# the idea is the following: now that we are certain
# that all the code here is only for c or c++, it is
# easy to put all the logic in one function
#
# this should prevent code duplication (ita)
# env: an optional environment (modified -> provide a copy)
# compiler: cc or cxx - it tries to guess what is best
# type: cprogram, cshlib, cstaticlib
# code: a c code to execute
# uselib_store: where to add the variables
# uselib: parameters to use for building
# define: define to set, like FOO in #define FOO, if not set, add /* #undef FOO */
# execute: True or False - will return the result of the execution
@conf
def validate_c(self, kw):
"""validate the parameters for the test method"""
if not 'env' in kw:
kw['env'] = self.env.copy()
env = kw['env']
if not 'compiler' in kw:
kw['compiler'] = 'cc'
if env['CXX_NAME'] and Task.TaskBase.classes.get('cxx', None):
kw['compiler'] = 'cxx'
if not self.env['CXX']:
self.fatal('a c++ compiler is required')
else:
if not self.env['CC']:
self.fatal('a c compiler is required')
if not 'type' in kw:
kw['type'] = 'cprogram'
assert not(kw['type'] != 'cprogram' and kw.get('execute', 0)), 'can only execute programs'
#if kw['type'] != 'program' and kw.get('execute', 0):
# raise ValueError, 'can only execute programs'
def to_header(dct):
if 'header_name' in dct:
dct = Utils.to_list(dct['header_name'])
return ''.join(['#include <%s>\n' % x for x in dct])
return ''
# set the file name
if not 'compile_mode' in kw:
kw['compile_mode'] = (kw['compiler'] == 'cxx') and 'cxx' or 'cc'
if not 'compile_filename' in kw:
kw['compile_filename'] = 'test.c' + ((kw['compile_mode'] == 'cxx') and 'pp' or '')
#OSX
if 'framework_name' in kw:
try: TaskGen.task_gen.create_task_macapp
except AttributeError: self.fatal('frameworks require the osx tool')
fwkname = kw['framework_name']
if not 'uselib_store' in kw:
kw['uselib_store'] = fwkname.upper()
if not kw.get('no_header', False):
if not 'header_name' in kw:
kw['header_name'] = []
fwk = '%s/%s.h' % (fwkname, fwkname)
if kw.get('remove_dot_h', None):
fwk = fwk[:-2]
kw['header_name'] = Utils.to_list(kw['header_name']) + [fwk]
kw['msg'] = 'Checking for framework %s' % fwkname
kw['framework'] = fwkname
#kw['frameworkpath'] = set it yourself
if 'function_name' in kw:
fu = kw['function_name']
if not 'msg' in kw:
kw['msg'] = 'Checking for function %s' % fu
kw['code'] = to_header(kw) + SNIP1 % fu
if not 'uselib_store' in kw:
kw['uselib_store'] = fu.upper()
if not 'define_name' in kw:
kw['define_name'] = self.have_define(fu)
elif 'type_name' in kw:
tu = kw['type_name']
if not 'msg' in kw:
kw['msg'] = 'Checking for type %s' % tu
if not 'header_name' in kw:
kw['header_name'] = 'stdint.h'
kw['code'] = to_header(kw) + SNIP2 % {'type_name' : tu}
if not 'define_name' in kw:
kw['define_name'] = self.have_define(tu.upper())
elif 'header_name' in kw:
if not 'msg' in kw:
kw['msg'] = 'Checking for header %s' % kw['header_name']
l = Utils.to_list(kw['header_name'])
assert len(l)>0, 'list of headers in header_name is empty'
kw['code'] = to_header(kw) + SNIP3
if not 'uselib_store' in kw:
kw['uselib_store'] = l[0].upper()
if not 'define_name' in kw:
kw['define_name'] = self.have_define(l[0])
if 'lib' in kw:
if not 'msg' in kw:
kw['msg'] = 'Checking for library %s' % kw['lib']
if not 'uselib_store' in kw:
kw['uselib_store'] = kw['lib'].upper()
if 'staticlib' in kw:
if not 'msg' in kw:
kw['msg'] = 'Checking for static library %s' % kw['staticlib']
if not 'uselib_store' in kw:
kw['uselib_store'] = kw['staticlib'].upper()
if 'fragment' in kw:
# an additional code fragment may be provided to replace the predefined code
# in custom headers
kw['code'] = kw['fragment']
if not 'msg' in kw:
kw['msg'] = 'Checking for custom code'
if not 'errmsg' in kw:
kw['errmsg'] = 'fail'
for (flagsname,flagstype) in [('cxxflags','compiler'), ('cflags','compiler'), ('linkflags','linker')]:
if flagsname in kw:
if not 'msg' in kw:
kw['msg'] = 'Checking for %s flags %s' % (flagstype, kw[flagsname])
if not 'errmsg' in kw:
kw['errmsg'] = 'fail'
if not 'execute' in kw:
kw['execute'] = False
if not 'errmsg' in kw:
kw['errmsg'] = 'not found'
if not 'okmsg' in kw:
kw['okmsg'] = 'ok'
if not 'code' in kw:
kw['code'] = SNIP3
if not kw.get('success'): kw['success'] = None
assert 'msg' in kw, 'invalid parameters, read http://freehackers.org/~tnagy/wafbook/single.html#config_helpers_c'
@conf
def post_check(self, *k, **kw):
"set the variables after a test was run successfully"
is_success = False
if kw['execute']:
if kw['success']:
is_success = True
else:
is_success = (kw['success'] == 0)
if 'define_name' in kw:
if 'header_name' in kw or 'function_name' in kw or 'type_name' in kw or 'fragment' in kw:
if kw['execute']:
key = kw['success']
if isinstance(key, str):
if key:
self.define(kw['define_name'], key, quote=kw.get('quote', 1))
else:
self.define_cond(kw['define_name'], True)
else:
self.define_cond(kw['define_name'], False)
else:
self.define_cond(kw['define_name'], is_success)
if is_success and 'uselib_store' in kw:
import cc, cxx
for k in set(cc.g_cc_flag_vars).union(cxx.g_cxx_flag_vars):
lk = k.lower()
# inconsistency: includes -> CPPPATH
if k == 'CPPPATH': lk = 'includes'
if k == 'CXXDEFINES': lk = 'defines'
if k == 'CCDEFINES': lk = 'defines'
if lk in kw:
val = kw[lk]
# remove trailing slash
if isinstance(val, str):
val = val.rstrip(os.path.sep)
self.env.append_unique(k + '_' + kw['uselib_store'], val)
@conf
def check(self, *k, **kw):
# so this will be the generic function
# it will be safer to use check_cxx or check_cc
self.validate_c(kw)
self.check_message_1(kw['msg'])
ret = None
try:
ret = self.run_c_code(*k, **kw)
except Configure.ConfigurationError, e:
self.check_message_2(kw['errmsg'], 'YELLOW')
if 'mandatory' in kw and kw['mandatory']:
if Logs.verbose > 1:
raise
else:
self.fatal('the configuration failed (see %r)' % self.log.name)
else:
kw['success'] = ret
self.check_message_2(self.ret_msg(kw['okmsg'], kw))
self.post_check(*k, **kw)
if not kw.get('execute', False):
return ret == 0
return ret
@conf
def run_c_code(self, *k, **kw):
test_f_name = kw['compile_filename']
k = 0
while k < 10000:
# make certain to use a fresh folder - necessary for win32
dir = os.path.join(self.blddir, '.conf_check_%d' % k)
# if the folder already exists, remove it
try:
shutil.rmtree(dir)
except OSError:
pass
try:
os.stat(dir)
except OSError:
break
k += 1
try:
os.makedirs(dir)
except:
self.fatal('cannot create a configuration test folder %r' % dir)
try:
os.stat(dir)
except:
self.fatal('cannot use the configuration test folder %r' % dir)
bdir = os.path.join(dir, 'testbuild')
if not os.path.exists(bdir):
os.makedirs(bdir)
env = kw['env']
dest = open(os.path.join(dir, test_f_name), 'w')
dest.write(kw['code'])
dest.close()
back = os.path.abspath('.')
bld = Build.BuildContext()
bld.log = self.log
bld.all_envs.update(self.all_envs)
bld.all_envs['default'] = env
bld.lst_variants = bld.all_envs.keys()
bld.load_dirs(dir, bdir)
os.chdir(dir)
bld.rescan(bld.srcnode)
o = bld(features=[kw['compile_mode'], kw['type']], source=test_f_name, target='testprog')
for k, v in kw.iteritems():
setattr(o, k, v)
self.log.write("==>\n%s\n<==\n" % kw['code'])
# compile the program
try:
bld.compile()
except Utils.WafError:
ret = Utils.ex_stack()
else:
ret = 0
# chdir before returning
os.chdir(back)
if ret:
self.log.write('command returned %r' % ret)
self.fatal(str(ret))
# keep the name of the program to execute
if kw['execute']:
lastprog = o.link_task.outputs[0].abspath(env)
# if we need to run the program, try to get its result
if kw['execute']:
args = Utils.to_list(kw.get('exec_args', []))
proc = Utils.pproc.Popen([lastprog] + args, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE)
(out, err) = proc.communicate()
w = self.log.write
w(str(out))
w('\n')
w(str(err))
w('\n')
w('returncode %r' % proc.returncode)
w('\n')
if proc.returncode:
self.fatal(Utils.ex_stack())
ret = out
return ret
@conf
def check_cxx(self, *k, **kw):
kw['compiler'] = 'cxx'
return self.check(*k, **kw)
@conf
def check_cc(self, *k, **kw):
kw['compiler'] = 'cc'
return self.check(*k, **kw)
@conf
def define(self, define, value, quote=1):
"""store a single define and its state into an internal list for later
writing to a config header file. Value can only be
a string or int; other types not supported. String
values will appear properly quoted in the generated
header file."""
assert define and isinstance(define, str)
# ordered_dict is for writing the configuration header in order
tbl = self.env[DEFINES] or Utils.ordered_dict()
# the user forgot to tell if the value is quoted or not
if isinstance(value, str):
if quote:
tbl[define] = '"%s"' % repr('"'+value)[2:-1].replace('"', '\\"')
else:
tbl[define] = value
elif isinstance(value, int):
tbl[define] = value
else:
raise TypeError('define %r -> %r must be a string or an int' % (define, value))
# add later to make reconfiguring faster
self.env[DEFINES] = tbl
self.env[define] = value # <- not certain this is necessary
@conf
def undefine(self, define):
"""store a single define and its state into an internal list
for later writing to a config header file"""
assert define and isinstance(define, str)
tbl = self.env[DEFINES] or Utils.ordered_dict()
value = UNDEFINED
tbl[define] = value
# add later to make reconfiguring faster
self.env[DEFINES] = tbl
self.env[define] = value
@conf
def define_cond(self, name, value):
"""Conditionally define a name.
Formally equivalent to: if value: define(name, 1) else: undefine(name)"""
if value:
self.define(name, 1)
else:
self.undefine(name)
@conf
def is_defined(self, key):
defines = self.env[DEFINES]
if not defines:
return False
try:
value = defines[key]
except KeyError:
return False
else:
return value != UNDEFINED
@conf
def get_define(self, define):
"get the value of a previously stored define"
try: return self.env[DEFINES][define]
except KeyError: return None
@conf
def have_define(self, name):
"prefix the define with 'HAVE_' and make sure it has valid characters."
return self.__dict__.get('HAVE_PAT', 'HAVE_%s') % Utils.quote_define_name(name)
@conf
def write_config_header(self, configfile='', env='', guard='', top=False):
"save the defines into a file"
if not configfile: configfile = WAF_CONFIG_H
waf_guard = guard or '_%s_WAF' % Utils.quote_define_name(configfile)
# configfile -> absolute path
# there is a good reason to concatenate first and to split afterwards
if not env: env = self.env
if top:
diff = ''
else:
diff = Utils.diff_path(self.srcdir, self.curdir)
full = os.sep.join([self.blddir, env.variant(), diff, configfile])
full = os.path.normpath(full)
(dir, base) = os.path.split(full)
try: os.makedirs(dir)
except: pass
dest = open(full, 'w')
dest.write('/* Configuration header created by Waf - do not edit */\n')
dest.write('#ifndef %s\n#define %s\n\n' % (waf_guard, waf_guard))
dest.write(self.get_config_header())
# config files are not removed on "waf clean"
env.append_value(CFG_FILES, os.path.join(diff, configfile))
dest.write('\n#endif /* %s */\n' % waf_guard)
dest.close()
@conf
def get_config_header(self):
"""Fill-in the contents of the config header. Override when you need to write your own config header."""
config_header = []
tbl = self.env[DEFINES] or Utils.ordered_dict()
for key in tbl.allkeys:
value = tbl[key]
if value is None:
config_header.append('#define %s' % key)
elif value is UNDEFINED:
config_header.append('/* #undef %s */' % key)
else:
config_header.append('#define %s %s' % (key, value))
return "\n".join(config_header)
@conftest
def find_cpp(conf):
v = conf.env
cpp = None
if v['CPP']: cpp = v['CPP']
elif 'CPP' in conf.environ: cpp = conf.environ['CPP']
if not cpp: cpp = conf.find_program('cpp', var='CPP')
if not cpp: cpp = v['CC']
if not cpp: cpp = v['CXX']
v['CPP'] = cpp
@conftest
def cc_add_flags(conf):
conf.add_os_flags('CFLAGS', 'CCFLAGS')
conf.add_os_flags('CPPFLAGS')
@conftest
def cxx_add_flags(conf):
conf.add_os_flags('CXXFLAGS')
conf.add_os_flags('CPPFLAGS')
@conftest
def link_add_flags(conf):
conf.add_os_flags('LINKFLAGS')
conf.add_os_flags('LDFLAGS', 'LINKFLAGS')
@conftest
def cc_load_tools(conf):
conf.check_tool('cc')
@conftest
def cxx_load_tools(conf):
conf.check_tool('cxx')
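# --- Usage sketch (assumption: not part of waf itself) ---
# Typical calls from a project's wscript "configure" function; the package,
# header and function names below are illustrative only.
def _example_configure(conf):
    # pkg-config based check; flags end up in the 'ZLIB' uselib variables
    conf.check_cfg(package='zlib', args='--cflags --libs',
                   uselib_store='ZLIB', mandatory=False)
    # header/function checks compile the SNIP* fragments via run_c_code()
    conf.check_cc(header_name='stdio.h')
    conf.check_cc(function_name='printf', header_name='stdio.h')
    # write the accumulated defines (DEFINES table) to a config header
    conf.write_config_header('config.h')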
|
py
|
1a5756c0789c42b6165dfc27db6b7dc80b595abb
|
"""Contains the OnScreenDebug class."""
__all__ = ['OnScreenDebug']
from panda3d.core import *
from direct.gui import OnscreenText
from direct.directtools import DirectUtil
class OnScreenDebug:
enabled = ConfigVariableBool("on-screen-debug-enabled", False)
def __init__(self):
self.onScreenText = None
self.frame = 0
self.text = ""
self.data = {}
def load(self):
if self.onScreenText:
return
fontPath = ConfigVariableString("on-screen-debug-font", "cmtt12").value
fontScale = ConfigVariableDouble("on-screen-debug-font-scale", 0.05).value
color = {
"black": Vec4(0, 0, 0, 1),
"white": Vec4(1, 1, 1, 1),
}
fgColor = color[ConfigVariableString("on-screen-debug-fg-color", "white").value]
bgColor = color[ConfigVariableString("on-screen-debug-bg-color", "black").value]
fgColor.setW(ConfigVariableDouble("on-screen-debug-fg-alpha", 0.85).value)
bgColor.setW(ConfigVariableDouble("on-screen-debug-bg-alpha", 0.85).value)
font = loader.loadFont(fontPath)
if not font.isValid():
print("failed to load OnScreenDebug font %s" % fontPath)
font = TextNode.getDefaultFont()
self.onScreenText = OnscreenText.OnscreenText(
pos = (-1.0, 0.9), fg=fgColor, bg=bgColor,
scale = (fontScale, fontScale, 0.0), align = TextNode.ALeft,
mayChange = 1, font = font)
# Make sure readout is never lit or drawn in wireframe
DirectUtil.useDirectRenderStyle(self.onScreenText)
def render(self):
if not self.enabled:
return
if not self.onScreenText:
self.load()
self.onScreenText.clearText()
entries = list(self.data.items())
entries.sort()
for k, v in entries:
if v[0] == self.frame:
# It was updated this frame (key equals value):
#isNew = " is"
isNew = "="
else:
# This data is not for the current
# frame (key roughly equals value):
#isNew = "was"
isNew = "~"
value = v[1]
if type(value) == float:
value = "% 10.4f"%(value,)
# else: other types will be converted to str by the "%s"
self.onScreenText.appendText("%20s %s %-44s\n"%(k, isNew, value))
self.onScreenText.appendText(self.text)
self.frame += 1
def clear(self):
self.text = ""
if self.onScreenText:
self.onScreenText.clearText()
def add(self, key, value):
self.data[key] = (self.frame, value)
return 1 # to allow assert onScreenDebug.add("foo", bar)
def has(self, key):
return key in self.data
def remove(self, key):
del self.data[key]
def removeAllWithPrefix(self, prefix):
toRemove = []
for key in list(self.data.keys()):
if len(key) >= len(prefix):
if key[:len(prefix)] == prefix:
toRemove.append(key)
for key in toRemove:
self.remove(key)
def append(self, text):
self.text += text
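# --- Usage sketch (assumption: not part of the original Panda3D module) ---
# The readout needs a running ShowBase app, since load() relies on the global
# "loader"; the task and variable names below are illustrative only.
if __name__ == '__main__':
    from direct.showbase.ShowBase import ShowBase
    base = ShowBase()
    osd = OnScreenDebug()
    osd.enabled = True  # bypass the "on-screen-debug-enabled" config variable
    def update(task):
        osd.add("frame", task.frame)  # refreshed each frame, so rendered with "="
        osd.render()
        return task.cont
    base.taskMgr.add(update, "osd-update")
    base.run()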
|
py
|
1a5756c933aecbb6fbec0732c5bcac44c5557423
|
# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
RESOURCE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "resources"))
# these regions have some p2 instances, but not enough for automated testing
NO_P2_REGIONS = [
"ca-central-1",
"eu-central-1",
"eu-west-2",
"us-west-1",
"eu-west-3",
"eu-north-1",
"sa-east-1",
"ap-east-1",
"me-south-1",
"cn-northwest-1",
"eu-south-1",
"af-south-1",
]
NO_P3_REGIONS = [
"ap-northeast-1",
"ap-northeast-2",
"ap-southeast-1",
"ap-southeast-2",
"ap-south-1",
"ca-central-1",
"eu-central-1",
"eu-west-2",
"us-west-1",
"eu-west-3",
"eu-north-1",
"sa-east-1",
"ap-east-1",
"me-south-1",
"cn-northwest-1",
"eu-south-1",
"af-south-1",
]
NO_P4_REGIONS = [
"ap-southeast-1",
"ap-southeast-2",
"ap-south-1",
"ca-central-1",
"eu-central-1",
"eu-west-2",
"us-west-1",
"eu-west-3",
"eu-north-1",
"sa-east-1",
"ap-east-1",
"me-south-1",
"cn-northwest-1",
"eu-south-1",
"af-south-1",
]
MODEL_SUCCESS_FILES = {
"output": ["success"],
"model": ["model-symbol.json", "model-shapes.json", "model-0000.params"],
}
|
py
|
1a5756d058b4d5cc84477fe4eb8dd719ad8c4362
|
from unityagents import UnityEnvironment
import numpy as np
env = UnityEnvironment(file_name='./Tennis_Linux/Tennis.x86_64')
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
import time
import random
import torch
from collections import deque
import matplotlib.pyplot as plt
from ddpg.ddpg_agent import Agent, NoisyAgent
#agent = Agent(num_agents=num_agents, state_size=state_size, action_size=action_size, random_seed=2)
agent = NoisyAgent(num_agents=num_agents, state_size=state_size, action_size=action_size, random_seed=2)
def ddpg(n_episodes=2000):
scores_deque = deque(maxlen=100)
scores = []
mean_scores = []
start = time.time()
for i_episode in range(1, n_episodes+1):
agent.reset()
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
states = env_info.vector_observations
add_noise=True
score = np.zeros(num_agents)
t = 0
while True:
#print('\r{}: {}'.format(t, score), end="")
t += 1
#action = agent.act(state)
#next_state, reward, done, _ = env.step(action)
#env.render()
#actions = np.random.randn(num_agents, action_size) # select an action (for each agent)
#actions = np.clip(actions, -1, 1) # all actions between -1 and 1
#actions = agent.act(states, add_noise=add_noise) # select an action (for each agent)
actions = agent.act(states) # select an action (for each agent)
#print('\r[{}]{}'.format(t, actions[0]), end="")
            env_info = env.step(actions)[brain_name]           # send all actions to the environment
next_states = env_info.vector_observations # get next state (for each agent)
rewards = env_info.rewards # get reward (for each agent)
dones = env_info.local_done # see if episode finished
for i in range(num_agents):
agent.step(states[i], actions[i], rewards[i], next_states[i], dones[i])
num_update = 5
for _ in range(num_update):
agent.update()
states = next_states
score += np.array(rewards)
#print('\r{}: {} {} {}'.format(t, score, actions[0], actions[1]), end="")
if np.any(dones):
break
max_score = np.max(score)
scores_deque.append(max_score)
scores.append(max_score)
mean_scores.append(np.mean(scores_deque))
current = time.time()
elapsed = current - start
elapsed_str = time.strftime("%H:%M:%S", time.gmtime(elapsed))
print('\rEpisode {}\tAverage Score: {:.2f}\t{}'.format(i_episode, np.mean(scores_deque), elapsed_str), end="")
if i_episode%50 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}\t{}'.format(i_episode, np.mean(scores_deque), elapsed_str))
if np.mean(scores_deque) > 1.0:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_deque)))
break
torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')
torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')
    return scores, mean_scores
scores, mean_scores = ddpg()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores, color='blue')
plt.plot(np.arange(1, len(mean_scores)+1), mean_scores, color='orange')
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
env.close()
|
py
|
1a575738b327f84e184a5b4271fbc8b9b7a210e4
|
from construct import Tell, Pointer, Int32ub, Struct, Array, Rebuild, If
from retro_data_structures.construct_extensions.alignment import AlignedPrefixed
from retro_data_structures.construct_extensions.misc import Skip
def _get_current_section(context, increment=True) -> int:
root = context._root
section = root["_current_section"]
if increment:
root["_current_section"] += 1
return section
def _get_section_length_address(context):
index = _get_current_section(context)
return context._root.data_section_sizes.address + index * Int32ub.length
def DataSectionSizes(section_count, include_value=False, rebuildfunc=lambda this: 0):
return Struct(
address=Tell,
value=If(include_value, Array(section_count, Rebuild(Int32ub, rebuildfunc))),
offset=If(lambda this: not include_value, Skip(section_count, Int32ub)),
)
def GetDataSectionSize(context) -> int:
return context._root.data_section_sizes.value[_get_current_section(context)]
def GetDataSectionId(context):
return _get_current_section(context, False)
def ResetCurrentSection(context):
root = context._root
root._current_section = 0
def DataSectionSizePointer():
return Pointer(_get_section_length_address, Int32ub)
def DataSection(subcon, align=32, size=DataSectionSizePointer):
return AlignedPrefixed(size(), subcon, align, 0, b"\x00")
|
py
|
1a57584c13ea2c1cd9f124817c7d838ddd9a51b5
|
# -*- coding: utf-8 -*-
# Copyright 2018-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Thomas Beermann <[email protected]>, 2018
# - Mario Lassnig <[email protected]>, 2018-2020
# - Hannes Hansen <[email protected]>, 2018-2019
# - Cedric Serfon <[email protected]>, 2018-2019
# - Martin Barisits <[email protected]>, 2019-2020
# - James Perry <[email protected]>, 2019-2020
# - Andrew Lister <[email protected]>, 2019
# - Benedikt Ziemons <[email protected]>, 2020
from __future__ import print_function
from datetime import datetime
from json import dumps, loads
from traceback import format_exc
from xml.sax.saxutils import escape
from flask import Flask, Blueprint, Response, request
from flask.views import MethodView
from six import string_types
from rucio.api.replica import (add_replicas, list_replicas, list_dataset_replicas,
list_dataset_replicas_bulk, delete_replicas,
get_did_from_pfns, update_replicas_states,
declare_bad_file_replicas, add_bad_dids, add_bad_pfns, get_suspicious_files,
declare_suspicious_file_replicas, list_bad_replicas_status,
get_bad_replicas_summary, list_datasets_per_rse,
set_tombstone, list_dataset_replicas_vp)
from rucio.common.config import config_get
from rucio.common.constants import SUPPORTED_PROTOCOLS
from rucio.common.exception import (AccessDenied, DataIdentifierAlreadyExists, InvalidType,
DataIdentifierNotFound, Duplicate, InvalidPath,
ResourceTemporaryUnavailable, RucioException,
RSENotFound, UnsupportedOperation, ReplicaNotFound,
InvalidObject, ScopeNotFound)
from rucio.common.utils import parse_response, APIEncoder, render_json_list
from rucio.core.replica_sorter import sort_replicas
from rucio.db.sqla.constants import BadFilesStatus, ReplicaState
from rucio.web.rest.flaskapi.v1.common import check_accept_header_wrapper_flask, try_stream, parse_scope_name, request_auth_env, response_headers
from rucio.web.rest.utils import generate_http_error_flask
try:
from urllib import unquote
from urlparse import parse_qs
except ImportError:
from urllib.parse import unquote
from urllib.parse import parse_qs
class Replicas(MethodView):
@check_accept_header_wrapper_flask(['application/x-json-stream', 'application/metalink4+xml'])
def get(self, scope_name):
"""
List all replicas for data identifiers.
.. :quickref: Replicas; List replicas for DID.
HTTP Success:
200 OK
HTTP Error:
401 Unauthorized
500 InternalError
:reqheader HTTP_ACCEPT: application/metalink4+xml
:param scope_name: data identifier (scope)/(name).
:resheader Content-Type: application/x-json-stream
:resheader Content-Type: application/metalink4+xml
:status 200: OK.
:status 401: Invalid auth token.
:status 404: DID not found.
:status 406: Not Acceptable.
:status 500: Internal Error.
:returns: A dictionary containing all replicas information.
:returns: A metalink description of replicas if metalink(4)+xml is specified in Accept:
"""
try:
scope, name = parse_scope_name(scope_name, request.environ.get('vo'))
except ValueError as error:
return generate_http_error_flask(400, 'ValueError', error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
content_type = request.accept_mimetypes.best_match(['application/x-json-stream', 'application/metalink4+xml'], 'application/x-json-stream')
metalink = (content_type == 'application/metalink4+xml')
dids, schemes, select, limit = [{'scope': scope, 'name': name}], None, None, None
client_ip = request.headers.get('X-Forwarded-For', default=request.remote_addr)
client_location = {'ip': client_ip,
'fqdn': None,
'site': None}
schemes = request.args.get('schemes', None)
select = request.args.get('select', None)
limit = request.args.get('limit', None)
if limit:
limit = int(limit)
# Resolve all reasonable protocols when doing metalink for maximum access possibilities
if metalink and schemes is None:
schemes = SUPPORTED_PROTOCOLS
try:
def generate(vo):
# we need to call list_replicas before starting to reply
# otherwise the exceptions won't be propagated correctly
first = metalink
# then, stream the replica information
for rfile in list_replicas(dids=dids, schemes=schemes, vo=vo):
if first and metalink:
# first, set the appropriate content type, and stream the header
yield '<?xml version="1.0" encoding="UTF-8"?>\n<metalink xmlns="urn:ietf:params:xml:ns:metalink">\n'
first = False
replicas = []
dictreplica = {}
for rse in rfile['rses']:
for replica in rfile['rses'][rse]:
replicas.append(replica)
dictreplica[replica] = rse
replicas = sort_replicas(dictreplica, client_location, selection=select)
if not metalink:
yield dumps(rfile) + '\n'
else:
yield ' <file name="' + rfile['name'] + '">\n'
yield ' <identity>' + rfile['scope'] + ':' + rfile['name'] + '</identity>\n'
if rfile['adler32'] is not None:
yield ' <hash type="adler32">' + rfile['adler32'] + '</hash>\n'
if rfile['md5'] is not None:
yield ' <hash type="md5">' + rfile['md5'] + '</hash>\n'
yield ' <size>' + str(rfile['bytes']) + '</size>\n'
yield ' <glfn name="/atlas/rucio/%s:%s">' % (rfile['scope'], rfile['name'])
yield '</glfn>\n'
idx = 0
for replica in replicas:
yield ' <url location="' + str(dictreplica[replica]) + '" priority="' + str(idx + 1) + '">' + escape(replica) + '</url>\n'
idx += 1
if limit and limit == idx:
break
yield ' </file>\n'
if metalink:
if first:
# if still first output, i.e. there were no replicas
yield '<?xml version="1.0" encoding="UTF-8"?>\n<metalink xmlns="urn:ietf:params:xml:ns:metalink">\n</metalink>\n'
else:
# don't forget to send the metalink footer
yield '</metalink>\n'
return try_stream(generate(vo=request.environ.get('vo')), content_type=content_type)
except DataIdentifierNotFound as error:
return generate_http_error_flask(404, 'DataIdentifierNotFound', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
def post(self):
"""
Create file replicas at a given RSE.
.. :quickref: Replicas; create replicas at RSE
:<json string rse: The RSE name.
:<json list files: list of dicts with 'scope', 'name', 'bytes', 'meta' and 'adler32'.
:<json bool ignore_availability: Flag to ignore the RSE blacklisting.
:status 201: Replica Successfully created.
:status 400: Invalid Path.
:status 401: Invalid auth token.
:status 404: RSE not found.
:status 404: Scope not found.
:status 409: Replica already exists.
:status 409: DID already exists.
:status 503: Resource Temporary Unavailable.
"""
json_data = request.data
try:
parameters = parse_response(json_data)
except ValueError:
return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')
try:
add_replicas(rse=parameters['rse'], files=parameters['files'],
issuer=request.environ.get('issuer'), vo=request.environ.get('vo'),
ignore_availability=parameters.get('ignore_availability', False))
except InvalidPath as error:
return generate_http_error_flask(400, 'InvalidPath', error.args[0])
except AccessDenied as error:
return generate_http_error_flask(401, 'AccessDenied', error.args[0])
except Duplicate as error:
return generate_http_error_flask(409, 'Duplicate', error.args[0])
except DataIdentifierAlreadyExists as error:
return generate_http_error_flask(409, 'DataIdentifierAlreadyExists', error.args[0])
except RSENotFound as error:
return generate_http_error_flask(404, 'RSENotFound', error.args[0])
except ScopeNotFound as error:
return generate_http_error_flask(404, 'ScopeNotFound', error.args[0])
except ResourceTemporaryUnavailable as error:
return generate_http_error_flask(503, 'ResourceTemporaryUnavailable', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
return 'Created', 201
def put(self):
"""
Update a file replicas state at a given RSE.
.. :quickref: Replicas; update replicas state.
:<json string rse: The RSE name.
:<json list files: list of dicts with 'scope', 'name' and 'state'.
        :status 200: Replica successfully updated.
:status 400: Cannot decode json parameter list.
:status 401: Invalid auth token.
:status 500: Internal Error.
"""
json_data = request.data
try:
parameters = parse_response(json_data)
except ValueError:
return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')
try:
update_replicas_states(rse=parameters['rse'], files=parameters['files'], issuer=request.environ.get('issuer'), vo=request.environ.get('vo'))
except AccessDenied as error:
return generate_http_error_flask(401, 'AccessDenied', error.args[0])
except UnsupportedOperation as error:
return generate_http_error_flask(500, 'UnsupportedOperation', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
return '', 200
def delete(self):
"""
Delete file replicas at a given RSE.
.. :quickref: Replicas; Delete replica at RSE.
:<json string rse: The RSE name.
:<json list files: list of dicts with 'scope', 'name'.
:<json bool ignore_availability: Flag to ignore the RSE blacklisting.
:status 200: Replica successfully deleted.
:status 400: Cannot decode json parameter list.
:status 401: Invalid auth token.
:status 404: RSE not found.
:status 404: Replica not found.
:status 500: Internal Error.
"""
json_data = request.data
try:
parameters = parse_response(json_data)
except ValueError:
return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')
try:
delete_replicas(rse=parameters['rse'], files=parameters['files'],
issuer=request.environ.get('issuer'), vo=request.environ.get('vo'),
ignore_availability=parameters.get('ignore_availability', False))
except AccessDenied as error:
return generate_http_error_flask(401, 'AccessDenied', error.args[0])
except RSENotFound as error:
return generate_http_error_flask(404, 'RSENotFound', error.args[0])
except ResourceTemporaryUnavailable as error:
return generate_http_error_flask(503, 'ResourceTemporaryUnavailable', error.args[0])
except ReplicaNotFound as error:
return generate_http_error_flask(404, 'ReplicaNotFound', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
return '', 200
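# --- Usage sketch (assumption: not part of the original Rucio module) ---
# Illustrative request body for the POST handler of the Replicas view above;
# field names follow its docstring, values are made up for the example.
_EXAMPLE_ADD_REPLICAS_BODY = {
    'rse': 'MOCK_RSE',
    'files': [{'scope': 'user.jdoe', 'name': 'file_1', 'bytes': 1024,
               'adler32': '0cc737eb', 'meta': {'events': 10}}],
    'ignore_availability': False,
}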
class ListReplicas(MethodView):
@check_accept_header_wrapper_flask(['application/x-json-stream', 'application/metalink4+xml'])
def post(self):
"""
List all replicas for data identifiers.
.. :quickref: Replicas; List replicas for multiple DIDs.
:reqheader HTTP_ACCEPT: application/metalink4+xml
:query schemes: A list of schemes to filter the replicas.
:query sort: Requested sorting of the result, e.g., 'geoip', 'closeness', 'dynamic', 'ranking', 'random'.
:<json list dids: list of DIDs.
:<json list schemes: A list of schemes to filter the replicas.
:<json bool unavailable: Also include unavailable replicas.
:<json bool all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
:<json string rse_expression: The RSE expression to restrict on a list of RSEs.
:<json dict client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site'}.
:<json bool sort: Requested sorting of the result, e.g., 'geoip', 'closeness', 'dynamic', 'ranking', 'random'.
:<json string domain: The network domain for the call, either None, 'wan' or 'lan'. None is fallback to 'wan', 'all' is both ['lan','wan']
:resheader Content-Type: application/x-json-stream
:resheader Content-Type: application/metalink4+xml
:status 200: OK.
:status 400: Cannot decode json parameter list.
:status 401: Invalid auth token.
:status 404: DID not found.
:status 406: Not Acceptable.
:status 500: Internal Error.
:returns: A dictionary containing all replicas information.
:returns: A metalink description of replicas if metalink(4)+xml is specified in Accept:
"""
content_type = request.accept_mimetypes.best_match(['application/x-json-stream', 'application/metalink4+xml'], 'application/x-json-stream')
metalink = (content_type == 'application/metalink4+xml')
client_ip = request.headers.get('X-Forwarded-For', default=request.remote_addr)
dids, schemes, select, unavailable, limit = [], None, None, False, None
ignore_availability, rse_expression, all_states, domain = False, None, False, None
signature_lifetime, resolve_archives, resolve_parents = None, True, False
updated_after = None
client_location = {'ip': client_ip,
'fqdn': None,
'site': None}
try:
params = parse_response(request.data)
if 'dids' in params:
dids = params['dids']
if 'schemes' in params:
schemes = params['schemes']
if 'unavailable' in params:
unavailable = params['unavailable']
ignore_availability = True
if 'all_states' in params:
all_states = params['all_states']
if 'rse_expression' in params:
rse_expression = params['rse_expression']
if 'client_location' in params:
client_location.update(params['client_location'])
if 'sort' in params:
select = params['sort']
if 'domain' in params:
domain = params['domain']
if 'resolve_archives' in params:
resolve_archives = params['resolve_archives']
if 'resolve_parents' in params:
resolve_parents = params['resolve_parents']
if 'signature_lifetime' in params:
signature_lifetime = params['signature_lifetime']
else:
# hardcoded default of 10 minutes if config is not parseable
signature_lifetime = config_get('credentials', 'signature_lifetime', raise_exception=False, default=600)
if 'updated_after' in params:
if isinstance(params['updated_after'], (int, float)):
# convert from epoch time stamp to datetime object
updated_after = datetime.utcfromtimestamp(params['updated_after'])
else:
# attempt UTC format '%Y-%m-%dT%H:%M:%S' conversion
updated_after = datetime.strptime(params['updated_after'], '%Y-%m-%dT%H:%M:%S')
except ValueError:
return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')
if request.query_string:
query_string = request.query_string.decode(encoding='utf-8')
params = parse_qs(query_string)
if 'select' in params:
select = params['select'][0]
if 'limit' in params:
limit = params['limit'][0]
if 'sort' in params:
select = params['sort']
# Resolve all reasonable protocols when doing metalink for maximum access possibilities
if metalink and schemes is None:
schemes = SUPPORTED_PROTOCOLS
content_type = 'application/metalink4+xml' if metalink else 'application/x-json-stream'
try:
def generate(request_id, issuer, vo):
# we need to call list_replicas before starting to reply
# otherwise the exceptions won't be propagated correctly
first = metalink
for rfile in list_replicas(dids=dids, schemes=schemes,
unavailable=unavailable,
request_id=request_id,
ignore_availability=ignore_availability,
all_states=all_states,
rse_expression=rse_expression,
client_location=client_location,
domain=domain, signature_lifetime=signature_lifetime,
resolve_archives=resolve_archives,
resolve_parents=resolve_parents,
updated_after=updated_after,
issuer=issuer,
vo=vo):
# in first round, set the appropriate content type, and stream the header
if first and metalink:
yield '<?xml version="1.0" encoding="UTF-8"?>\n<metalink xmlns="urn:ietf:params:xml:ns:metalink">\n'
first = False
if not metalink:
yield dumps(rfile, cls=APIEncoder) + '\n'
else:
replicas = []
dictreplica = {}
for replica in rfile['pfns'].keys():
replicas.append(replica)
dictreplica[replica] = (rfile['pfns'][replica]['domain'],
rfile['pfns'][replica]['priority'],
rfile['pfns'][replica]['rse'],
rfile['pfns'][replica]['client_extract'])
yield ' <file name="' + rfile['name'] + '">\n'
if 'parents' in rfile and rfile['parents']:
yield ' <parents>\n'
for parent in rfile['parents']:
yield ' <did>' + parent + '</did>\n'
yield ' </parents>\n'
yield ' <identity>' + rfile['scope'] + ':' + rfile['name'] + '</identity>\n'
if rfile['adler32'] is not None:
yield ' <hash type="adler32">' + rfile['adler32'] + '</hash>\n'
if rfile['md5'] is not None:
yield ' <hash type="md5">' + rfile['md5'] + '</hash>\n'
yield ' <size>' + str(rfile['bytes']) + '</size>\n'
yield ' <glfn name="/%s/rucio/%s:%s"></glfn>\n' % (config_get('policy', 'schema',
raise_exception=False,
default='generic'),
rfile['scope'],
rfile['name'])
lanreplicas = [replica for replica, v in dictreplica.items() if v[0] == 'lan']
replicas = lanreplicas + sort_replicas({k: v for k, v in dictreplica.items() if v[0] != 'lan'}, client_location, selection=select)
idx = 1
for replica in replicas:
yield ' <url location="' + str(dictreplica[replica][2]) \
+ '" domain="' + str(dictreplica[replica][0]) \
+ '" priority="' + str(idx) \
+ '" client_extract="' + str(dictreplica[replica][3]).lower() \
+ '">' + escape(replica) + '</url>\n'
if limit and limit == idx:
break
idx += 1
yield ' </file>\n'
if metalink:
if first:
# if still first output, i.e. there were no replicas
yield '<?xml version="1.0" encoding="UTF-8"?>\n<metalink xmlns="urn:ietf:params:xml:ns:metalink">\n</metalink>\n'
else:
# don't forget to send the metalink footer
yield '</metalink>\n'
return try_stream(generate(request_id=request.environ.get('request_id'),
issuer=request.environ.get('issuer'),
vo=request.environ.get('vo')),
content_type=content_type)
except DataIdentifierNotFound as error:
return generate_http_error_flask(404, 'DataIdentifierNotFound', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
class ReplicasDIDs(MethodView):
@check_accept_header_wrapper_flask(['application/x-json-stream'])
def post(self):
"""
List the DIDs associated to a list of replicas.
.. :quickref: ReplicasDIDs; List DIDs for replicas.
:<json string pfns: The list of PFNs.
:<json string rse: The RSE name.
:resheader Content-Type: application/x-json-string
:status 200: OK.
:status 400: Cannot decode json parameter list.
:status 406: Not Acceptable.
:status 500: Internal Error.
:returns: A list of dictionaries containing the mapping PFNs to DIDs.
"""
json_data = request.data
        rse, pfns = None, []
try:
params = parse_response(json_data)
if 'pfns' in params:
pfns = params['pfns']
if 'rse' in params:
rse = params['rse']
except ValueError:
return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')
try:
def generate(vo):
for pfn in get_did_from_pfns(pfns, rse, vo=vo):
yield dumps(pfn) + '\n'
return try_stream(generate(vo=request.environ.get('vo')))
except AccessDenied as error:
return generate_http_error_flask(401, 'AccessDenied', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
class BadReplicas(MethodView):
@check_accept_header_wrapper_flask(['application/json'])
def post(self):
"""
Declare a list of bad replicas.
.. :quickref: BadReplicasStates; Declare bad replicas.
:<json string pfns: The list of PFNs.
:<json string reason: The reason of the loss.
:resheader Content-Type: application/json
:status 201: Created.
:status 400: Cannot decode json parameter list.
:status 401: Invalid auth token.
:status 404: RSE not found.
:status 404: Replica not found.
:status 500: Internal Error.
:returns: A list of not successfully declared files.
"""
json_data = request.data
pfns = []
try:
params = parse_response(json_data)
if 'pfns' in params:
pfns = params['pfns']
if 'reason' in params:
reason = params['reason']
except ValueError:
return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')
not_declared_files = {}
try:
not_declared_files = declare_bad_file_replicas(pfns=pfns, reason=reason, issuer=request.environ.get('issuer'), vo=request.environ.get('vo'))
except AccessDenied as error:
return generate_http_error_flask(401, 'AccessDenied', error.args[0])
except RSENotFound as error:
return generate_http_error_flask(404, 'RSENotFound', error.args[0])
except ReplicaNotFound as error:
return generate_http_error_flask(404, 'ReplicaNotFound', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
return Response(dumps(not_declared_files), status=201, content_type='application/json')
class SuspiciousReplicas(MethodView):
@check_accept_header_wrapper_flask(['application/json'])
def post(self):
"""
Declare a list of suspicious replicas.
.. :quickref: SuspiciousReplicas; Declare suspicious replicas.
:<json string pfns: The list of PFNs.
:<json string reason: The reason of the loss.
:resheader Content-Type: application/json
:status 201: Created.
:status 400: Cannot decode json parameter list.
:status 401: Invalid auth token.
:status 404: Replica not found.
:status 500: Internal Error.
:returns: A list of not successfully declared files.
"""
json_data = request.data
pfns = []
try:
params = parse_response(json_data)
if 'pfns' in params:
pfns = params['pfns']
if 'reason' in params:
reason = params['reason']
except ValueError:
return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')
not_declared_files = {}
try:
not_declared_files = declare_suspicious_file_replicas(pfns=pfns, reason=reason, issuer=request.environ.get('issuer'), vo=request.environ.get('vo'))
except AccessDenied as error:
return generate_http_error_flask(401, 'AccessDenied', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
return Response(dumps(not_declared_files), status=201, content_type='application/json')
@check_accept_header_wrapper_flask(['application/json'])
def get(self):
"""
List the suspicious replicas on a list of RSEs.
.. :quickref: SuspiciousReplicas; Get suspicious replicas.
:resheader Content-Type: application/json
:status 200: OK.
:status 406: Not Acceptable.
:status 500: Internal Error.
:returns: List of suspicious file replicas.
"""
result = []
rse_expression, younger_than, nattempts = None, None, None
if request.query_string:
query_string = request.query_string.decode(encoding='utf-8')
try:
params = loads(unquote(query_string))
except ValueError:
params = parse_qs(query_string)
print(params)
if 'rse_expression' in params:
rse_expression = params['rse_expression'][0]
if 'younger_than' in params and params['younger_than'][0]:
younger_than = datetime.strptime(params['younger_than'][0], "%Y-%m-%dT%H:%M:%S")
if 'nattempts' in params:
nattempts = int(params['nattempts'][0])
try:
result = get_suspicious_files(rse_expression=rse_expression, younger_than=younger_than, nattempts=nattempts, vo=request.environ.get('vo'))
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
return Response(render_json_list(result), 200, content_type='application/json')
class BadReplicasStates(MethodView):
@check_accept_header_wrapper_flask(['application/x-json-stream'])
def get(self):
"""
List the bad or suspicious replicas by states.
.. :quickref: BadReplicasStates; List bad replicas.
:query state: The state of the file (SUSPICIOUS or BAD).
:query rse: The RSE name.
:query younger_than: date in format "%Y-%m-%dT%H:%M:%S.%f" to select bad replicas younger than this date.
:query older_than: date in format "%Y-%m-%dT%H:%M:%S.%f" to select bad replicas older than this date.
:query limit: The maximum number of replicas returned.
:query list_pfns: Flag to include pfns.
:resheader Content-Type: application/x-json-stream
:status 200: OK.
:status 401: Invalid auth token.
:status 406: Not Acceptable.
:status 500: Internal Error.
:returns: List of dicts of bad file replicas.
"""
state, rse, younger_than, older_than, limit, list_pfns = None, None, None, None, None, None
if request.query_string:
query_string = request.query_string.decode(encoding='utf-8')
try:
params = loads(unquote(query_string))
except ValueError:
params = parse_qs(query_string)
if 'state' in params:
state = params['state'][0]
if isinstance(state, string_types):
state = BadFilesStatus(state)
if 'rse' in params:
rse = params['rse'][0]
if 'younger_than' in params:
younger_than = datetime.strptime(params['younger_than'][0], "%Y-%m-%dT%H:%M:%S.%f")
if 'older_than' in params and params['older_than']:
older_than = datetime.strptime(params['older_than'][0], "%Y-%m-%dT%H:%M:%S.%f")
if 'limit' in params:
limit = int(params['limit'][0])
if 'list_pfns' in params:
list_pfns = bool(params['list_pfns'][0])
try:
def generate(vo):
for row in list_bad_replicas_status(state=state, rse=rse, younger_than=younger_than,
older_than=older_than, limit=limit, list_pfns=list_pfns,
vo=vo):
yield dumps(row, cls=APIEncoder) + '\n'
return try_stream(generate(vo=request.environ.get('vo')))
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
class BadReplicasSummary(MethodView):
@check_accept_header_wrapper_flask(['application/x-json-stream'])
def get(self):
"""
Return a summary of the bad replicas by incident.
.. :quickref: BadReplicasSummary; List bad replicas by incident.
:query rse_expression: The RSE expression.
:query from_date: The start date.
:query to_date: The end date.
:resheader Content-Type: application/x-json-stream
:status 200: OK.
:status 401: Invalid auth token.
:status 406: Not Acceptable.
:status 500: Internal Error.
:returns: List of bad replicas by incident.
"""
rse_expression, from_date, to_date = None, None, None
if request.query_string:
query_string = request.query_string.decode(encoding='utf-8')
try:
params = loads(unquote(query_string))
except ValueError:
params = parse_qs(query_string)
if 'rse_expression' in params:
rse_expression = params['rse_expression'][0]
if 'from_date' in params and params['from_date'][0]:
from_date = datetime.strptime(params['from_date'][0], "%Y-%m-%d")
if 'to_date' in params:
to_date = datetime.strptime(params['to_date'][0], "%Y-%m-%d")
try:
def generate(vo):
for row in get_bad_replicas_summary(rse_expression=rse_expression, from_date=from_date,
to_date=to_date, vo=vo):
yield dumps(row, cls=APIEncoder) + '\n'
return try_stream(generate(vo=request.environ.get('vo')))
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
class DatasetReplicas(MethodView):
@check_accept_header_wrapper_flask(['application/x-json-stream'])
def get(self, scope_name):
"""
List dataset replicas.
.. :quickref: DatasetReplicas; List dataset replicas.
:param scope_name: data identifier (scope)/(name).
:query deep: Flag to enable lookup at the file level.
:resheader Content-Type: application/x-json-stream
:status 200: OK.
:status 401: Invalid auth token.
:status 406: Not Acceptable.
:status 500: Internal Error.
:returns: A dictionary containing all replicas information.
"""
try:
scope, name = parse_scope_name(scope_name, request.environ.get('vo'))
def generate(deep, vo):
for row in list_dataset_replicas(scope=scope, name=name, deep=deep, vo=vo):
yield dumps(row, cls=APIEncoder) + '\n'
return try_stream(generate(deep=request.args.get('deep', False), vo=request.environ.get('vo')))
except ValueError as error:
return generate_http_error_flask(400, 'ValueError', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
class DatasetReplicasBulk(MethodView):
@check_accept_header_wrapper_flask(['application/x-json-stream'])
def post(self):
"""
List dataset replicas for multiple DIDs.
.. :quickref: DatasetReplicas; List replicas for multiple DIDs.
:<json list dids: List of DIDs for querying the datasets.
:resheader Content-Type: application/x-json-stream
:status 200: OK.
:status 400: Bad Request.
:status 401: Invalid auth token.
:status 406: Not Acceptable.
:status 500: Internal Error.
:returns: A dictionary containing all replicas information.
"""
json_data = request.data
try:
params = parse_response(json_data)
dids = params['dids']
didslength = len(dids)
except KeyError as error:
return generate_http_error_flask(400, 'KeyError', 'Cannot find mandatory parameter : %s' % str(error))
except ValueError:
return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
if didslength == 0:
return generate_http_error_flask(400, 'ValueError', 'List of DIDs is empty')
try:
def generate(vo):
for row in list_dataset_replicas_bulk(dids=dids, vo=vo):
yield dumps(row, cls=APIEncoder) + '\n'
return try_stream(generate(vo=request.environ.get('vo')))
except InvalidObject as error:
return generate_http_error_flask(400, 'InvalidObject', 'Cannot validate DIDs: %s' % (str(error)))
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
class DatasetReplicasVP(MethodView):
@check_accept_header_wrapper_flask(['application/x-json-stream'])
def get(self, scope_name):
"""
List dataset replicas using the Virtual Placement service.
NOTICE: This is an RnD function and might change or go away at any time.
.. :quickref: DatasetReplicas; List dataset replicas with VP.
:param scope_name: data identifier (scope)/(name).
:query deep: Flag to enable lookup at the file level.
:resheader Content-Type: application/x-json-stream
:status 200: OK.
:status 401: Invalid auth token.
:status 406: Not Acceptable.
:status 500: Internal Error.
:returns: If VP exists a list of dicts of sites, otherwise nothing
"""
try:
scope, name = parse_scope_name(scope_name, request.environ.get('vo'))
def generate(deep, vo):
for row in list_dataset_replicas_vp(scope=scope, name=name, deep=deep, vo=vo):
yield dumps(row, cls=APIEncoder) + '\n'
return try_stream(generate(deep=request.args.get('deep', False), vo=request.environ.get('vo')))
except ValueError as error:
return generate_http_error_flask(400, 'ValueError', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
class ReplicasRSE(MethodView):
@check_accept_header_wrapper_flask(['application/x-json-stream'])
def get(self, rse):
"""
List dataset replicas per RSE.
.. :quickref: ReplicasRSE; List dataset replicas per RSE.
:resheader Content-Type: application/x-json-stream
:status 200: OK.
:status 401: Invalid auth token.
:status 406: Not Acceptable.
:status 500: Internal Error.
:returns: A dictionary containing all replicas on the RSE.
"""
try:
def generate(vo):
for row in list_datasets_per_rse(rse=rse, vo=vo):
yield dumps(row, cls=APIEncoder) + '\n'
return try_stream(generate(vo=request.environ.get('vo')))
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
class BadDIDs(MethodView):
def post(self):
"""
Declare a list of bad replicas by DID.
.. :quickref: BadDIDs; Declare bad replicas by DID.
:<json list dids: The list of DIDs.
:<json string rse: The RSE name.
:<json string reason: The reason of the loss.
:<json string expires_at: The expiration date. Only applies to TEMPORARY_UNAVAILABLE.
:resheader Content-Type: application/x-json-string
:status 201: Created.
:status 400: Cannot decode json parameter list.
:status 401: Invalid auth token.
:status 404: Replica not found.
:status 500: Internal Error.
:returns: A list of not successfully declared files.
"""
json_data = request.data
dids = []
rse = None
reason = None
state = None
expires_at = None
try:
params = parse_response(json_data)
if 'dids' in params:
dids = params['dids']
if 'rse' in params:
rse = params['rse']
if 'reason' in params:
reason = params['reason']
state = ReplicaState.BAD
if 'expires_at' in params and params['expires_at']:
expires_at = datetime.strptime(params['expires_at'], "%Y-%m-%dT%H:%M:%S.%f")
not_declared_files = add_bad_dids(dids=dids, rse=rse, issuer=request.environ.get('issuer'), state=state,
reason=reason, expires_at=expires_at, vo=request.environ.get('vo'))
except (ValueError, InvalidType) as error:
return generate_http_error_flask(400, 'ValueError', error.args[0])
except AccessDenied as error:
return generate_http_error_flask(401, 'AccessDenied', error.args[0])
except ReplicaNotFound as error:
return generate_http_error_flask(404, 'ReplicaNotFound', error.args[0])
except Duplicate as error:
return generate_http_error_flask(409, 'Duplicate', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
return Response(dumps(not_declared_files), status=201, content_type='application/json')
class BadPFNs(MethodView):
def post(self):
"""
Declare a list of bad PFNs.
.. :quickref: BadPFNs; Declare bad replicas.
:<json string pfns: The list of PFNs.
:<json string reason: The reason of the loss.
:<json string state: The state is either BAD, SUSPICIOUS or TEMPORARY_UNAVAILABLE.
:<json string expires_at: The expiration date. Only applies to TEMPORARY_UNAVAILABLE.
:resheader Content-Type: application/x-json-string
:status 201: Created.
:status 400: Cannot decode json parameter list.
:status 401: Invalid auth token.
:status 404: Replica not found.
:status 500: Internal Error.
:returns: A list of not successfully declared files.
"""
json_data = request.data
pfns = []
reason = None
state = None
expires_at = None
try:
params = parse_response(json_data)
if 'pfns' in params:
pfns = params['pfns']
if 'reason' in params:
reason = params['reason']
if 'state' in params:
state = params['state']
if 'expires_at' in params and params['expires_at']:
expires_at = datetime.strptime(params['expires_at'], "%Y-%m-%dT%H:%M:%S.%f")
add_bad_pfns(pfns=pfns, issuer=request.environ.get('issuer'), state=state, reason=reason, expires_at=expires_at, vo=request.environ.get('vo'))
except (ValueError, InvalidType) as error:
return generate_http_error_flask(400, 'ValueError', error.args[0])
except AccessDenied as error:
return generate_http_error_flask(401, 'AccessDenied', error.args[0])
except ReplicaNotFound as error:
return generate_http_error_flask(404, 'ReplicaNotFound', error.args[0])
except Duplicate as error:
return generate_http_error_flask(409, 'Duplicate', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
return 'Created', 201
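# Illustrative client-side sketch (not part of this module): the JSON body that
# the /replicas/bad/pfns endpoint above expects, based on its docstring. All
# values, the host name and the auth headers below are placeholders.
#
#     import requests
#     payload = {
#         "pfns": ["<pfn-1>", "<pfn-2>"],
#         "reason": "damaged during disk failure",
#         "state": "TEMPORARY_UNAVAILABLE",
#         "expires_at": "2030-01-01T00:00:00.000000",  # "%Y-%m-%dT%H:%M:%S.%f"
#     }
#     requests.post("https://<rucio-host>/replicas/bad/pfns", json=payload, headers=auth_headers)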
class Tombstone(MethodView):
def post(self):
"""
Set a tombstone on a list of replicas.
.. :quickref: Tombstone; Set a tombstone on a list of replicas.
:<json list replicas: The list of replicas (each a dict with 'rse', 'scope' and 'name').
:resheader Content-Type: application/x-json-string
:status 201: Created.
:status 401: Invalid auth token.
:status 404: ReplicaNotFound.
:status 500: Internal Error.
"""
json_data = request.data
replicas = []
try:
params = parse_response(json_data)
if 'replicas' in params:
replicas = params['replicas']
except ValueError:
return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')
try:
for replica in replicas:
set_tombstone(replica['rse'], replica['scope'], replica['name'], issuer=request.environ.get('issuer'), vo=request.environ.get('vo'))
except ReplicaNotFound as error:
return generate_http_error_flask(404, 'ReplicaNotFound', error.args[0])
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
return 'Created', 201
def blueprint(no_doc=True):
bp = Blueprint('replicas', __name__, url_prefix='/replicas')
list_replicas_view = ListReplicas.as_view('list_replicas')
bp.add_url_rule('/list', view_func=list_replicas_view, methods=['post', ])
replicas_view = Replicas.as_view('replicas')
if no_doc:
# rule without trailing slash needs to be added before rule with trailing slash
bp.add_url_rule('', view_func=replicas_view, methods=['post', 'put', 'delete'])
bp.add_url_rule('/', view_func=replicas_view, methods=['post', 'put', 'delete'])
suspicious_replicas_view = SuspiciousReplicas.as_view('suspicious_replicas')
bp.add_url_rule('/suspicious', view_func=suspicious_replicas_view, methods=['get', 'post'])
bad_replicas_states_view = BadReplicasStates.as_view('bad_replicas_states')
bp.add_url_rule('/bad/states', view_func=bad_replicas_states_view, methods=['get', ])
bad_replicas_summary_view = BadReplicasSummary.as_view('bad_replicas_summary')
bp.add_url_rule('/bad/summary', view_func=bad_replicas_summary_view, methods=['get', ])
bad_replicas_pfn_view = BadPFNs.as_view('add_bad_pfns')
bp.add_url_rule('/bad/pfns', view_func=bad_replicas_pfn_view, methods=['post', ])
bad_replicas_dids_view = BadDIDs.as_view('add_bad_dids')
bp.add_url_rule('/bad/dids', view_func=bad_replicas_dids_view, methods=['post', ])
replicas_rse_view = ReplicasRSE.as_view('replicas_rse')
bp.add_url_rule('/rse/<rse>', view_func=replicas_rse_view, methods=['get', ])
bad_replicas_view = BadReplicas.as_view('bad_replicas')
bp.add_url_rule('/bad', view_func=bad_replicas_view, methods=['post', ])
replicas_dids_view = ReplicasDIDs.as_view('replicas_dids')
bp.add_url_rule('/dids', view_func=replicas_dids_view, methods=['post', ])
dataset_replicas_view = DatasetReplicas.as_view('dataset_replicas')
bp.add_url_rule('/<path:scope_name>/datasets', view_func=dataset_replicas_view, methods=['get', ])
dataset_replicas_bulk_view = DatasetReplicasBulk.as_view('dataset_replicas_bulk')
bp.add_url_rule('/datasets_bulk', view_func=dataset_replicas_bulk_view, methods=['post', ])
dataset_replicas_vp_view = DatasetReplicasVP.as_view('dataset_replicas_vp')
bp.add_url_rule('/<path:scope_name>/datasets_vp', view_func=dataset_replicas_vp_view, methods=['get', ])
bp.add_url_rule('/<path:scope_name>', view_func=replicas_view, methods=['get', ])
set_tombstone_view = Tombstone.as_view('set_tombstone')
bp.add_url_rule('/tombstone', view_func=set_tombstone_view, methods=['post', ])
if no_doc:
bp.add_url_rule('/list/', view_func=list_replicas_view, methods=['post', ])
bp.add_url_rule('/suspicious/', view_func=suspicious_replicas_view, methods=['get', 'post'])
bp.add_url_rule('/bad/states/', view_func=bad_replicas_states_view, methods=['get', ])
bp.add_url_rule('/bad/summary/', view_func=bad_replicas_summary_view, methods=['get', ])
bp.add_url_rule('/bad/pfns/', view_func=bad_replicas_pfn_view, methods=['post', ])
bp.add_url_rule('/bad/dids/', view_func=bad_replicas_dids_view, methods=['post', ])
bp.add_url_rule('/rse/<rse>/', view_func=replicas_rse_view, methods=['get', ])
bp.add_url_rule('/bad/', view_func=bad_replicas_view, methods=['post', ])
bp.add_url_rule('/dids/', view_func=replicas_dids_view, methods=['post', ])
bp.add_url_rule('/datasets_bulk/', view_func=dataset_replicas_bulk_view, methods=['post', ])
bp.add_url_rule('/<path:scope_name>/datasets_vp', view_func=dataset_replicas_vp_view, methods=['get', ])
bp.add_url_rule('/<path:scope_name>/', view_func=replicas_view, methods=['get', ])
bp.add_url_rule('/tombstone/', view_func=set_tombstone_view, methods=['post', ])
bp.before_request(request_auth_env)
bp.after_request(response_headers)
return bp
def make_doc():
""" Only used for sphinx documentation """
doc_app = Flask(__name__)
doc_app.register_blueprint(blueprint(no_doc=False))
return doc_app
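# Minimal usage sketch (assumes this module's dependencies and the Rucio
# configuration/auth middleware are available; host and port are placeholders):
#
#     app = Flask(__name__)
#     app.register_blueprint(blueprint())  # no_doc=True also registers the trailing-slash aliases
#     # app.run(host="127.0.0.1", port=5000)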
|
py
|
1a57591b364d1c367760afb14e5859d4ab7c308a
|
import numpy as np
import time
from collections import OrderedDict, deque, Counter
from digideep.environment import MakeEnvironment
from .data_helpers import flatten_dict, update_dict_of_lists, complete_dict_of_list, convert_time_to_batch_major, extract_keywise
# from mujoco_py import MujocoException
# from dm_control.rl.control import PhysicsError
from digideep.utility.logging import logger
from digideep.utility.profiling import KeepTime
from digideep.utility.monitoring import monitor
from digideep.environment.tsne_evaluation import tsne_evaluation
from digideep.environment.storage import Storage
class Explorer:
"""A class which runs environments in parallel and returns the result trajectories in a unified structure.
It supports multiple agents in an environment.
Note:
The entrypoint of this class is the :func:`update` function, in which the :func:`step` function will be
called for ``n_steps`` times. In the :func:`step` function, the :func:`prestep` function is called first to get the
actions from the agents. Then the ``env.step`` function is called to execute those actions in the environments.
After the loop is done in the :func:`update`, we do another :func:`prestep` to save the ``observations``/``actions``
of the last step. This indicates the final action that the agent would take without actually executing that. This
information will be useful in some algorithms.
Args:
session (:obj:`~digideep.pipeline.session.Session`): The running session object.
agents (dict): A dictionary of the agents and their corresponding agent objects.
mode (str): The mode of the Explorer, which is any of the three: ``train`` | ``test`` | ``eval``
env (:obj:`env`): The parameters of the environment.
do_reset (bool): A flag indicating whether to reset the environment at the update start.
final_action (bool): A flag indicating whether in the final call of :func:`prestep` the action should also be generated or not.
num_workers (int): Number of workers to work in parallel.
deterministic (bool): Whether to choose the optimal action or to mix some noise with the action (i.e. for exploration).
n_steps (int): Number of steps to take in the :func:`update`.
render (bool): A flag used to indicate whether environment should be rendered at each step.
render_delay (float): The number of seconds to wait after calling ``env.render``. Used when the environment is too fast for
visualization, typically in ``eval`` mode.
seed (int): The environment seed.
Attributes:
steps (int): Number of times the :func:`step` function is called.
n_episode (int): Number of episodes (a full round of simulation) generated so far.
timesteps (int): Number of total timesteps of experience generated so far.
was_reset (bool): A flag indicating whether the Explorer has been just reset or not.
observations: A tracker of environment observations used to produce the actions for the next step.
masks: A tracker of environment ``done`` flag indicating the start of a new episode.
hidden_states: A tracker of hidden_states of the agents for producing the next step action in recurrent policies.
Caution:
Use ``do_reset`` with caution; only when you know what the consequences are.
Generally there are few occasions when this flag needs to be true.
Tip:
This class is partially serializable. It only saves the state of environment wrappers and not the environment per se.
See Also:
:ref:`ref-data-structure`
"""
def __init__(self, session, agents=None, **params):
self.agents = agents
self.params = params
self.session = session
self.timestep = 0
self.timestep_num = 0
self.eval_episode_reward = deque(maxlen=100)
self.obs_cluster_num = 16
self.action_cluster_num = 2
# Storage used to build the FSM (finite-state machine)
self.storage = Storage(obs_cluster_num=self.obs_cluster_num, action_cluster_num=self.action_cluster_num, num_steps=self.timestep_num * 1000)
# Create models
extra_env_kwargs = self.params.get("extra_env_kwargs", {})
menv = MakeEnvironment(session, mode=self.params["mode"], seed=self.params["seed"], **self.params["env"])
self.envs = menv.create_envs(num_workers=self.params["num_workers"], extra_env_kwargs=extra_env_kwargs)
# self.params["env"]["env_type"]
self.state = {}
self.state["steps"] = 0
self.state["n_episode"] = 0
self.state["timesteps"] = 0
self.state["was_reset"] = False
self.local = {}
self.local["steps"] = 0
self.local["n_episode"] = 0
self.monitor_n_episode()
self.monitor_timesteps()
# We only reset once. Later environments will be reset automatically.
self.reset()
# Will the results be reported when using ``do_reset``?
def monitor_n_episode(self):
if self.params["mode"] == "train":
monitor.set_meta_key("episode", self.state["n_episode"])
def monitor_timesteps(self):
if self.params["mode"] == "train":
monitor.set_meta_key("frame", self.state["timesteps"])
def state_dict(self):
# TODO: Should we make a deepcopy?
return {"state":self.state, "envs":self.envs.state_dict()}
def load_state_dict(self, state_dict):
self.state.update(state_dict["state"])
self.envs.load_state_dict(state_dict["envs"])
self.monitor_n_episode()
self.monitor_timesteps()
# if self.params["mode"] in ["test", "eval"]:
# # We reset the explorer in case of test/eval to clear the history of observations/masks/hidden_state.
# # Because this part does not make sense to be transferred.
# self.reset()
def make_counter(self, storage):
inputs = storage.input_batch
obs_clusters = storage.obs_cluster_batch
act_clusters = storage.action_cluster_batch
actions = storage.action_batch
hidden_action = storage.hidden_action_batch
step = 0
obs_cluster_labels = obs_clusters.argmax(dim=1).numpy()
act_cluster_labels = act_clusters.argmax(dim=1).numpy()
transition_pair = [(act_cluster_labels[i], obs_cluster_labels[i + 1], act_cluster_labels[i + 1]) for i in range(0, len(act_cluster_labels) - 1)]
counter = Counter(transition_pair)
return counter
def make_FSM(self, counter, obs_cluster_num, action_cluster_num, cut_value=20):
old_transition_list = list(counter.keys())
transition_list = []
for key in old_transition_list:
if counter[key] >= cut_value:
transition_list.append(key)
transition_table = [[[] for x in range(action_cluster_num)] for x in range(action_cluster_num)]
for transition in transition_list:
before_act, current_obs, current_act = transition
transition_table[current_act][before_act].append(current_obs)
from beautifultable import BeautifulTable
table = BeautifulTable()
table.column_headers = ["_"] + [str(x) for x in range(action_cluster_num)]
for i in range(action_cluster_num):
table.append_row([i] + [transition_table[i][j] for j in range(action_cluster_num)])
return table
def report_rewards(self, infos):
"""This function will extract episode information from infos and will send them to
:class:`~digideep.utility.monitoring.Monitor` class.
"""
# This episode keyword only exists if we use a Monitor wrapper.
# This keyword will only appear at the "reset" times.
# TODO: If this is a true multi-agent system, then the rewards
# must be separated as well!
if '/episode/r' in infos.keys():
rewards = infos['/episode/r']
self.eval_episode_reward.append(rewards)
self.timestep += 1
#if self.timestep == self.timestep_num:
#print(np.mean(self.eval_episode_reward))
#import pdb
#pdb.set_trace()
#transition_counter = self.make_counter(self.storage)
#table = self.make_FSM(transition_counter, self.obs_cluster_num, self.action_cluster_num)
#print(table)
#tsne_evaluation(self.storage)
for rew in rewards:
if (rew is not None) and (not np.isnan(rew)):
self.local["n_episode"] += 1
self.state["n_episode"] += 1
self.monitor_n_episode()
monitor("/reward/"+self.params["mode"]+"/episodic", rew, window=self.params["win_size"])
self.session.writer.add_scalar('reward/'+self.params["mode"], rew, self.state["n_episode"])
def close(self):
"""It closes all environments.
"""
self.envs.close()
def reset(self):
"""Will reset the Explorer and all of its states. Will set ``was_reset`` to ``True`` to prevent immediate resets.
"""
self.state["observations"] = self.envs.reset()
self.state["masks"] = np.array([[0]]*self.params["num_workers"], dtype=np.float32)
# The initial hidden_state is not saved in the memory. The only use for it is
# getting passed to the action_generator.
# So if there is a size mismatch between this and the next hidden_states, no
# conflicts/errors would happen.
self.state["hidden_state"] = {}
for agent_name in self.agents:
self.state["hidden_state"][agent_name] = self.agents[agent_name].reset_hidden_state(self.params["num_workers"])
self.state["was_reset"] = True
def prestep(self, final_step=False):
"""
Function to produce actions for all of the agents. This function does not execute the actions in the environment.
Args:
final_step (bool): A flag indicating whether this is the last call of this function.
Returns:
dict: The pre-transition dictionary containing observations, masks, and agent information. The format is like:
``{"observations":..., "masks":..., "agents":...}``
"""
with KeepTime("to_numpy"):
# TODO: Is it necessary for conversion of obs?
# NOTE: The np conversion will not work if observation is a dictionary.
# observations = np.array(self.state["observations"], dtype=np.float32)
observations = self.state["observations"]
masks = self.state["masks"]
hidden_state = self.state["hidden_state"]
with KeepTime("gen_action"):
publish_agents = True
agents = {}
# TODO: We are assuming a one-level action space.
if (not final_step) or (self.params["final_action"]):
if self.state["steps"] < self.params["warm_start"]:
# Take RANDOM actions if warm-starting
for agent_name in self.agents:
agents[agent_name] = self.agents[agent_name].random_action_generator(self.envs, self.params["num_workers"])
else:
# Take REAL actions if not warm-starting
for agent_name in self.agents:
action_generator = self.agents[agent_name].action_generator
agents[agent_name], storage_item = action_generator(observations, hidden_state[agent_name], masks, deterministic=self.params["deterministic"])
#self.storage.store(storage_item[0], storage_item[1], storage_item[2], storage_item[3], storage_item[4])
else:
publish_agents = False
# We are saving the "new" hidden_state now.
# for agent_name in self.agents:
# if (not final_step) or (self.params["final_action"]):
# action_generator = self.agents[agent_name].action_generator
# agents[agent_name] = action_generator(observations, hidden_state[agent_name], masks, deterministic=self.params["deterministic"])
# else:
# publish_agents = False
with KeepTime("form_dictionary"):
if publish_agents:
pre_transition = dict(observations=observations,
masks=masks,
agents=agents)
else:
pre_transition = dict(observations=observations,
masks=masks)
return pre_transition
def step(self):
"""Function that runs the ``prestep`` and the actual ``env.step`` functions.
It will also manipulate the transition data to be in appropriate format.
Returns:
dict: The full transition information, including the pre-transition (actions, last observations, etc) and the
results of executing actions on the environments, i.e. rewards and infos. The format is like:
``{"observations":..., "masks":..., "rewards":..., "infos":..., "agents":...}``
See Also:
:ref:`ref-data-structure`
"""
# We are saving old versions of observations, hidden_state, and masks.
with KeepTime("prestep"):
pre_transition = self.prestep()
# TODO: For true multi-agent systems, rewards must be a dictionary as well,
# i.e. one reward for each agent. However, if the agents are pursuing
# a single goal, the reward can still be a single scalar!
# Updating observations and masks: These two are one step old in the trajectory.
# hidden_state is the newest.
with KeepTime("envstep"):
# Prepare actions
actions = extract_keywise(pre_transition["agents"], "actions")
# Step
self.state["observations"], rewards, dones, infos = self.envs.step(actions)
# Post-step
self.state["hidden_state"] = extract_keywise(pre_transition["agents"], "hidden_state")
self.state["masks"] = np.array([0.0 if done_ else 1.0 for done_ in dones], dtype=np.float32).reshape((-1,1))
# NOTE: Uncomment if you find useful information in the continuous rewards ...
# monitor("/reward/"+self.params["mode"]+"/continuous", np.mean(rewards))
with KeepTime("render"):
if self.params["render"]:
self.envs.render()
if self.params["render_delay"] > 0:
time.sleep(self.params["render_delay"])
# except MujocoException as e:
# logger.error("We got a MuJoCo exception!")
# raise
# ## Retry??
# # return self.run()
with KeepTime("poststep"):
# TODO: Sometimes the type of observations is "dict" which shouldn't be. Investigate the reason.
if isinstance(self.state["observations"], OrderedDict) or isinstance(self.state["observations"], dict):
for key in self.state["observations"]:
if np.isnan(self.state["observations"][key]).any():
logger.warn('NaN caught in observations during rollout generation.', 'step =', self.state["steps"])
raise ValueError
else:
if np.isnan(self.state["observations"]).any():
logger.warn('NaN caught in observations during rollout generation.', 'step =', self.state["steps"])
raise ValueError
## Retry??
# return self.run()
self.state["steps"] += 1
self.state["timesteps"] += self.params["num_workers"]
self.monitor_timesteps()
# TODO: Adapt with the new dict_of_lists data structure.
with KeepTime("report_reward"):
self.report_rewards(infos)
transition = dict(**pre_transition,
rewards=rewards,
infos=infos)
return transition
def update(self):
"""Runs :func:`step` for ``n_steps`` times.
Returns:
dict: A dictionary of unix-style file system keys including all information generated by the simulation.
See Also:
:ref:`ref-data-structure`
"""
# trajectory is a dictionary of lists
trajectory = {}
if not self.state["was_reset"] and self.params["do_reset"]:
self.reset()
self.state["was_reset"] = False
# Run T (n-step) steps.
self.local["steps"] = 0
self.local["n_episode"] = 0
while (self.params["n_steps"] and self.local["steps"] < self.params["n_steps"]) or \
(self.params["n_episodes"] and self.local["n_episode"] < self.params["n_episodes"]):
with KeepTime("step"):
# print("one exploration step ...")
transition = self.step()
with KeepTime("append"):
# Data is flattened in the explorer per se.
transition = flatten_dict(transition)
# Update the trajectory with the current list of data.
# Put nones if the key is absent.
update_dict_of_lists(trajectory, transition, index=self.local["steps"])
self.local["steps"] += 1
with KeepTime("poststep"):
# Take one prestep so we have the next observation/hidden_state/masks/action/value/ ...
transition = self.prestep(final_step=True)
transition = flatten_dict(transition)
update_dict_of_lists(trajectory, transition, index=self.local["steps"])
# Complete the trajectory if one key was in a transition, but did not occur in later
# transitions. "length=n_steps+1" is because of counting final out-of-loop prestep.
# complete_dict_of_list(trajectory, length=self.params["n_steps"]+1)
complete_dict_of_list(trajectory, length=self.local["steps"]+1)
result = convert_time_to_batch_major(trajectory)
# We discard the rest of monitored episodes for the test mode to prevent them from affecting next test.
monitor.discard_key("/reward/test/episodic")
return result
### Data Structure:
# Pre-step:
# observations
# masks:
#
# Agent (policies):
# actions
# hidden_state
# artifacts:
# action_log_p
# value
#
# Step:
# rewards
# infos
######################
##### Statistics #####
######################
# Stats: Wall-time
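# Illustrative, self-contained sketch of the transition counting performed by
# Explorer.make_counter above, using plain Python lists of cluster labels in
# place of the torch tensors held by Storage. The label values are made up.
if __name__ == "__main__":
    from collections import Counter

    obs_cluster_labels = [0, 1, 1, 2, 0, 1]   # per-step observation cluster ids
    act_cluster_labels = [1, 0, 1, 1, 0, 0]   # per-step action cluster ids

    # Each transition is (previous action cluster, current observation cluster,
    # current action cluster), exactly as built in make_counter.
    transition_pairs = [
        (act_cluster_labels[i], obs_cluster_labels[i + 1], act_cluster_labels[i + 1])
        for i in range(len(act_cluster_labels) - 1)
    ]
    print(Counter(transition_pairs))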
|
py
|
1a575ac9a8e904cb7b7e657121bbf9a40ce90be5
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom RNN decoder."""
import tensorflow as tf
def rnn_decoder(decoder_inputs,
initial_state,
cell,
loop_function=None,
scope=None):
"""RNN decoder for the LSTM-SSD model.
This decoder returns a list of all states, rather than only the final state.
Args:
decoder_inputs: A list of 4D Tensors with shape [batch_size x input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 4D Tensors with
shape [batch_size x output_size] containing generated outputs.
states: A list of the same length as decoder_inputs of the state of each
cell at each time-step. It is a 2D Tensor of shape
[batch_size x cell.state_size].
"""
with tf.variable_scope(scope or 'rnn_decoder'):
state_tuple = initial_state
outputs = []
states = []
prev = None
for local_step, decoder_input in enumerate(decoder_inputs):
if loop_function is not None and prev is not None:
with tf.variable_scope('loop_function', reuse=True):
decoder_input = loop_function(prev, local_step)
output, state_tuple = cell(decoder_input, state_tuple)
outputs.append(output)
states.append(state_tuple)
if loop_function is not None:
prev = output
return outputs, states
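# Framework-free sketch of the contract implemented by rnn_decoder above: the
# cell is called once per input and *every* intermediate state is collected,
# not only the final one. The toy cell below is a stand-in, not an RNNCell.
if __name__ == "__main__":
    def toy_cell(inp, state):
        new_state = state + inp       # stand-in for the real recurrence
        output = 2 * new_state        # stand-in for the cell output
        return output, new_state

    inputs = [1.0, 2.0, 3.0]
    state = 0.0
    outputs, states = [], []
    for x in inputs:
        out, state = toy_cell(x, state)
        outputs.append(out)
        states.append(state)
    print(outputs)   # [2.0, 6.0, 12.0]
    print(states)    # [1.0, 3.0, 6.0]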
def multi_input_rnn_decoder(decoder_inputs,
initial_state,
cell,
sequence_step,
selection_strategy='RANDOM',
is_training=None,
is_quantized=False,
preprocess_fn_list=None,
pre_bottleneck=False,
flatten_state=False,
scope=None):
"""RNN decoder for the Interleaved LSTM-SSD model.
This decoder takes multiple sequences of inputs and selects the input to feed
to the rnn at each timestep using its selection_strategy, which can be random,
learned, or deterministic.
This decoder returns a list of all states, rather than only the final state.
Args:
decoder_inputs: A list of lists of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
sequence_step: Tensor [batch_size] of the step number of the first elements
in the sequence.
selection_strategy: Method for picking the decoder_input to use at each
timestep. Must be 'RANDOM', 'SKIPX' for integer X, where X is the number
of times to use the second input before using the first.
is_training: boolean, whether the network is training. When using learned
selection, attempts exploration if training.
is_quantized: flag to enable/disable quantization mode.
preprocess_fn_list: List of functions accepting two tensor arguments: one
timestep of decoder_inputs and the lstm state. If not None,
decoder_inputs[i] will be updated with preprocess_fn[i] at the start of
each timestep.
pre_bottleneck: if True, use separate bottleneck weights for each sequence.
Useful when input sequences have differing numbers of channels. Final
bottlenecks will have the same dimension.
flatten_state: Whether the LSTM state is flattened.
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing generated outputs.
states: A list of the same length as decoder_inputs of the state of each
cell at each time-step. It is a 2D Tensor of shape
[batch_size x cell.state_size].
Raises:
ValueError: If selection_strategy is not recognized or unexpected unroll
length.
"""
if flatten_state and len(decoder_inputs[0]) > 1:
raise ValueError('In export mode, unroll length should not be more than 1')
with tf.variable_scope(scope or 'rnn_decoder'):
state_tuple = initial_state
outputs = []
states = []
batch_size = decoder_inputs[0][0].shape[0].value
num_sequences = len(decoder_inputs)
sequence_length = len(decoder_inputs[0])
for local_step in range(sequence_length):
for sequence_index in range(num_sequences):
if preprocess_fn_list is not None:
decoder_inputs[sequence_index][local_step] = (
preprocess_fn_list[sequence_index](
decoder_inputs[sequence_index][local_step], state_tuple[0]))
if pre_bottleneck:
decoder_inputs[sequence_index][local_step] = cell.pre_bottleneck(
inputs=decoder_inputs[sequence_index][local_step],
state=state_tuple[1],
input_index=sequence_index)
action = generate_action(selection_strategy, local_step, sequence_step,
[batch_size, 1, 1, 1])
inputs, _ = select_inputs(decoder_inputs, action, local_step)
# Mark base network endpoints under raw_inputs/
with tf.name_scope(None):
inputs = tf.identity(inputs, 'raw_inputs/base_endpoint')
output, state_tuple_out = cell(inputs, state_tuple)
state_tuple = select_state(state_tuple, state_tuple_out, action)
outputs.append(output)
states.append(state_tuple)
return outputs, states
def generate_action(selection_strategy, local_step, sequence_step,
action_shape):
"""Generate current (binary) action based on selection strategy.
Args:
selection_strategy: Method for picking the decoder_input to use at each
timestep. Must be 'RANDOM', 'SKIPX' for integer X, where X is the number
of times to use the second input before using the first.
local_step: Tensor [batch_size] of the step number within the current
unrolled batch.
sequence_step: Tensor [batch_size] of the step number of the first elements
in the sequence.
action_shape: The shape of action tensor to be generated.
Returns:
A tensor of shape action_shape, each element is an individual action.
Raises:
ValueError: if selection_strategy is not supported or if 'SKIP' is not
followed by numerics.
"""
if selection_strategy.startswith('RANDOM'):
action = tf.random.uniform(action_shape, maxval=2, dtype=tf.int32)
action = tf.minimum(action, 1)
# First step always runs large network.
if local_step == 0 and sequence_step is not None:
action *= tf.minimum(
tf.reshape(tf.cast(sequence_step, tf.int32), action_shape), 1)
elif selection_strategy.startswith('SKIP'):
inter_count = int(selection_strategy[4:])
if local_step % (inter_count + 1) == 0:
action = tf.zeros(action_shape)
else:
action = tf.ones(action_shape)
else:
raise ValueError('Selection strategy %s not recognized' %
selection_strategy)
return tf.cast(action, tf.int32)
def select_inputs(decoder_inputs, action, local_step, get_alt_inputs=False):
"""Selects sequence from decoder_inputs based on 1D actions.
Given multiple input batches, creates a single output batch by
selecting from the action[i]-ith input for the i-th batch element.
Args:
decoder_inputs: A 2-D list of tensor inputs.
action: A tensor of shape [batch_size]. Each element corresponds to an index
of decoder_inputs to choose.
local_step: The current timestep.
get_alt_inputs: Whether the non-chosen inputs should also be returned.
Returns:
The constructed output. Also outputs the elements that were not chosen
if get_alt_inputs is True, otherwise None.
Raises:
ValueError: if the decoder inputs contains other than two sequences.
"""
num_seqs = len(decoder_inputs)
if not num_seqs == 2:
raise ValueError('Currently only supports two sets of inputs.')
stacked_inputs = tf.stack(
[decoder_inputs[seq_index][local_step] for seq_index in range(num_seqs)],
axis=-1)
action_index = tf.one_hot(action, num_seqs)
inputs = tf.reduce_sum(stacked_inputs * action_index, axis=-1)
inputs_alt = None
# Only works for 2 models.
if get_alt_inputs:
# Reverse of action_index.
action_index_alt = tf.one_hot(action, num_seqs, on_value=0.0, off_value=1.0)
inputs_alt = tf.reduce_sum(stacked_inputs * action_index_alt, axis=-1)
return inputs, inputs_alt
def select_state(previous_state, new_state, action):
"""Select state given action.
Currently only supports binary action. If action is 0, it means the state is
generated from the large model, and thus we will update the state. Otherwise,
if the action is 1, it means the state is generated from the small model, and
in interleaved model, we skip this state update.
Args:
previous_state: A state tuple representing state from previous step.
new_state: A state tuple representing newly computed state.
action: A tensor the same shape as state.
Returns:
A state tuple selected based on the given action.
"""
action = tf.cast(action, tf.float32)
state_c = previous_state[0] * action + new_state[0] * (1 - action)
state_h = previous_state[1] * action + new_state[1] * (1 - action)
return (state_c, state_h)
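# Illustrative numpy analogue (not TensorFlow) of the gating arithmetic used by
# select_inputs and select_state above, for a batch of 2 and binary actions.
if __name__ == "__main__":
    import numpy as np

    # One timestep from two input sequences, batch_size=2, feature dim=3.
    seq0 = np.zeros((2, 3))
    seq1 = np.ones((2, 3))
    action = np.array([0, 1])                      # per-example choice of sequence

    one_hot = np.eye(2)[action]                    # same role as tf.one_hot
    stacked = np.stack([seq0, seq1], axis=-1)      # [batch, dim, num_seqs]
    chosen = np.sum(stacked * one_hot[:, None, :], axis=-1)
    print(chosen)    # first row taken from seq0, second row from seq1

    # select_state: action==1 keeps the previous state, action==0 takes the new one.
    prev_c, new_c = np.zeros((2, 3)), np.ones((2, 3))
    act = action.reshape(-1, 1).astype(float)
    state_c = prev_c * act + new_c * (1 - act)
    print(state_c)   # first row updated (new state), second row kept (previous state)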
|
py
|
1a575b3439d7a8d66043237149a481eee1dad9bb
|
# Random Forest Regression
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 300, random_state = 0)
regressor.fit(X, y)
# Predicting
y_pred = regressor.predict([[6.5]])
# Visualising
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()
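# Optional follow-up sketch: the ensemble average can be inspected through the
# individual trees (regressor.estimators_ is a standard scikit-learn attribute).
# The printed values depend on Position_Salaries.csv, assumed present as above.
tree_predictions = [tree.predict([[6.5]])[0] for tree in regressor.estimators_]
print('mean of individual trees :', np.mean(tree_predictions))
print('ensemble prediction      :', y_pred[0])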
|
py
|
1a575c0c92db9cb1714ee80415da7099dc557653
|
from .informacion import *
|
py
|
1a575c651bf07be3e0351d5c76e6a337e5b44611
|
import re
import logging
import uuid
import inspect
import typing
try:
orjson_enabled = True
import orjson as json
except ImportError:
orjson_enabled = False
import json
from enum import Enum
from typing import Dict, Type, Callable, Optional, List, Union, Literal
from nacl.signing import VerifyKey
from nacl.exceptions import BadSignatureError
from fastapi import FastAPI, Request
from fastapi.exceptions import HTTPException
from fastapi.responses import JSONResponse, ORJSONResponse
from pydantic import ValidationError, validate_arguments, constr, conint
from roid.components import (
Component,
ComponentType,
ButtonStyle,
EMOJI_REGEX,
SelectOption,
SelectValue,
InvokeContext,
)
from roid.exceptions import CommandAlreadyExists, ComponentAlreadyExists
from roid.objects import PartialEmoji
from roid.command import CommandType, Command, CommandGroup
from roid.interactions import InteractionType, Interaction
from roid.error_handlers import KNOWN_ERRORS
from roid.response import (
ResponsePayload,
ResponseType,
ResponseData,
Response,
)
from roid.http import HttpHandler
from roid.state import StorageBackend, MultiManagedState, SqliteBackend
from roid.deferred import CommandsBlueprint, DeferredGroupCommand
_log = logging.getLogger("roid-main")
class SlashCommands(FastAPI):
"""
A slash commands application.
This wraps the standard FastAPI class so this can in theory be used to create
a basic general web application around the bot as well. However, the `/` route
is reserved and docs are disabled.
"""
def __init__(
self,
application_id: int,
application_public_key: str,
token: str,
register_commands: bool = False,
state_backend: Optional[StorageBackend] = None,
**extra,
):
"""
A slash commands application.
This wraps the standard FastAPI class so this can in theory be used to create
a basic general web application around the bot as well. However, the `/` route
is reserved and docs are disabled.
Args:
application_id:
The application id obtained from discord.
See (https://discord.com/developers/application) to get this.
application_public_key:
The public key for request verification.
See (https://discord.com/developers/application) to get this.
token:
The bot token, this can be found in the portal at
https://discord.com/developers/applications/656598065532239892/bot.
register_commands:
An optional bool determining if the system automatically registers the
new commands.
Defaults to False.
WARNING: If this is True it will bulk overwrite the existing
application global commands and guild commands.
state_backend:
The given storage backend to use for internal state management
and `SlashCommands.state` calls.
If no backend is given the Sqlite backend is used.
"""
response_class = ORJSONResponse if orjson_enabled else JSONResponse
super().__init__(
**extra,
docs_url=None,
redoc_url=None,
default_response_class=response_class,
)
if state_backend is None:
state_backend = SqliteBackend("__internal_managed_state")
self.__state_backend = state_backend
self.__state: Optional[MultiManagedState] = None
self.register_commands = register_commands
self._verify_key = VerifyKey(bytes.fromhex(application_public_key))
self._application_id = application_id
self._token = token
self._global_error_handlers = KNOWN_ERRORS
self._commands: Dict[str, Union[Command, CommandGroup]] = {}
self._components: Dict[str, Component] = {}
self._http: Optional[HttpHandler] = None
# register the internal route and FastAPI internals.
self.post("/", name="Interaction Events")(self.__root)
self.on_event("startup")(self._startup)
self.on_event("shutdown")(self._shutdown)
def register_commands_on_start(self):
self.register_commands = True
@property
def state(self) -> MultiManagedState:
return self.__state
@state.setter
def state(self, _):
if hasattr(self, "_ignored_child"):
raise RuntimeError("state cannot be set at runtime.")
self._ignored_child = True
@property
def application_id(self):
return self._application_id
async def _startup(self):
"""A startup lifetime task invoked by the ASGI server."""
self._http = HttpHandler(self.application_id, self._token)
self.__state = MultiManagedState(backend=self.__state_backend)
await self.__state.startup()
if not self.register_commands:
return
# We can set the globals in bulk.
await self.reload_global_commands()
for command in self._commands.values():
if command.guild_ids is None:
continue
_log.info(
f"Registering command {command.name} for guilds: {command.guild_ids}"
)
await command.register(self)
async def _shutdown(self):
"""A shutdown lifetime task invoked by the ASGI server."""
try:
await self._http.shutdown()
finally:
await self.__state.shutdown()
async def reload_global_commands(self):
"""
Registers all global commands in bulk with Discord.
Note: This will ignore any commands with a `guild_id` or `guild_ids` specified.
"""
_log.debug("registering global commands with discord")
await self._http.register_commands(
[c for c in self._commands.values() if c.guild_ids is None]
)
def register_error(
self,
error: Type[Exception],
callback: Callable[[Exception], ResponsePayload],
):
"""
Registers a given error type to a handler.
This means that if an error is raised by the system that matches the given
exception type the callback will be invoked and it's response sent back.
The traceback is not logged if this is set.
Args:
error:
The error type itself, this must inherit from `Exception`.
callback:
The callback to handle the error and return a response.
"""
if not issubclass(error, Exception):
raise TypeError("error type does not inherit from `Exception`")
self._global_error_handlers[error] = callback
async def __root(self, request: Request):
try:
signature = request.headers["X-Signature-Ed25519"]
timestamp = request.headers["X-Signature-Timestamp"]
body = await request.body()
self._verify_key.verify(
b"%s%s" % (timestamp.encode(), body), bytes.fromhex(signature)
)
except (BadSignatureError, KeyError):
raise HTTPException(status_code=401)
data = json.loads(body)
logging.debug(f"got payload: {data}")
try:
interaction = Interaction(**data)
except ValidationError as e:
_log.warning(f"rejecting response due to {e!r}")
raise HTTPException(status_code=422, detail=e.errors())
if interaction.type == InteractionType.PING:
return {"type": ResponseType.PONG}
elif interaction.type in (
InteractionType.APPLICATION_COMMAND,
InteractionType.APPLICATION_COMMAND_AUTOCOMPLETE,
):
cmd = self._commands.get(interaction.data.name)
if cmd is None:
raise HTTPException(status_code=400, detail="No command found")
DEFAULT_RESPONSE_TYPE = ResponseType.CHANNEL_MESSAGE_WITH_SOURCE
return await self._invoke_with_handlers(
cmd, interaction, DEFAULT_RESPONSE_TYPE, pass_parent=True
)
elif interaction.type == InteractionType.MESSAGE_COMPONENT:
if interaction.data.custom_id is None:
raise HTTPException(status_code=400)
custom_id, *_ = interaction.data.custom_id.split(":", maxsplit=1)
component = self._components.get(custom_id)
if component is None:
raise HTTPException(status_code=400, detail="No component found")
DEFAULT_RESPONSE_TYPE = ResponseType.UPDATE_MESSAGE
return await self._invoke_with_handlers(
component, interaction, DEFAULT_RESPONSE_TYPE
)
raise HTTPException(status_code=400)
async def _invoke_with_handlers(
self,
callback,
interaction: Interaction,
default_response_type: ResponseType,
pass_parent: bool = False,
) -> ResponsePayload:
try:
resp = await callback(self, interaction)
except Exception as e:
handler = self._global_error_handlers.get(type(e))
if handler is None:
raise e from None
resp = handler(e)
args = [default_response_type, resp]
if pass_parent:
args.append(interaction)
resp = await self.process_response(*args)
_log.debug("returning response: %s", resp)
return resp
@validate_arguments(config={"arbitrary_types_allowed": True})
async def process_response(
self,
default_response_type: ResponseType,
response: Union[
None,
ResponsePayload,
Response,
ResponseData,
],
parent_interaction: Optional[Interaction] = None,
) -> ResponsePayload:
"""
Converts any of the possible response types into a ResponsePayload.
This is mostly useful for deferred components and allowing some level
of dynamic handling for users.
Args:
default_response_type:
The default ResponseType to use if the Response object / data
has not been set one.
response:
A given instance of the possible response types to process and
convert.
parent_interaction:
The interaction a given component belongs to.
Returns:
A ResponsePayload instance that has had all deferred components
resolved.
"""
if response is None:
return await Response().into_response_payload(
app=self,
default_type=default_response_type,
parent_interaction=parent_interaction,
)
if isinstance(response, ResponsePayload):
return response
if isinstance(response, Response):
return await response.into_response_payload(
app=self,
default_type=default_response_type,
parent_interaction=parent_interaction,
)
elif isinstance(response, ResponseData):
return ResponsePayload(type=default_response_type, data=response)
raise TypeError(
f"expected either: {ResponsePayload!r}, "
f"{ResponseData!r} or {Response!r} return type."
)
def add_blueprint(self, bp: CommandsBlueprint):
"""
Registers all commands and components linked
to the blueprint with the application.
This resolves all deferred components in the process.
"""
for command in bp._commands: # noqa
command(app=self)
for component in bp._components: # noqa
component(app=self)
@validate_arguments(config={"arbitrary_types_allowed": True})
def group(
self,
name: str,
description: str,
*,
guild_id: int = None,
guild_ids: List[int] = None,
default_permissions: bool = True,
defer_register: bool = False,
group_name: constr(
strip_whitespace=True, regex="[a-zA-Z0-9]+", min_length=1, max_length=30
) = "command",
group_description: constr(
strip_whitespace=True, regex="[a-zA-Z0-9 ]+", min_length=1, max_length=95
) = "Select a sub command to run.",
existing_commands: Dict[str, DeferredGroupCommand] = None, # noqa
):
"""
Registers a command group with the given app.
The description is required.
If either the conditions are broken a `ValueError` is raised.
Args:
name:
The name of the command. This must be unique / follow the general
slash command rules as described in the "Application Command Structure"
section of the interactions documentation.
description:
The description of the group command.
guild_id:
The optional guild id if this is a guild specific command.
guild_ids:
An optional list of id's to register this command with multiple guilds.
default_permissions:
Whether the command is enabled by default when the app is added to a guild.
defer_register:
Whether or not to automatically register the command / update the command
if needed.
If set to `False` this will not be automatically registered / updated.
group_name:
The name of the parameter to label the sub commands group select as.
group_description:
The description of the select option for the sub commands.
"""
cmd = CommandGroup(
app=self,
name=name,
description=description,
application_id=self.application_id,
guild_id=guild_id,
guild_ids=guild_ids,
default_permissions=default_permissions,
defer_register=not defer_register,
group_name=group_name,
group_description=group_description,
existing_commands=existing_commands,
)
if name in self._commands:
raise CommandAlreadyExists(
f"command with name {name!r} has already been defined and registered"
)
self._commands[name] = cmd
return cmd
@validate_arguments
def command(
self,
name: str,
description: str = None,
*,
type: CommandType = CommandType.CHAT_INPUT, # noqa
guild_id: int = None,
guild_ids: List[int] = None,
default_permissions: bool = True,
defer_register: bool = False,
):
"""
Registers a command with the given app.
If the command type is either `CommandType.MESSAGE` or `CommandType.USER`
there cannot be any description however, if the command type
is `CommandType.CHAT_INPUT` then description is required.
If either of those conditions are broken a `ValueError` is raised.
Args:
name:
The name of the command. This must be unique / follow the general
slash command rules as described in the "Application Command Structure"
section of the interactions documentation.
description:
The description of the command. This can only be applied to
`CommandType.CHAT_INPUT` commands.
type:
The type of command. This determines if it's a chat input command,
user context menu command or message context menu command.
defaults to `CommandType.CHAT_INPUT`
guild_id:
The optional guild id if this is a guild specific command.
guild_ids:
An optional list of id's to register this command with multiple guilds.
default_permissions:
Whether the command is enabled by default when the app is added to a guild.
defer_register:
Whether or not to automatically register the command / update the command
if needed.
If set to `False` this will not be automatically registered / updated.
"""
if type in (CommandType.MESSAGE, CommandType.USER) and description is not None:
raise ValueError(f"only CHAT_INPUT types can have a set description.")
elif type is CommandType.CHAT_INPUT and description is None:
raise ValueError(
f"missing required field 'description' for CHAT_INPUT commands."
)
def wrapper(func):
cmd = Command(
app=self,
callback=func,
name=name,
description=description,
application_id=self.application_id,
cmd_type=type,
guild_id=guild_id,
guild_ids=guild_ids,
default_permissions=default_permissions,
defer_register=not defer_register,
)
if name in self._commands:
raise CommandAlreadyExists(
f"command with name {name!r} has already been defined and registered"
)
self._commands[name] = cmd
return cmd
return wrapper
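# Illustrative usage sketch (application credentials are placeholders; returning
# None is handled by process_response(), which builds a default Response):
#
#     app = SlashCommands(application_id=0, application_public_key="<hex key>", token="<bot token>")
#
#     @app.command("ping", "Check that the bot is responding.")
#     async def ping(interaction: Interaction):
#         return None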
@validate_arguments
def button(
self,
label: str,
style: ButtonStyle,
*,
custom_id: Optional[
constr(
strip_whitespace=True, regex="[a-zA-Z0-9]+", min_length=1, max_length=32
)
] = None,
disabled: bool = False,
emoji: constr(strip_whitespace=True, regex=EMOJI_REGEX) = None,
url: Optional[str] = None,
oneshot: bool = False,
):
"""
Attaches a button component to the given command.
Args:
style:
The set button style. This can be any set style however url styles
require the url kwarg and generally would be better off using
the hyperlink helper decorator.
custom_id:
The custom button identifier. If you plan on having long running
persistent buttons that dont require context from their parent command;
e.g. reaction roles. You probably want to set this.
disabled:
If the button should start disabled or not.
label:
The button label / text shown on the button.
emoji:
The set emoji for the button. This should be a custom emoji
not a unicode emoji (use the `label` field for that.)
url:
The hyperlink url, if this is set the function body is not invoked
on click along with the `emoji` and `style` field being ignored.
oneshot:
If set to True this will remove the context from the store as soon
as it's invoked for the first time. This allows you to essentially
create one shot buttons which are invalidated after the first use.
"""
if emoji is not None:
emoji = re.findall(EMOJI_REGEX, emoji)[0]
animated, name, id_ = emoji
emoji = PartialEmoji(id=id_, name=name, animated=bool(animated))
if custom_id is None:
custom_id = str(uuid.uuid4())
if url is not None:
custom_id = None
def wrapper(func):
component = Component(
app=self,
callback=func,
type_=ComponentType.BUTTON,
style=style,
custom_id=custom_id,
disabled=disabled,
label=label,
emoji=emoji,
url=url,
oneshot=oneshot,
)
if url is None:
if custom_id in self._components:
raise ComponentAlreadyExists(
f"component with custom_id {custom_id!r} has "
f"already been defined and registered"
)
self._components[custom_id] = component
return component
return wrapper
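# Illustrative usage sketch (the style member name PRIMARY is an assumption
# mirroring Discord's button styles; the callback body is a placeholder):
#
#     @app.button("Confirm", style=ButtonStyle.PRIMARY, oneshot=True)
#     async def confirm(interaction: Interaction):
#         return None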
@validate_arguments
def select(
self,
*,
custom_id: Optional[
constr(
strip_whitespace=True, regex="[a-zA-Z0-9]+", min_length=1, max_length=32
)
] = None,
disabled: bool = False,
placeholder: str = "Select an option.",
min_values: conint(ge=0, le=25) = 1,
max_values: conint(ge=0, le=25) = 1,
oneshot: bool = False,
):
"""
A select menu component.
This will occupy an entire action row, so any components sharing the row
will be rejected (on a first-come, first-served basis).
Args:
custom_id:
The custom component identifier. If you plan on having long-running
persistent menus that don't require context from their parent command,
e.g. reaction roles, you probably want to set this.
disabled:
If the menu should start disabled or not.
placeholder:
The placeholder text the user sees while the menu is not focused.
min_values:
The minimum number of values the user must select.
max_values:
The maximum number of values the user can select.
oneshot:
If set to True this will remove the context from the store as soon
as it's invoked for the first time. This allows you to essentially
create one-shot menus which are invalidated after the first use.
"""
if custom_id is None:
custom_id = str(uuid.uuid4())
if max_values < min_values:
raise ValueError(
"the minimum number of select values cannot be "
"larger than the maximum number of select values."
)
def wrapper(func):
spec = inspect.getfullargspec(func)
for param, hint in spec.annotations.items():
if hint in (Interaction, InvokeContext):
continue
origin = typing.get_origin(hint)
# Needed if it's a multi-valued select.
if origin is not list and max_values != 1 and min_values != 1:
raise TypeError(
f"multi-value selects must be typed as a List[T] rather than T."
)
if origin is list:
inner, *_ = typing.get_args(hint)
if inner is str:
options = []
break
if hint is str:
options = []
break
options = _get_select_options(
typing.get_args(hint)[0] if origin is list else hint
)
if len(options) == 0:
raise ValueError(f"Select options must contain at least one value.")
break
else:
raise TypeError(
"function missing select value parameter and type hints."
)
component = Component(
app=self,
callback=func,
type_=ComponentType.SELECT_MENU,
custom_id=custom_id,
disabled=disabled,
placeholder=placeholder,
min_values=min_values,
max_values=max_values,
oneshot=oneshot,
options=options,
options_parameter=param,
)
if custom_id in self._components:
raise ComponentAlreadyExists(
f"component with custom_id {custom_id!r} has already been defined and registered"
)
self._components[custom_id] = component
return component
return wrapper
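# A minimal usage sketch, not taken from the original source: it assumes `app`
# is an instance of this class. The select options are derived from the type
# hint of the value parameter (an Enum or Literal, see _get_select_options below).
#
#     class Colour(Enum):
#         RED = "red"
#         GREEN = "green"
#
#     @app.select(placeholder="Pick a colour.")
#     async def pick_colour(interaction, choice: Colour):
#         ...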
def _get_select_options(val: typing.Any) -> List[SelectOption]:
option_choices = []
if typing.get_origin(val) is Literal:
for value in typing.get_args(val):
if not isinstance(value, str):
raise TypeError(
f"select options have incompatible types. "
"Literal values must all be of type `str`. "
f"Expected `str`, found {type(value)!r}"
)
option = SelectOption(
label=value,
value=value,
)
if option in option_choices:
raise ValueError(f"select options cannot have duplicate labels.")
option_choices.append(option)
return option_choices
if not issubclass(val, Enum):
raise TypeError(
"invalid type given, expected a subclass of Enum or Literal.\n"
"Note: you can hint as type `str` to mark the select as general component. "
"This means you can add options at runtime via component.with_options()."
)
set_type = None
for v in val:
if not isinstance(v.value, (str, SelectValue)):
raise TypeError(
f"select options have incompatible types. "
f"enum must contain all `str` types or `SelectValue` types. "
f"Found {type(v.value)!r}"
)
if (set_type is not None) and (type(v.value) is not set_type):
raise TypeError(
f"enum values must all be the same type. "
f"Expected type: {set_type!r} got {type(v.value)!r}"
)
else:
set_type = type(v.value)
if isinstance(v.value, SelectValue):
value = v.value
option = SelectOption(
label=value.label,
value=value.value,
emoji=value.emoji,
description=value.description,
default=value.default,
)
else:
option = SelectOption(
label=v.value,
value=v.value,
)
if option in option_choices:
raise ValueError(f"select options cannot have duplicate labels.")
option_choices.append(option)
return option_choices
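# Illustrative only, not part of the original module: Literal hints are also
# accepted, producing one SelectOption per literal string.
#
#     from typing import Literal
#     options = _get_select_options(Literal["small", "medium", "large"])
#     # -> [SelectOption(label="small", value="small"), ...]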
|
py
|
1a575c7fdbd930e26ce789abec7b02f8ea2057fa
|
num1 = 111
num2 = 222
num3 = 33333333
num3 = 333
num4 = 44444
|
py
|
1a575d1eff722713097fb2ba69928dfd73a91213
|
"""Test the condition helper."""
from logging import ERROR
import pytest
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import condition
from homeassistant.util import dt
from tests.async_mock import patch
async def test_invalid_condition(hass):
"""Test if invalid condition raises."""
with pytest.raises(HomeAssistantError):
await condition.async_from_config(
hass,
{
"condition": "invalid",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": "100",
},
],
},
)
async def test_and_condition(hass):
"""Test the 'and' condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 110,
},
],
},
)
hass.states.async_set("sensor.temperature", 120)
assert not test(hass)
hass.states.async_set("sensor.temperature", 105)
assert not test(hass)
hass.states.async_set("sensor.temperature", 100)
assert test(hass)
async def test_and_condition_with_template(hass):
"""Test the 'and' condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "template",
"value_template": '{{ states.sensor.temperature.state == "100" }}',
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 110,
},
],
},
)
hass.states.async_set("sensor.temperature", 120)
assert not test(hass)
hass.states.async_set("sensor.temperature", 105)
assert not test(hass)
hass.states.async_set("sensor.temperature", 100)
assert test(hass)
async def test_or_condition(hass):
"""Test the 'or' condition."""
test = await condition.async_from_config(
hass,
{
"condition": "or",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 110,
},
],
},
)
hass.states.async_set("sensor.temperature", 120)
assert not test(hass)
hass.states.async_set("sensor.temperature", 105)
assert test(hass)
hass.states.async_set("sensor.temperature", 100)
assert test(hass)
async def test_or_condition_with_template(hass):
"""Test the 'or' condition."""
test = await condition.async_from_config(
hass,
{
"condition": "or",
"conditions": [
{
"condition": "template",
"value_template": '{{ states.sensor.temperature.state == "100" }}',
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 110,
},
],
},
)
hass.states.async_set("sensor.temperature", 120)
assert not test(hass)
hass.states.async_set("sensor.temperature", 105)
assert test(hass)
hass.states.async_set("sensor.temperature", 100)
assert test(hass)
async def test_not_condition(hass):
"""Test the 'not' condition."""
test = await condition.async_from_config(
hass,
{
"condition": "not",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 50,
},
],
},
)
hass.states.async_set("sensor.temperature", 101)
assert test(hass)
hass.states.async_set("sensor.temperature", 50)
assert test(hass)
hass.states.async_set("sensor.temperature", 49)
assert not test(hass)
hass.states.async_set("sensor.temperature", 100)
assert not test(hass)
async def test_not_condition_with_template(hass):
"""Test the 'not' condition with a template."""
test = await condition.async_from_config(
hass,
{
"condition": "not",
"conditions": [
{
"condition": "template",
"value_template": '{{ states.sensor.temperature.state == "100" }}',
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 50,
},
],
},
)
hass.states.async_set("sensor.temperature", 101)
assert test(hass)
hass.states.async_set("sensor.temperature", 50)
assert test(hass)
hass.states.async_set("sensor.temperature", 49)
assert not test(hass)
hass.states.async_set("sensor.temperature", 100)
assert not test(hass)
async def test_time_window(hass):
"""Test time condition windows."""
sixam = dt.parse_time("06:00:00")
sixpm = dt.parse_time("18:00:00")
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=3),
):
assert not condition.time(after=sixam, before=sixpm)
assert condition.time(after=sixpm, before=sixam)
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=9),
):
assert condition.time(after=sixam, before=sixpm)
assert not condition.time(after=sixpm, before=sixam)
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=15),
):
assert condition.time(after=sixam, before=sixpm)
assert not condition.time(after=sixpm, before=sixam)
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=21),
):
assert not condition.time(after=sixam, before=sixpm)
assert condition.time(after=sixpm, before=sixam)
async def test_if_numeric_state_not_raise_on_unavailable(hass):
"""Test numeric_state doesn't raise on unavailable/unknown state."""
test = await condition.async_from_config(
hass,
{"condition": "numeric_state", "entity_id": "sensor.temperature", "below": 42},
)
with patch("homeassistant.helpers.condition._LOGGER.warning") as logwarn:
hass.states.async_set("sensor.temperature", "unavailable")
assert not test(hass)
assert len(logwarn.mock_calls) == 0
hass.states.async_set("sensor.temperature", "unknown")
assert not test(hass)
assert len(logwarn.mock_calls) == 0
async def test_state_multiple_entities(hass):
"""Test with multiple entities in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "state",
"entity_id": ["sensor.temperature_1", "sensor.temperature_2"],
"state": "100",
},
],
},
)
hass.states.async_set("sensor.temperature_1", 100)
hass.states.async_set("sensor.temperature_2", 100)
assert test(hass)
hass.states.async_set("sensor.temperature_1", 101)
hass.states.async_set("sensor.temperature_2", 100)
assert not test(hass)
hass.states.async_set("sensor.temperature_1", 100)
hass.states.async_set("sensor.temperature_2", 101)
assert not test(hass)
async def test_multiple_states(hass):
"""Test with multiple states in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": ["100", "200"],
},
],
},
)
hass.states.async_set("sensor.temperature", 100)
assert test(hass)
hass.states.async_set("sensor.temperature", 200)
assert test(hass)
hass.states.async_set("sensor.temperature", 42)
assert not test(hass)
async def test_numeric_state_multiple_entities(hass):
"""Test with multiple entities in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "numeric_state",
"entity_id": ["sensor.temperature_1", "sensor.temperature_2"],
"below": 50,
},
],
},
)
hass.states.async_set("sensor.temperature_1", 49)
hass.states.async_set("sensor.temperature_2", 49)
assert test(hass)
hass.states.async_set("sensor.temperature_1", 50)
hass.states.async_set("sensor.temperature_2", 49)
assert not test(hass)
hass.states.async_set("sensor.temperature_1", 49)
hass.states.async_set("sensor.temperature_2", 50)
assert not test(hass)
async def test_zone_multiple_entities(hass):
"""Test with multiple entities in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "zone",
"entity_id": ["device_tracker.person_1", "device_tracker.person_2"],
"zone": "zone.home",
},
],
},
)
hass.states.async_set(
"zone.home",
"zoning",
{"name": "home", "latitude": 2.1, "longitude": 1.1, "radius": 10},
)
hass.states.async_set(
"device_tracker.person_1",
"home",
{"friendly_name": "person_1", "latitude": 2.1, "longitude": 1.1},
)
hass.states.async_set(
"device_tracker.person_2",
"home",
{"friendly_name": "person_2", "latitude": 2.1, "longitude": 1.1},
)
assert test(hass)
hass.states.async_set(
"device_tracker.person_1",
"home",
{"friendly_name": "person_1", "latitude": 20.1, "longitude": 10.1},
)
hass.states.async_set(
"device_tracker.person_2",
"home",
{"friendly_name": "person_2", "latitude": 2.1, "longitude": 1.1},
)
assert not test(hass)
hass.states.async_set(
"device_tracker.person_1",
"home",
{"friendly_name": "person_1", "latitude": 2.1, "longitude": 1.1},
)
hass.states.async_set(
"device_tracker.person_2",
"home",
{"friendly_name": "person_2", "latitude": 20.1, "longitude": 10.1},
)
assert not test(hass)
async def test_multiple_zones(hass):
"""Test with multiple entities in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "zone",
"entity_id": "device_tracker.person",
"zone": ["zone.home", "zone.work"],
},
],
},
)
hass.states.async_set(
"zone.home",
"zoning",
{"name": "home", "latitude": 2.1, "longitude": 1.1, "radius": 10},
)
hass.states.async_set(
"zone.work",
"zoning",
{"name": "work", "latitude": 20.1, "longitude": 10.1, "radius": 10},
)
hass.states.async_set(
"device_tracker.person",
"home",
{"friendly_name": "person", "latitude": 2.1, "longitude": 1.1},
)
assert test(hass)
hass.states.async_set(
"device_tracker.person",
"home",
{"friendly_name": "person", "latitude": 20.1, "longitude": 10.1},
)
assert test(hass)
hass.states.async_set(
"device_tracker.person",
"home",
{"friendly_name": "person", "latitude": 50.1, "longitude": 20.1},
)
assert not test(hass)
async def test_extract_entities():
"""Test extracting entities."""
assert condition.async_extract_entities(
{
"condition": "and",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature_2",
"below": 110,
},
{
"condition": "not",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature_3",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature_4",
"below": 110,
},
],
},
{
"condition": "or",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature_5",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature_6",
"below": 110,
},
],
},
{
"condition": "state",
"entity_id": ["sensor.temperature_7", "sensor.temperature_8"],
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": ["sensor.temperature_9", "sensor.temperature_10"],
"below": 110,
},
],
}
) == {
"sensor.temperature",
"sensor.temperature_2",
"sensor.temperature_3",
"sensor.temperature_4",
"sensor.temperature_5",
"sensor.temperature_6",
"sensor.temperature_7",
"sensor.temperature_8",
"sensor.temperature_9",
"sensor.temperature_10",
}
async def test_extract_devices():
"""Test extracting devices."""
assert condition.async_extract_devices(
{
"condition": "and",
"conditions": [
{"condition": "device", "device_id": "abcd", "domain": "light"},
{"condition": "device", "device_id": "qwer", "domain": "switch"},
{
"condition": "state",
"entity_id": "sensor.not_a_device",
"state": "100",
},
{
"condition": "not",
"conditions": [
{
"condition": "device",
"device_id": "abcd_not",
"domain": "light",
},
{
"condition": "device",
"device_id": "qwer_not",
"domain": "switch",
},
],
},
{
"condition": "or",
"conditions": [
{
"condition": "device",
"device_id": "abcd_or",
"domain": "light",
},
{
"condition": "device",
"device_id": "qwer_or",
"domain": "switch",
},
],
},
],
}
) == {"abcd", "qwer", "abcd_not", "qwer_not", "abcd_or", "qwer_or"}
async def test_condition_template_error(hass, caplog):
"""Test invalid template."""
caplog.set_level(ERROR)
test = await condition.async_from_config(
hass, {"condition": "template", "value_template": "{{ undefined.state }}"}
)
assert not test(hass)
assert len(caplog.records) == 1
assert caplog.records[0].message.startswith(
"Error during template condition: UndefinedError:"
)
|
py
|
1a575d2c0b7f0df1557cb5b03555ddd6ea052a0f
|
# Copyright 2018 ZTE Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
show_quota_classes = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'quota_class_set': {
'type': 'object',
'properties': {
'id': {'type': 'string', 'format': 'uuid'},
'volumes': {'type': 'integer'},
'snapshots': {'type': 'integer'},
'backups': {'type': 'integer'},
'groups': {'type': 'integer'},
'per_volume_gigabytes': {'type': 'integer'},
'gigabytes': {'type': 'integer'},
'backup_gigabytes': {'type': 'integer'},
},
# for volumes_{volume_type}, etc
"additionalProperties": {'type': 'integer'},
'required': ['id', 'volumes', 'snapshots', 'backups',
'per_volume_gigabytes', 'gigabytes',
'backup_gigabytes'],
}
},
'required': ['quota_class_set']
}
}
update_quota_classes = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'quota_class_set': {
'type': 'object',
'properties': {
'volumes': {'type': 'integer'},
'snapshots': {'type': 'integer'},
'backups': {'type': 'integer'},
'groups': {'type': 'integer'},
'per_volume_gigabytes': {'type': 'integer'},
'gigabytes': {'type': 'integer'},
'backup_gigabytes': {'type': 'integer'},
},
# for volumes_{volume_type}, etc
"additionalProperties": {'type': 'integer'},
'required': ['volumes', 'snapshots', 'backups',
'per_volume_gigabytes', 'gigabytes',
'backup_gigabytes'],
}
},
'required': ['quota_class_set']
}
}
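# Illustrative sketch only, not part of the original module: these dicts follow
# JSON Schema conventions, so a sample response body can be checked against them
# with the third-party ``jsonschema`` package (assumed to be available).
if __name__ == "__main__":
    import jsonschema

    sample = {
        "quota_class_set": {
            "id": "00000000-0000-0000-0000-000000000000",
            "volumes": 10,
            "snapshots": 10,
            "backups": 10,
            "per_volume_gigabytes": -1,
            "gigabytes": 1000,
            "backup_gigabytes": 1000,
        }
    }
    # Raises jsonschema.ValidationError if the body does not match the schema.
    jsonschema.validate(sample, show_quota_classes["response_body"])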
|
py
|
1a575da965da5f254973177944f1249e67f4593c
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-21 16:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0011_auto_20180817_1631'),
]
operations = [
migrations.AddField(
model_name='historicslug',
name='service_name',
field=models.CharField(choices=[('FIND_A_SUPPLIER', 'Find a Supplier'), ('EXPORT_READINESS', 'Export Readiness'), ('INVEST', 'Invest')], max_length=50, null=True),
),
migrations.AlterUniqueTogether(
name='historicslug',
unique_together=set([('slug', 'service_name')]),
),
]
|
py
|
1a575e485e5eaf4dc6eaa4bb3e05794b56d4fc97
|
import errno
import inspect
import os
import sys
from contextlib import contextmanager
from itertools import repeat
from functools import update_wrapper
from .types import convert_type, IntRange, BOOL
from .utils import (
PacifyFlushWrapper,
make_str,
make_default_short_help,
echo,
get_os_args,
)
from .exceptions import (
ClickException,
UsageError,
BadParameter,
Abort,
MissingParameter,
Exit,
)
from .termui import prompt, confirm, style
from .formatting import HelpFormatter, join_options
from .parser import OptionParser, split_opt
from .globals import push_context, pop_context
from ._compat import PY2, isidentifier, iteritems, string_types
from ._unicodefun import _check_for_unicode_literals, _verify_python3_env
_missing = object()
SUBCOMMAND_METAVAR = "COMMAND [ARGS]..."
SUBCOMMANDS_METAVAR = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..."
DEPRECATED_HELP_NOTICE = " (DEPRECATED)"
DEPRECATED_INVOKE_NOTICE = (
"DeprecationWarning: The command %(name)s is deprecated."
)
def _maybe_show_deprecated_notice(cmd):
if cmd.deprecated:
echo(style(DEPRECATED_INVOKE_NOTICE % {"name": cmd.name}, fg="red"), err=True)
def fast_exit(code):
"""Exit without garbage collection, this speeds up exit by about 10ms for
things like bash completion.
"""
sys.stdout.flush()
sys.stderr.flush()
os._exit(code)
def _bashcomplete(cmd, prog_name, complete_var=None):
"""Internal handler for the bash completion support."""
if complete_var is None:
complete_var = "_%s_COMPLETE" % (prog_name.replace("-", "_")).upper()
complete_instr = os.environ.get(complete_var)
if not complete_instr:
return
from ._bashcomplete import bashcomplete
if bashcomplete(cmd, prog_name, complete_var, complete_instr):
fast_exit(1)
def _check_multicommand(base_command, cmd_name, cmd, register=False):
if not base_command.chain or not isinstance(cmd, MultiCommand):
return
if register:
hint = "It is not possible to add multi commands as children to " "another multi command that is in chain mode"
else:
hint = "Found a multi command as subcommand to a multi command " "that is in chain mode. This is not supported"
raise RuntimeError(
'%s. Command "%s" is set to chain and "%s" was '
"added as subcommand but it in itself is a "
'multi command. ("%s" is a %s within a chained '
'%s named "%s").'
% (
hint,
base_command.name,
cmd_name,
cmd_name,
cmd.__class__.__name__,
base_command.__class__.__name__,
base_command.name,
)
)
def batch(iterable, batch_size):
return list(zip(*repeat(iter(iterable), batch_size)))
def invoke_param_callback(callback, ctx, param, value):
code = getattr(callback, "__code__", None)
args = getattr(code, "co_argcount", 3)
if args < 3:
# This will become a warning in Click 3.0:
from warnings import warn
warn(
Warning(
'Invoked legacy parameter callback "%s". The new '
"signature for such callbacks starting with "
"click 2.0 is (ctx, param, value)." % callback
),
stacklevel=3,
)
return callback(ctx, value)
return callback(ctx, param, value)
@contextmanager
def augment_usage_errors(ctx, param=None):
"""Context manager that attaches extra information to exceptions that
fly.
"""
try:
yield
except BadParameter as e:
if e.ctx is None:
e.ctx = ctx
if param is not None and e.param is None:
e.param = param
raise
except UsageError as e:
if e.ctx is None:
e.ctx = ctx
raise
def iter_params_for_processing(invocation_order, declaration_order):
"""Given a sequence of parameters in the order as should be considered
for processing and an iterable of parameters that exist, this returns
a list in the correct order as they should be processed.
"""
def sort_key(item):
try:
idx = invocation_order.index(item)
except ValueError:
idx = float("inf")
return (not item.is_eager, idx)
return sorted(declaration_order, key=sort_key)
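# Illustrative only, not from the upstream source (a, b and eager_c stand in for
# Parameter objects): eager parameters are moved to the front, and within each
# group the command line invocation order wins.
#
#     order = iter_params_for_processing(
#         invocation_order=[b, a],            # as parsed from the command line
#         declaration_order=[a, b, eager_c],  # as declared on the command
#     )
#     # -> [eager_c, b, a]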
class Context(object):
"""The context is a special internal object that holds state relevant
for the script execution at every single level. It's normally invisible
to commands unless they opt-in to getting access to it.
The context is useful as it can pass internal objects around and can
control special execution features such as reading data from
environment variables.
A context can be used as context manager in which case it will call
:meth:`close` on teardown.
.. versionadded:: 2.0
Added the `resilient_parsing`, `help_option_names`,
`token_normalize_func` parameters.
.. versionadded:: 3.0
Added the `allow_extra_args` and `allow_interspersed_args`
parameters.
.. versionadded:: 4.0
Added the `color`, `ignore_unknown_options`, and
`max_content_width` parameters.
:param command: the command class for this context.
:param parent: the parent context.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it is usually
the name of the script, for commands below it it's
the name of the script.
:param obj: an arbitrary object of user data.
:param auto_envvar_prefix: the prefix to use for automatic environment
variables. If this is `None` then reading
from environment variables is disabled. This
does not affect manually set environment
variables which are always read.
:param default_map: a dictionary (like object) with default values
for parameters.
:param terminal_width: the width of the terminal. The default is
inherit from parent context. If no context
defines the terminal width then auto
detection will be applied.
:param max_content_width: the maximum width for content rendered by
Click (this currently only affects help
pages). This defaults to 80 characters if
not overridden. In other words: even if the
terminal is larger than that, Click will not
format things wider than 80 characters by
default. In addition to that, formatters might
add some safety mapping on the right.
:param resilient_parsing: if this flag is enabled then Click will
parse without any interactivity or callback
invocation. Default values will also be
ignored. This is useful for implementing
things such as completion support.
:param allow_extra_args: if this is set to `True` then extra arguments
at the end will not raise an error and will be
kept on the context. The default is to inherit
from the command.
:param allow_interspersed_args: if this is set to `False` then options
and arguments cannot be mixed. The
default is to inherit from the command.
:param ignore_unknown_options: instructs click to ignore options it does
not know and keeps them for later
processing.
:param help_option_names: optionally a list of strings that define how
the default help parameter is named. The
default is ``['--help']``.
:param token_normalize_func: an optional function that is used to
normalize tokens (options, choices,
etc.). This for instance can be used to
implement case insensitive behavior.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are used in texts that Click prints which is by
default not the case. This for instance would affect
help output.
"""
def __init__(
self,
command,
parent=None,
info_name=None,
obj=None,
auto_envvar_prefix=None,
default_map=None,
terminal_width=None,
max_content_width=None,
resilient_parsing=False,
allow_extra_args=None,
allow_interspersed_args=None,
ignore_unknown_options=None,
help_option_names=None,
token_normalize_func=None,
color=None,
):
#: the parent context or `None` if none exists.
self.parent = parent
#: the :class:`Command` for this context.
self.command = command
#: the descriptive information name
self.info_name = info_name
#: the parsed parameters except if the value is hidden in which
#: case it's not remembered.
self.params = {}
#: the leftover arguments.
self.args = []
#: protected arguments. These are arguments that are prepended
#: to `args` when certain parsing scenarios are encountered but
#: must never be propagated to other arguments. This is used
#: to implement nested parsing.
self.protected_args = []
if obj is None and parent is not None:
obj = parent.obj
#: the user object stored.
self.obj = obj
self._meta = getattr(parent, "meta", {})
#: A dictionary (-like object) with defaults for parameters.
if (
default_map is None
and parent is not None
and parent.default_map is not None
):
default_map = parent.default_map.get(info_name)
self.default_map = default_map
#: This flag indicates if a subcommand is going to be executed. A
#: group callback can use this information to figure out if it's
#: being executed directly or because the execution flow passes
#: onwards to a subcommand. By default it's None, but it can be
#: the name of the subcommand to execute.
#:
#: If chaining is enabled this will be set to ``'*'`` in case
#: any commands are executed. It is however not possible to
#: figure out which ones. If you require this knowledge you
#: should use a :func:`resultcallback`.
self.invoked_subcommand = None
if terminal_width is None and parent is not None:
terminal_width = parent.terminal_width
#: The width of the terminal (None is autodetection).
self.terminal_width = terminal_width
if max_content_width is None and parent is not None:
max_content_width = parent.max_content_width
#: The maximum width of formatted content (None implies a sensible
#: default which is 80 for most things).
self.max_content_width = max_content_width
if allow_extra_args is None:
allow_extra_args = command.allow_extra_args
#: Indicates if the context allows extra args or if it should
#: fail on parsing.
#:
#: .. versionadded:: 3.0
self.allow_extra_args = allow_extra_args
if allow_interspersed_args is None:
allow_interspersed_args = command.allow_interspersed_args
#: Indicates if the context allows mixing of arguments and
#: options or not.
#:
#: .. versionadded:: 3.0
self.allow_interspersed_args = allow_interspersed_args
if ignore_unknown_options is None:
ignore_unknown_options = command.ignore_unknown_options
#: Instructs click to ignore options that a command does not
#: understand and will store it on the context for later
#: processing. This is primarily useful for situations where you
#: want to call into external programs. Generally this pattern is
#: strongly discouraged because it's not possible to losslessly
#: forward all arguments.
#:
#: .. versionadded:: 4.0
self.ignore_unknown_options = ignore_unknown_options
if help_option_names is None:
if parent is not None:
help_option_names = parent.help_option_names
else:
help_option_names = ["--help"]
#: The names for the help options.
self.help_option_names = help_option_names
if token_normalize_func is None and parent is not None:
token_normalize_func = parent.token_normalize_func
#: An optional normalization function for tokens. This is
#: options, choices, commands etc.
self.token_normalize_func = token_normalize_func
#: Indicates if resilient parsing is enabled. In that case Click
#: will do its best to not cause any failures and default values
#: will be ignored. Useful for completion.
self.resilient_parsing = resilient_parsing
# If there is no envvar prefix yet, but the parent has one and
# the command on this level has a name, we can expand the envvar
# prefix automatically.
if auto_envvar_prefix is None:
if (
parent is not None
and parent.auto_envvar_prefix is not None
and self.info_name is not None
):
auto_envvar_prefix = "%s_%s" % (
parent.auto_envvar_prefix,
self.info_name.upper(),
)
else:
auto_envvar_prefix = auto_envvar_prefix.upper()
self.auto_envvar_prefix = auto_envvar_prefix
if color is None and parent is not None:
color = parent.color
#: Controls if styling output is wanted or not.
self.color = color
self._close_callbacks = []
self._depth = 0
def __enter__(self):
self._depth += 1
push_context(self)
return self
def __exit__(self, exc_type, exc_value, tb):
self._depth -= 1
if self._depth == 0:
self.close()
pop_context()
@contextmanager
def scope(self, cleanup=True):
"""This helper method can be used with the context object to promote
it to the current thread local (see :func:`get_current_context`).
The default behavior of this is to invoke the cleanup functions which
can be disabled by setting `cleanup` to `False`. The cleanup
functions are typically used for things such as closing file handles.
If the cleanup is intended the context object can also be directly
used as a context manager.
Example usage::
with ctx.scope():
assert get_current_context() is ctx
This is equivalent::
with ctx:
assert get_current_context() is ctx
.. versionadded:: 5.0
:param cleanup: controls if the cleanup functions should be run or
not. The default is to run these functions. In
some situations the context only wants to be
temporarily pushed in which case this can be disabled.
Nested pushes automatically defer the cleanup.
"""
if not cleanup:
self._depth += 1
try:
with self as rv:
yield rv
finally:
if not cleanup:
self._depth -= 1
@property
def meta(self):
"""This is a dictionary which is shared with all the contexts
that are nested. It exists so that click utilities can store some
state here if they need to. It is however the responsibility of
that code to manage this dictionary well.
The keys are supposed to be unique dotted strings. For instance
module paths are a good choice for it. What is stored in there is
irrelevant for the operation of click. However what is important is
that code that places data here adheres to the general semantics of
the system.
Example usage::
LANG_KEY = __name__ + '.lang'
def set_language(value):
ctx = get_current_context()
ctx.meta[LANG_KEY] = value
def get_language():
return get_current_context().meta.get(LANG_KEY, 'en_US')
.. versionadded:: 5.0
"""
return self._meta
def make_formatter(self):
"""Creates the formatter for the help and usage output."""
return HelpFormatter(
width=self.terminal_width, max_width=self.max_content_width
)
def call_on_close(self, f):
"""This decorator remembers a function as callback that should be
executed when the context tears down. This is most useful to bind
resource handling to the script execution. For instance, file objects
opened by the :class:`File` type will register their close callbacks
here.
:param f: the function to execute on teardown.
"""
self._close_callbacks.append(f)
return f
def close(self):
"""Invokes all close callbacks."""
for cb in self._close_callbacks:
cb()
self._close_callbacks = []
@property
def command_path(self):
"""The computed command path. This is used for the ``usage``
information on the help page. It's automatically created by
combining the info names of the chain of contexts to the root.
"""
rv = ""
if self.info_name is not None:
rv = self.info_name
if self.parent is not None:
rv = self.parent.command_path + " " + rv
return rv.lstrip()
def find_root(self):
"""Finds the outermost context."""
node = self
while node.parent is not None:
node = node.parent
return node
def find_object(self, object_type):
"""Finds the closest object of a given type."""
node = self
while node is not None:
if isinstance(node.obj, object_type):
return node.obj
node = node.parent
def ensure_object(self, object_type):
"""Like :meth:`find_object` but sets the innermost object to a
new instance of `object_type` if it does not exist.
"""
rv = self.find_object(object_type)
if rv is None:
self.obj = rv = object_type()
return rv
def lookup_default(self, name):
"""Looks up the default for a parameter name. This by default
looks into the :attr:`default_map` if available.
"""
if self.default_map is not None:
rv = self.default_map.get(name)
if callable(rv):
rv = rv()
return rv
def fail(self, message):
"""Aborts the execution of the program with a specific error
message.
:param message: the error message to fail with.
"""
raise UsageError(message, self)
def abort(self):
"""Aborts the script."""
raise Abort()
def exit(self, code=0):
"""Exits the application with a given exit code."""
raise Exit(code)
def get_usage(self):
"""Helper method to get formatted usage string for the current
context and command.
"""
return self.command.get_usage(self)
def get_help(self):
"""Helper method to get formatted help page for the current
context and command.
"""
return self.command.get_help(self)
def invoke(*args, **kwargs):
"""Invokes a command callback in exactly the way it expects. There
are two ways to invoke this method:
1. the first argument can be a callback and all other arguments and
keyword arguments are forwarded directly to the function.
2. the first argument is a click command object. In that case all
arguments are forwarded as well but proper click parameters
(options and click arguments) must be keyword arguments and Click
will fill in defaults.
Note that before Click 3.2 keyword arguments were not properly filled
in against the intention of this code and no context was created. For
more information about this change and why it was done in a bugfix
release see :ref:`upgrade-to-3.2`.
"""
self, callback = args[:2]
ctx = self
# It's also possible to invoke another command which might or
# might not have a callback. In that case we also fill
# in defaults and make a new context for this command.
if isinstance(callback, Command):
other_cmd = callback
callback = other_cmd.callback
ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
if callback is None:
raise TypeError(
"The given command does not have a " "callback that can be invoked."
)
for param in other_cmd.params:
if param.name not in kwargs and param.expose_value:
kwargs[param.name] = param.get_default(ctx)
args = args[2:]
with augment_usage_errors(self):
with ctx:
return callback(*args, **kwargs)
def forward(*args, **kwargs):
"""Similar to :meth:`invoke` but fills in default keyword
arguments from the current context if the other command expects
it. This cannot invoke callbacks directly, only other commands.
"""
self, cmd = args[:2]
# It's also possible to invoke another command which might or
# might not have a callback.
if not isinstance(cmd, Command):
raise TypeError("Callback is not a command.")
for param in self.params:
if param not in kwargs:
kwargs[param] = self.params[param]
return self.invoke(cmd, **kwargs)
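# Illustrative only, not from the upstream source: the practical difference
# between the two helpers, assuming `other` is a Command reachable from a group
# callback.
#
#     ctx.invoke(other, count=3)   # call `other`, filling unset params with defaults
#     ctx.forward(other)           # additionally re-use this context's own params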
class BaseCommand(object):
"""The base command implements the minimal API contract of commands.
Most code will never use this as it does not implement a lot of useful
functionality but it can act as the direct subclass of alternative
parsing methods that do not depend on the Click parser.
For instance, this can be used to bridge Click and other systems like
argparse or docopt.
Because base commands do not implement a lot of the API that other
parts of Click take for granted, they are not supported for all
operations. For instance, they cannot be used with the decorators
usually and they have no built-in callback system.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
"""
#: the default for the :attr:`Context.allow_extra_args` flag.
allow_extra_args = False
#: the default for the :attr:`Context.allow_interspersed_args` flag.
allow_interspersed_args = True
#: the default for the :attr:`Context.ignore_unknown_options` flag.
ignore_unknown_options = False
def __init__(self, name, context_settings=None):
#: the name the command thinks it has. Upon registering a command
#: on a :class:`Group` the group will default the command name
#: with this information. You should instead use the
#: :class:`Context`\'s :attr:`~Context.info_name` attribute.
self.name = name
if context_settings is None:
context_settings = {}
#: an optional dictionary with defaults passed to the context.
self.context_settings = context_settings
def get_usage(self, ctx):
raise NotImplementedError("Base commands cannot get usage")
def get_help(self, ctx):
raise NotImplementedError("Base commands cannot get help")
def make_context(self, info_name, args, parent=None, **extra):
"""This function when given an info name and arguments will kick
off the parsing and create a new :class:`Context`. It does not
invoke the actual command callback though.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it's usually
the name of the script, for commands below it it's
the name of the script.
:param args: the arguments to parse as list of strings.
:param parent: the parent context if available.
:param extra: extra keyword arguments forwarded to the context
constructor.
"""
for key, value in iteritems(self.context_settings):
if key not in extra:
extra[key] = value
ctx = Context(self, info_name=info_name, parent=parent, **extra)
with ctx.scope(cleanup=False):
self.parse_args(ctx, args)
return ctx
def parse_args(self, ctx, args):
"""Given a context and a list of arguments this creates the parser
and parses the arguments, then modifies the context as necessary.
This is automatically invoked by :meth:`make_context`.
"""
raise NotImplementedError(
"Base commands do not know how to parse " "arguments."
)
def invoke(self, ctx):
"""Given a context, this invokes the command. The default
implementation is raising a not implemented error.
"""
raise NotImplementedError("Base commands are not invokable by default")
def main(
self,
args=None,
prog_name=None,
complete_var=None,
standalone_mode=True,
**extra
):
"""This is the way to invoke a script with all the bells and
whistles as a command line application. This will always terminate
the application after a call. If this is not wanted, ``SystemExit``
needs to be caught.
This method is also available by directly calling the instance of
a :class:`Command`.
.. versionadded:: 3.0
Added the `standalone_mode` flag to control the standalone mode.
:param args: the arguments that should be used for parsing. If not
provided, ``sys.argv[1:]`` is used.
:param prog_name: the program name that should be used. By default
the program name is constructed by taking the file
name from ``sys.argv[0]``.
:param complete_var: the environment variable that controls the
bash completion support. The default is
``"_<prog_name>_COMPLETE"`` with prog_name in
uppercase.
:param standalone_mode: the default behavior is to invoke the script
in standalone mode. Click will then
handle exceptions and convert them into
error messages and the function will never
return but shut down the interpreter. If
this is set to `False` they will be
propagated to the caller and the return
value of this function is the return value
of :meth:`invoke`.
:param extra: extra keyword arguments are forwarded to the context
constructor. See :class:`Context` for more information.
"""
# If we are in Python 3, we will verify that the environment is
# sane at this point or reject further execution to avoid a
# broken script.
if not PY2:
_verify_python3_env()
else:
_check_for_unicode_literals()
if args is None:
args = get_os_args()
else:
args = list(args)
if prog_name is None:
prog_name = make_str(os.path.basename(sys.argv and sys.argv[0] or __file__))
# Hook for the Bash completion. This only activates if the Bash
# completion is actually enabled, otherwise this is quite a fast
# noop.
_bashcomplete(self, prog_name, complete_var)
try:
try:
with self.make_context(prog_name, args, **extra) as ctx:
rv = self.invoke(ctx)
if not standalone_mode:
return rv
# it's not safe to `ctx.exit(rv)` here!
# note that `rv` may actually contain data like "1" which
# has obvious effects
# more subtle case: `rv=[None, None]` can come out of
# chained commands which all returned `None` -- so it's not
# even always obvious that `rv` indicates success/failure
# by its truthiness/falsiness
ctx.exit()
except (EOFError, KeyboardInterrupt):
echo(file=sys.stderr)
raise Abort()
except ClickException as e:
if not standalone_mode:
raise
e.show()
sys.exit(e.exit_code)
except IOError as e:
if e.errno == errno.EPIPE:
sys.stdout = PacifyFlushWrapper(sys.stdout)
sys.stderr = PacifyFlushWrapper(sys.stderr)
sys.exit(1)
else:
raise
except Exit as e:
if standalone_mode:
sys.exit(e.exit_code)
else:
# in non-standalone mode, return the exit code
# note that this is only reached if `self.invoke` above raises
# an Exit explicitly -- thus bypassing the check there which
# would return its result
# the results of non-standalone execution may therefore be
# somewhat ambiguous: if there are codepaths which lead to
# `ctx.exit(1)` and to `return 1`, the caller won't be able to
# tell the difference between the two
return e.exit_code
except Abort:
if not standalone_mode:
raise
echo("Aborted!", file=sys.stderr)
sys.exit(1)
def __call__(self, *args, **kwargs):
"""Alias for :meth:`main`."""
return self.main(*args, **kwargs)
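# Illustrative only, not from the upstream source: with standalone mode turned
# off, `main` returns instead of calling sys.exit, which is convenient in tests
# (assuming `cli` is a Command or Group instance).
#
#     rv = cli.main(["sync"], prog_name="cli", standalone_mode=False)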
class Command(BaseCommand):
"""Commands are the basic building block of command line interfaces in
Click. A basic command handles command line parsing and might dispatch
more parsing to commands nested below it.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
:param callback: the callback to invoke. This is optional.
:param params: the parameters to register with this command. This can
be either :class:`Option` or :class:`Argument` objects.
:param help: the help string to use for this command.
:param epilog: like the help string but it's printed at the end of the
help page after everything else.
:param short_help: the short help to use for this command. This is
shown on the command listing of the parent command.
:param add_help_option: by default each command registers a ``--help``
option. This can be disabled by this parameter.
:param hidden: hide this command from help outputs.
:param deprecated: issues a message indicating that
the command is deprecated.
"""
def __init__(
self,
name,
context_settings=None,
callback=None,
params=None,
help=None,
epilog=None,
short_help=None,
options_metavar="[OPTIONS]",
add_help_option=True,
hidden=False,
deprecated=False,
):
BaseCommand.__init__(self, name, context_settings)
#: the callback to execute when the command fires. This might be
#: `None` in which case nothing happens.
self.callback = callback
#: the list of parameters for this command in the order they
#: should show up in the help page and execute. Eager parameters
#: will automatically be handled before non eager ones.
self.params = params or []
# if a form feed (page break) is found in the help text, truncate help
# text to the content preceding the first form feed
if help and "\f" in help:
help = help.split("\f", 1)[0]
self.help = help
self.epilog = epilog
self.options_metavar = options_metavar
self.short_help = short_help
self.add_help_option = add_help_option
self.hidden = hidden
self.deprecated = deprecated
def get_usage(self, ctx):
formatter = ctx.make_formatter()
self.format_usage(ctx, formatter)
return formatter.getvalue().rstrip("\n")
def get_params(self, ctx):
rv = self.params
help_option = self.get_help_option(ctx)
if help_option is not None:
rv = rv + [help_option]
return rv
def format_usage(self, ctx, formatter):
"""Writes the usage line into the formatter."""
pieces = self.collect_usage_pieces(ctx)
formatter.write_usage(ctx.command_path, " ".join(pieces))
def collect_usage_pieces(self, ctx):
"""Returns all the pieces that go into the usage line and returns
it as a list of strings.
"""
rv = [self.options_metavar]
for param in self.get_params(ctx):
rv.extend(param.get_usage_pieces(ctx))
return rv
def get_help_option_names(self, ctx):
"""Returns the names for the help option."""
all_names = set(ctx.help_option_names)
for param in self.params:
all_names.difference_update(param.opts)
all_names.difference_update(param.secondary_opts)
return all_names
def get_help_option(self, ctx):
"""Returns the help option object."""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help="Show this message and exit.",
)
def make_parser(self, ctx):
"""Creates the underlying option parser for this command."""
parser = OptionParser(ctx)
for param in self.get_params(ctx):
param.add_to_parser(parser, ctx)
return parser
def get_help(self, ctx):
"""Formats the help into a string and returns it. This creates a
formatter and will call into the following formatting methods:
"""
formatter = ctx.make_formatter()
self.format_help(ctx, formatter)
return formatter.getvalue().rstrip("\n")
def get_short_help_str(self, limit=45):
"""Gets short help for the command or makes it by shortening the long help string."""
return (
self.short_help
or self.help
and make_default_short_help(self.help, limit)
or ""
)
def format_help(self, ctx, formatter):
"""Writes the help into the formatter if it exists.
This calls into the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog`
"""
self.format_usage(ctx, formatter)
self.format_help_text(ctx, formatter)
self.format_options(ctx, formatter)
self.format_epilog(ctx, formatter)
def format_help_text(self, ctx, formatter):
"""Writes the help text to the formatter if it exists."""
if self.help:
formatter.write_paragraph()
with formatter.indentation():
help_text = self.help
if self.deprecated:
help_text += DEPRECATED_HELP_NOTICE
formatter.write_text(help_text)
elif self.deprecated:
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(DEPRECATED_HELP_NOTICE)
def format_options(self, ctx, formatter):
"""Writes all the options into the formatter if they exist."""
opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if rv is not None:
opts.append(rv)
if opts:
with formatter.section("Options"):
formatter.write_dl(opts)
def format_epilog(self, ctx, formatter):
"""Writes the epilog into the formatter if it exists."""
if self.epilog:
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(self.epilog)
def parse_args(self, ctx, args):
parser = self.make_parser(ctx)
opts, args, param_order = parser.parse_args(args=args)
for param in iter_params_for_processing(param_order, self.get_params(ctx)):
value, args = param.handle_parse_result(ctx, opts, args)
if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
ctx.fail(
"Got unexpected extra argument%s (%s)"
% (len(args) != 1 and "s" or "", " ".join(map(make_str, args)))
)
ctx.args = args
return args
def invoke(self, ctx):
"""Given a context, this invokes the attached callback (if it exists)
in the right way.
"""
_maybe_show_deprecated_notice(self)
if self.callback is not None:
return ctx.invoke(self.callback, **ctx.params)
class MultiCommand(Command):
"""A multi command is the basic implementation of a command that
dispatches to subcommands. The most common version is the
:class:`Group`.
:param invoke_without_command: this controls how the multi command itself
is invoked. By default it's only invoked
if a subcommand is provided.
:param no_args_is_help: this controls what happens if no arguments are
provided. This option is enabled by default if
`invoke_without_command` is disabled or disabled
if it's enabled. If enabled this will add
``--help`` as argument if no arguments are
passed.
:param subcommand_metavar: the string that is used in the documentation
to indicate the subcommand place.
:param chain: if this is set to `True` chaining of multiple subcommands
is enabled. This restricts the form of commands in that
they cannot have optional arguments but it allows
multiple commands to be chained together.
:param result_callback: the result callback to attach to this multi
command.
"""
allow_extra_args = True
allow_interspersed_args = False
def __init__(
self,
name=None,
invoke_without_command=False,
no_args_is_help=None,
subcommand_metavar=None,
chain=False,
result_callback=None,
**attrs
):
Command.__init__(self, name, **attrs)
if no_args_is_help is None:
no_args_is_help = not invoke_without_command
self.no_args_is_help = no_args_is_help
self.invoke_without_command = invoke_without_command
if subcommand_metavar is None:
if chain:
subcommand_metavar = SUBCOMMANDS_METAVAR
else:
subcommand_metavar = SUBCOMMAND_METAVAR
self.subcommand_metavar = subcommand_metavar
self.chain = chain
#: The result callback that is stored. This can be set or
#: overridden with the :func:`resultcallback` decorator.
self.result_callback = result_callback
if self.chain:
for param in self.params:
if isinstance(param, Argument) and not param.required:
raise RuntimeError(
"Multi commands in chain mode cannot "
"have optional arguments."
)
def collect_usage_pieces(self, ctx):
rv = Command.collect_usage_pieces(self, ctx)
rv.append(self.subcommand_metavar)
return rv
def format_options(self, ctx, formatter):
Command.format_options(self, ctx, formatter)
self.format_commands(ctx, formatter)
def resultcallback(self, replace=False):
"""Adds a result callback to the chain command. By default if a
result callback is already registered this will chain them but
this can be disabled with the `replace` parameter. The result
callback is invoked with the return value of the subcommand
(or the list of return values from all subcommands if chaining
is enabled) as well as the parameters as they would be passed
to the main callback.
Example::
@click.group()
@click.option('-i', '--input', default=23)
def cli(input):
return 42
@cli.resultcallback()
def process_result(result, input):
return result + input
.. versionadded:: 3.0
:param replace: if set to `True` an already existing result
callback will be removed.
"""
def decorator(f):
old_callback = self.result_callback
if old_callback is None or replace:
self.result_callback = f
return f
def function(__value, *args, **kwargs):
return f(old_callback(__value, *args, **kwargs), *args, **kwargs)
self.result_callback = rv = update_wrapper(function, f)
return rv
return decorator
def format_commands(self, ctx, formatter):
"""Extra format methods for multi methods that adds all the commands
after the options.
"""
commands = []
for subcommand in self.list_commands(ctx):
cmd = self.get_command(ctx, subcommand)
# What is this, the tool lied about a command. Ignore it
if cmd is None:
continue
if cmd.hidden:
continue
commands.append((subcommand, cmd))
# allow for 3 times the default spacing
if len(commands):
limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
rows = []
for subcommand, cmd in commands:
help = cmd.get_short_help_str(limit)
rows.append((subcommand, help))
if rows:
with formatter.section("Commands"):
formatter.write_dl(rows)
def parse_args(self, ctx, args):
if not args and self.no_args_is_help and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
rest = Command.parse_args(self, ctx, args)
if self.chain:
ctx.protected_args = rest
ctx.args = []
elif rest:
ctx.protected_args, ctx.args = rest[:1], rest[1:]
return ctx.args
def invoke(self, ctx):
def _process_result(value):
if self.result_callback is not None:
value = ctx.invoke(self.result_callback, value, **ctx.params)
return value
if not ctx.protected_args:
# If we are invoked without command the chain flag controls
# how this happens. If we are not in chain mode, the return
# value here is the return value of the command.
# If however we are in chain mode, the return value is the
# return value of the result processor invoked with an empty
# list (which means that no subcommand actually was executed).
if self.invoke_without_command:
if not self.chain:
return Command.invoke(self, ctx)
with ctx:
Command.invoke(self, ctx)
return _process_result([])
ctx.fail("Missing command.")
# Fetch args back out
args = ctx.protected_args + ctx.args
ctx.args = []
ctx.protected_args = []
# If we're not in chain mode, we only allow the invocation of a
# single command but we also inform the current context about the
# name of the command to invoke.
if not self.chain:
# Make sure the context is entered so we do not clean up
# resources until the result processor has worked.
with ctx:
cmd_name, cmd, args = self.resolve_command(ctx, args)
ctx.invoked_subcommand = cmd_name
Command.invoke(self, ctx)
sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
with sub_ctx:
return _process_result(sub_ctx.command.invoke(sub_ctx))
# In chain mode we create the contexts step by step, but after the
# base command has been invoked. Because at that point we do not
# know the subcommands yet, the invoked subcommand attribute is
# set to ``*`` to inform the command that subcommands are executed
# but nothing else.
with ctx:
ctx.invoked_subcommand = args and "*" or None
Command.invoke(self, ctx)
# Otherwise we make every single context and invoke them in a
# chain. In that case the return value to the result processor
# is the list of all invoked subcommand's results.
contexts = []
while args:
cmd_name, cmd, args = self.resolve_command(ctx, args)
sub_ctx = cmd.make_context(
cmd_name,
args,
parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False,
)
contexts.append(sub_ctx)
args, sub_ctx.args = sub_ctx.args, []
rv = []
for sub_ctx in contexts:
with sub_ctx:
rv.append(sub_ctx.command.invoke(sub_ctx))
return _process_result(rv)
def resolve_command(self, ctx, args):
cmd_name = make_str(args[0])
original_cmd_name = cmd_name
# Get the command
cmd = self.get_command(ctx, cmd_name)
# If we can't find the command but there is a normalization
# function available, we try with that one.
if cmd is None and ctx.token_normalize_func is not None:
cmd_name = ctx.token_normalize_func(cmd_name)
cmd = self.get_command(ctx, cmd_name)
# If we don't find the command we want to show an error message
# to the user that it was not provided. However, there is
# something else we should do: if the first argument looks like
# an option we want to kick off parsing again for arguments to
# resolve things like --help which now should go to the main
# place.
if cmd is None and not ctx.resilient_parsing:
if split_opt(cmd_name)[0]:
self.parse_args(ctx, ctx.args)
ctx.fail('No such command "%s".' % original_cmd_name)
return cmd_name, cmd, args[1:]
def get_command(self, ctx, cmd_name):
"""Given a context and a command name, this returns a
:class:`Command` object if it exists or returns `None`.
"""
raise NotImplementedError()
def list_commands(self, ctx):
"""Returns a list of subcommand names in the order they should
appear.
"""
return []
class Group(MultiCommand):
"""A group allows a command to have subcommands attached. This is the
most common way to implement nesting in Click.
:param commands: a dictionary of commands.
"""
def __init__(self, name=None, commands=None, **attrs):
MultiCommand.__init__(self, name, **attrs)
#: the registered subcommands by their exported names.
self.commands = commands or {}
def add_command(self, cmd, name=None):
"""Registers another :class:`Command` with this group. If the name
is not provided, the name of the command is used.
"""
name = name or cmd.name
if name is None:
raise TypeError("Command has no name.")
_check_multicommand(self, name, cmd, register=True)
self.commands[name] = cmd
def command(self, *args, **kwargs):
"""A shortcut decorator for declaring and attaching a command to
the group. This takes the same arguments as :func:`command` but
immediately registers the created command with this instance by
calling into :meth:`add_command`.
"""
def decorator(f):
cmd = command(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
def group(self, *args, **kwargs):
"""A shortcut decorator for declaring and attaching a group to
the group. This takes the same arguments as :func:`group` but
immediately registers the created command with this instance by
calling into :meth:`add_command`.
"""
def decorator(f):
cmd = group(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
def get_command(self, ctx, cmd_name):
return self.commands.get(cmd_name)
def list_commands(self, ctx):
return sorted(self.commands)
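# Illustrative sketch, not part of the original module: the ``command`` and
# ``group`` shortcut decorators above are what the public ``click`` decorators
# delegate to. A hypothetical minimal CLI built on a Group might look like:
#
#   import click
#
#   @click.group()
#   def cli():
#       """Root command."""
#
#   @cli.command()
#   @click.option("--count", default=1, help="Number of greetings.")
#   def hello(count):
#       for _ in range(count):
#           click.echo("Hello!")
#
#   if __name__ == "__main__":
#       cli()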
class CommandCollection(MultiCommand):
"""A command collection is a multi command that merges multiple multi
commands together into one. This is a straightforward implementation
that accepts a list of different multi commands as sources and
provides all the commands for each of them.
"""
def __init__(self, name=None, sources=None, **attrs):
MultiCommand.__init__(self, name, **attrs)
#: The list of registered multi commands.
self.sources = sources or []
def add_source(self, multi_cmd):
"""Adds a new multi command to the chain dispatcher."""
self.sources.append(multi_cmd)
def get_command(self, ctx, cmd_name):
for source in self.sources:
rv = source.get_command(ctx, cmd_name)
if rv is not None:
if self.chain:
_check_multicommand(self, cmd_name, rv)
return rv
def list_commands(self, ctx):
rv = set()
for source in self.sources:
rv.update(source.list_commands(ctx))
return sorted(rv)
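# Illustrative sketch, not part of the original module: a CommandCollection
# merges the subcommands of several groups into a single CLI. The group and
# command names below are hypothetical.
#
#   import click
#
#   @click.group()
#   def tools():
#       pass
#
#   @tools.command()
#   def build():
#       click.echo("building")
#
#   @click.group()
#   def extras():
#       pass
#
#   @extras.command()
#   def lint():
#       click.echo("linting")
#
#   cli = click.CommandCollection(sources=[tools, extras])
#   # ``cli`` now resolves both ``build`` and ``lint``.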
class Parameter(object):
r"""A parameter to a command comes in two versions: they are either
:class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
not supported by design as some of the internals for parsing are
intentionally not finalized.
Some settings are supported by both options and arguments.
.. versionchanged:: 2.0
Changed signature for parameter callback to also be passed the
parameter. In Click 2.0, the old callback format will still work,
but it will raise a warning to give you a chance to migrate the
code more easily.
:param param_decls: the parameter declarations for this option or
argument. This is a list of flags or argument
names.
:param type: the type that should be used. Either a :class:`ParamType`
or a Python type. The latter is converted into the former
automatically if supported.
:param required: controls if this is optional or not.
:param default: the default value if omitted. This can also be a callable,
in which case it's invoked when the default is needed
without any arguments.
:param callback: a callback that should be executed after the parameter
was matched. This is called as ``fn(ctx, param,
value)`` and needs to return the value. Before Click
2.0, the signature was ``(ctx, value)``.
:param nargs: the number of arguments to match. If not ``1`` the return
value is a tuple instead of single value. The default for
nargs is ``1`` (except if the type is a tuple, then it's
the arity of the tuple).
:param metavar: how the value is represented in the help page.
:param expose_value: if this is `True` then the value is passed onwards
to the command callback and stored on the context,
otherwise it's skipped.
:param is_eager: eager values are processed before non eager ones. This
should not be set for arguments or it will invert the
order of processing.
:param envvar: a string or list of strings that are environment variables
that should be checked.
"""
param_type_name = "parameter"
def __init__(
self,
param_decls=None,
type=None,
required=False,
default=None,
callback=None,
nargs=None,
metavar=None,
expose_value=True,
is_eager=False,
envvar=None,
autocompletion=None,
):
self.name, self.opts, self.secondary_opts = self._parse_decls(
param_decls or (), expose_value
)
self.type = convert_type(type, default)
# Default nargs to what the type tells us if we have that
# information available.
if nargs is None:
if self.type.is_composite:
nargs = self.type.arity
else:
nargs = 1
self.required = required
self.callback = callback
self.nargs = nargs
self.multiple = False
self.expose_value = expose_value
self.default = default
self.is_eager = is_eager
self.metavar = metavar
self.envvar = envvar
self.autocompletion = autocompletion
@property
def human_readable_name(self):
"""Returns the human readable name of this parameter. This is the
same as the name for options, but the metavar for arguments.
"""
return self.name
def make_metavar(self):
if self.metavar is not None:
return self.metavar
metavar = self.type.get_metavar(self)
if metavar is None:
metavar = self.type.name.upper()
if self.nargs != 1:
metavar += "..."
return metavar
def get_default(self, ctx):
"""Given a context variable this calculates the default value."""
# Otherwise go with the regular default.
if callable(self.default):
rv = self.default()
else:
rv = self.default
return self.type_cast_value(ctx, rv)
def add_to_parser(self, parser, ctx):
pass
def consume_value(self, ctx, opts):
value = opts.get(self.name)
if value is None:
value = self.value_from_envvar(ctx)
if value is None:
value = ctx.lookup_default(self.name)
return value
def type_cast_value(self, ctx, value):
"""Given a value this runs it properly through the type system.
This automatically handles things like `nargs` and `multiple` as
well as composite types.
"""
if self.type.is_composite:
if self.nargs <= 1:
raise TypeError(
"Attempted to invoke composite type "
"but nargs has been set to %s. This is "
"not supported; nargs needs to be set to "
"a fixed value > 1." % self.nargs
)
if self.multiple:
return tuple(self.type(x or (), self, ctx) for x in value or ())
return self.type(value or (), self, ctx)
def _convert(value, level):
if level == 0:
return self.type(value, self, ctx)
return tuple(_convert(x, level - 1) for x in value or ())
return _convert(value, (self.nargs != 1) + bool(self.multiple))
def process_value(self, ctx, value):
"""Given a value and context this runs the logic to convert the
value as necessary.
"""
# If the value we were given is None we do nothing. This way
# code that calls this can easily figure out if something was
# not provided. Otherwise it would be converted into an empty
# tuple for multiple invocations which is inconvenient.
if value is not None:
return self.type_cast_value(ctx, value)
def value_is_missing(self, value):
if value is None:
return True
if (self.nargs != 1 or self.multiple) and value == ():
return True
return False
def full_process_value(self, ctx, value):
value = self.process_value(ctx, value)
if value is None and not ctx.resilient_parsing:
value = self.get_default(ctx)
if self.required and self.value_is_missing(value):
raise MissingParameter(ctx=ctx, param=self)
return value
def resolve_envvar_value(self, ctx):
if self.envvar is None:
return
if isinstance(self.envvar, (tuple, list)):
for envvar in self.envvar:
rv = os.environ.get(envvar)
if rv is not None:
return rv
else:
return os.environ.get(self.envvar)
def value_from_envvar(self, ctx):
rv = self.resolve_envvar_value(ctx)
if rv is not None and self.nargs != 1:
rv = self.type.split_envvar_value(rv)
return rv
def handle_parse_result(self, ctx, opts, args):
with augment_usage_errors(ctx, param=self):
value = self.consume_value(ctx, opts)
try:
value = self.full_process_value(ctx, value)
except Exception:
if not ctx.resilient_parsing:
raise
value = None
if self.callback is not None:
try:
value = invoke_param_callback(self.callback, ctx, self, value)
except Exception:
if not ctx.resilient_parsing:
raise
if self.expose_value:
ctx.params[self.name] = value
return value, args
def get_help_record(self, ctx):
pass
def get_usage_pieces(self, ctx):
return []
def get_error_hint(self, ctx):
"""Get a stringified version of the param for use in error messages to
indicate which param caused the error.
"""
hint_list = self.opts or [self.human_readable_name]
return " / ".join('"%s"' % x for x in hint_list)
class Option(Parameter):
"""Options are usually optional values on the command line and
have some extra features that arguments don't have.
All other parameters are passed onwards to the parameter constructor.
:param show_default: controls if the default value should be shown on the
help page. Normally, defaults are not shown. If this
value is a string, it shows the string instead of the
value. This is particularly useful for dynamic options.
:param show_envvar: controls if an environment variable should be shown on
the help page. Normally, environment variables
are not shown.
:param prompt: if set to `True` or a non-empty string then the user will be
prompted for input. If set to `True` the prompt will be the
option name capitalized.
:param confirmation_prompt: if set then the value will need to be confirmed
if it was prompted for.
:param hide_input: if this is `True` then the input on the prompt will be
hidden from the user. This is useful for password
input.
:param is_flag: forces this option to act as a flag. The default is
auto detection.
:param flag_value: which value should be used for this flag if it's
enabled. This is set to a boolean automatically if
the option string contains a slash to mark two options.
:param multiple: if this is set to `True` then the argument is accepted
multiple times and recorded. This is similar to ``nargs``
in how it works but supports an arbitrary number of
arguments.
:param count: this flag makes an option increment an integer.
:param allow_from_autoenv: if this is enabled then the value of this
parameter will be pulled from an environment
variable in case a prefix is defined on the
context.
:param help: the help string.
:param hidden: hide this option from help outputs.
"""
param_type_name = "option"
def __init__(
self,
param_decls=None,
show_default=False,
prompt=False,
confirmation_prompt=False,
hide_input=False,
is_flag=None,
flag_value=None,
multiple=False,
count=False,
allow_from_autoenv=True,
type=None,
help=None,
hidden=False,
show_choices=True,
show_envvar=False,
**attrs
):
default_is_missing = attrs.get("default", _missing) is _missing
Parameter.__init__(self, param_decls, type=type, **attrs)
if prompt is True:
prompt_text = self.name.replace("_", " ").capitalize()
elif prompt is False:
prompt_text = None
else:
prompt_text = prompt
self.prompt = prompt_text
self.confirmation_prompt = confirmation_prompt
self.hide_input = hide_input
self.hidden = hidden
# Flags
if is_flag is None:
if flag_value is not None:
is_flag = True
else:
is_flag = bool(self.secondary_opts)
if is_flag and default_is_missing:
self.default = False
if flag_value is None:
flag_value = not self.default
self.is_flag = is_flag
self.flag_value = flag_value
if self.is_flag and isinstance(self.flag_value, bool) and type is None:
self.type = BOOL
self.is_bool_flag = True
else:
self.is_bool_flag = False
# Counting
self.count = count
if count:
if type is None:
self.type = IntRange(min=0)
if default_is_missing:
self.default = 0
self.multiple = multiple
self.allow_from_autoenv = allow_from_autoenv
self.help = help
self.show_default = show_default
self.show_choices = show_choices
self.show_envvar = show_envvar
# Sanity check for stuff we don't support
if __debug__:
if self.nargs < 0:
raise TypeError("Options cannot have nargs < 0")
if self.prompt and self.is_flag and not self.is_bool_flag:
raise TypeError("Cannot prompt for flags that are not bools.")
if not self.is_bool_flag and self.secondary_opts:
raise TypeError("Got secondary option for non boolean flag.")
if self.is_bool_flag and self.hide_input and self.prompt is not None:
raise TypeError(
"Hidden input does not work with boolean " "flag prompts."
)
if self.count:
if self.multiple:
raise TypeError(
"Options cannot be multiple and count " "at the same time."
)
elif self.is_flag:
raise TypeError(
"Options cannot be count and flags at " "the same time."
)
def _parse_decls(self, decls, expose_value):
opts = []
secondary_opts = []
name = None
possible_names = []
for decl in decls:
if isidentifier(decl):
if name is not None:
raise TypeError("Name defined twice")
name = decl
else:
split_char = decl[:1] == "/" and ";" or "/"
if split_char in decl:
first, second = decl.split(split_char, 1)
first = first.rstrip()
if first:
possible_names.append(split_opt(first))
opts.append(first)
second = second.lstrip()
if second:
secondary_opts.append(second.lstrip())
else:
possible_names.append(split_opt(decl))
opts.append(decl)
if name is None and possible_names:
possible_names.sort(key=lambda x: -len(x[0])) # group long options first
name = possible_names[0][1].replace("-", "_").lower()
if not isidentifier(name):
name = None
if name is None:
if not expose_value:
return None, opts, secondary_opts
raise TypeError("Could not determine name for option")
if not opts and not secondary_opts:
raise TypeError(
"No options defined but a name was passed (%s). "
"Did you mean to declare an argument instead "
"of an option?" % name
)
return name, opts, secondary_opts
def add_to_parser(self, parser, ctx):
kwargs = {"dest": self.name, "nargs": self.nargs, "obj": self}
if self.multiple:
action = "append"
elif self.count:
action = "count"
else:
action = "store"
if self.is_flag:
kwargs.pop("nargs", None)
if self.is_bool_flag and self.secondary_opts:
parser.add_option(
self.opts, action=action + "_const", const=True, **kwargs
)
parser.add_option(
self.secondary_opts, action=action + "_const", const=False, **kwargs
)
else:
parser.add_option(
self.opts, action=action + "_const", const=self.flag_value, **kwargs
)
else:
kwargs["action"] = action
parser.add_option(self.opts, **kwargs)
def get_help_record(self, ctx):
if self.hidden:
return
any_prefix_is_slash = []
def _write_opts(opts):
rv, any_slashes = join_options(opts)
if any_slashes:
any_prefix_is_slash[:] = [True]
if not self.is_flag and not self.count:
rv += " " + self.make_metavar()
return rv
rv = [_write_opts(self.opts)]
if self.secondary_opts:
rv.append(_write_opts(self.secondary_opts))
help = self.help or ""
extra = []
if self.show_envvar:
envvar = self.envvar
if envvar is None:
if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
envvar = "%s_%s" % (ctx.auto_envvar_prefix, self.name.upper())
if envvar is not None:
extra.append(
"env var: %s"
% (
", ".join("%s" % d for d in envvar)
if isinstance(envvar, (list, tuple))
else envvar,
)
)
if self.default is not None and self.show_default:
if isinstance(self.show_default, string_types):
default_string = "({})".format(self.show_default)
elif isinstance(self.default, (list, tuple)):
default_string = ", ".join("%s" % d for d in self.default)
elif inspect.isfunction(self.default):
default_string = "(dynamic)"
else:
default_string = self.default
extra.append("default: {}".format(default_string))
if self.required:
extra.append("required")
if extra:
help = "%s[%s]" % (help and help + " " or "", "; ".join(extra))
return ((any_prefix_is_slash and "; " or " / ").join(rv), help)
def get_default(self, ctx):
# If we're a non boolean flag, our default is more complex because
# we need to look at all flags in the same group to figure out
# if we're the default one, in which case we return the flag
# value as default.
if self.is_flag and not self.is_bool_flag:
for param in ctx.command.params:
if param.name == self.name and param.default:
return param.flag_value
return None
return Parameter.get_default(self, ctx)
def prompt_for_value(self, ctx):
"""This is an alternative flow that can be activated in the full
value processing if a value does not exist. It will prompt the
user until a valid value exists and then returns the processed
value as result.
"""
# Calculate the default before prompting anything to be stable.
default = self.get_default(ctx)
# If this is a prompt for a flag we need to handle this
# differently.
if self.is_bool_flag:
return confirm(self.prompt, default)
return prompt(
self.prompt,
default=default,
type=self.type,
hide_input=self.hide_input,
show_choices=self.show_choices,
confirmation_prompt=self.confirmation_prompt,
value_proc=lambda x: self.process_value(ctx, x),
)
def resolve_envvar_value(self, ctx):
rv = Parameter.resolve_envvar_value(self, ctx)
if rv is not None:
return rv
if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
envvar = "%s_%s" % (ctx.auto_envvar_prefix, self.name.upper())
return os.environ.get(envvar)
def value_from_envvar(self, ctx):
rv = self.resolve_envvar_value(ctx)
if rv is None:
return None
value_depth = (self.nargs != 1) + bool(self.multiple)
if value_depth > 0 and rv is not None:
rv = self.type.split_envvar_value(rv)
if self.multiple and self.nargs != 1:
rv = batch(rv, self.nargs)
return rv
def full_process_value(self, ctx, value):
if value is None and self.prompt is not None and not ctx.resilient_parsing:
return self.prompt_for_value(ctx)
return Parameter.full_process_value(self, ctx, value)
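# Illustrative sketch, not part of the original module: a "/" in the option
# declaration yields a boolean on/off flag (see ``_parse_decls`` above), and
# ``count=True`` turns a repeatable flag into an integer. Names are
# hypothetical.
#
#   import click
#
#   @click.command()
#   @click.option("--shout/--no-shout", default=False)
#   @click.option("-v", "--verbose", count=True)
#   def main(shout, verbose):
#       msg = "hello" + ("!" if shout else "")
#       click.echo(msg.upper() if verbose > 1 else msg)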
class Argument(Parameter):
"""Arguments are positional parameters to a command. They generally
provide fewer features than options but can have infinite ``nargs``
and are required by default.
All parameters are passed onwards to the parameter constructor.
"""
param_type_name = "argument"
def __init__(self, param_decls, required=None, **attrs):
if required is None:
if attrs.get("default") is not None:
required = False
else:
required = attrs.get("nargs", 1) > 0
Parameter.__init__(self, param_decls, required=required, **attrs)
if self.default is not None and self.nargs < 0:
raise TypeError(
"nargs=-1 in combination with a default value " "is not supported."
)
@property
def human_readable_name(self):
if self.metavar is not None:
return self.metavar
return self.name.upper()
def make_metavar(self):
if self.metavar is not None:
return self.metavar
var = self.type.get_metavar(self)
if not var:
var = self.name.upper()
if not self.required:
var = "[%s]" % var
if self.nargs != 1:
var += "..."
return var
def _parse_decls(self, decls, expose_value):
if not decls:
if not expose_value:
return None, [], []
raise TypeError("Could not determine name for argument")
if len(decls) == 1:
name = arg = decls[0]
name = name.replace("-", "_").lower()
else:
raise TypeError(
"Arguments take exactly one "
"parameter declaration, got %d" % len(decls)
)
return name, [arg], []
def get_usage_pieces(self, ctx):
return [self.make_metavar()]
def get_error_hint(self, ctx):
return '"%s"' % self.make_metavar()
def add_to_parser(self, parser, ctx):
parser.add_argument(dest=self.name, nargs=self.nargs, obj=self)
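# Illustrative sketch, not part of the original module: arguments are
# positional and required by default; ``nargs=-1`` accepts any number of
# values and exposes them as a tuple. Names are hypothetical.
#
#   import click
#
#   @click.command()
#   @click.argument("src", nargs=-1)
#   @click.argument("dst", nargs=1)
#   def copy(src, dst):
#       for fn in src:
#           click.echo("copy %s -> %s" % (fn, dst))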
# Circular dependency between decorators and core
from .decorators import command, group
|
py
|
1a575ebbce7db6718dab9a37fd80014f1958c2b7
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from app.main.config import configurations
# Initialize SQLAlchemy database
db = SQLAlchemy()
def create_app(config):
# Check if configuration is valid
if config not in configurations:
raise ValueError(f'{config} is not a valid configuration.')
# Create Flask application and initialize SQLAlchemy with the application instance
app = Flask(__name__)
app.config.from_object(configurations[config])
db.init_app(app)
return app
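# Illustrative usage sketch (assumption: 'development' is one of the keys
# defined in ``configurations``):
#
#   app = create_app('development')
#   with app.app_context():
#       db.create_all()
#   app.run()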
|
py
|
1a575ed929fed250b57dd4076d7987edb9733634
|
"""
Django settings for pi_server project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import logging
from dotenv import load_dotenv
from pathlib import Path
env_path = Path('..') / '.env'
load_dotenv(dotenv_path=str(env_path))
logger = logging.getLogger(__name__)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
security_key = os.getenv('DJANGO_SECURITY_KEY')
# We don't completely kill the server, in order to make this more friendly as a whole
if not security_key:
logger.warning("""
WARNING: You need to include a "DJANGO_SECURITY_KEY" entry in the .env file.
Please go here to learn more about the .env file and how to properly fill it:
https://github.com/eceusc/project-vending-machine/web_server/README.md#configuration
""")
SECRET_KEY = 'WARNING_CHANGE_ME'
else:
SECRET_KEY = security_key
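# Example .env entry this block expects (illustrative placeholder value only):
#
#   DJANGO_SECURITY_KEY=replace-me-with-a-long-random-string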
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pi_server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pi_server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
py
|
1a576063d31a3d4ef64e0598a2b0e6186ae5cc88
|
from src.data.github.auth.main import authenticate
__all__ = ["authenticate"]
|
py
|
1a5760f9c2e25742e6a68174a2e1797f30088f0b
|
from tkinter import *
import psycopg2
from bd import conexion
import cv2
from datetime import datetime
import time

raiz = Tk()

cap = cv2.VideoCapture(0)
detector = cv2.QRCodeDetector()
control = 'u'
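# Illustrative note: ``detectAndDecode`` returns the decoded string (empty if
# no QR code is found), the bounding-box points, and the rectified code image.
# The file name below is a hypothetical test image.
#
#   frame = cv2.imread("qr_sample.png")
#   text, points, straight = detector.detectAndDecode(frame)
#   if text:
#       print("decoded:", text)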
# declare a frame inside the window with fixed dimensions
miFrame = Frame(raiz, width=1200, height=600)
# pack it into the root window
miFrame.pack()
snombre=StringVar()
sapellido=StringVar()
scedula=StringVar()
sfecha=StringVar()
# declare a text entry box for the name
NombreBox = Entry(miFrame, textvariable=snombre)
NombreBox.grid(row=0, column=1, padx=10, pady=10)
# declare a text entry box for the last name
ApellidoBox = Entry(miFrame, textvariable=sapellido)
ApellidoBox.grid(row=1, column=1, padx=10, pady=10)
# declare a text entry box for the ID number
CedulaBox = Entry(miFrame, textvariable=scedula)
CedulaBox.grid(row=2, column=1, padx=10, pady=10)
# declare a text entry box for the date
FechaBox = Entry(miFrame, textvariable=sfecha)
FechaBox.grid(row=3, column=1, padx=10, pady=10)
# declare a label
NombreLabel = Label(miFrame, text="Nombre:")
NombreLabel.grid(row=0, column=0, sticky="e", padx=10, pady=10)
# declare a label
ApellidoLabel = Label(miFrame, text="Apellido:")
ApellidoLabel.grid(row=1, column=0, sticky="e", padx=10, pady=10)
# declare a label
CedulaLabel = Label(miFrame, text="Cedula:")
CedulaLabel.grid(row=2, column=0, sticky="e", padx=10, pady=10)
# declare a label
FechaLabel = Label(miFrame, text="Fecha:")
FechaLabel.grid(row=3, column=0, sticky="e", padx=10, pady=10)
def codigoBoton():
    # The scanned value is compared against the module-level ``control``
    # variable, so it must be declared global before it is reassigned below.
    global control
    while True:
        _, img = cap.read()
        data, bbox, _ = detector.detectAndDecode(img)
        if bbox is not None:
            # Draw the detected bounding box and the decoded text on the frame.
            for i in range(len(bbox)):
                cv2.line(img, tuple(bbox[i][0]), tuple(bbox[(i + 1) % len(bbox)][0]),
                         color=(255, 0, 255), thickness=2)
            cv2.putText(img, data, (int(bbox[0][0][0]), int(bbox[0][0][1]) - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            if data:
                if control != data:
                    datos = []
                    try:
                        with conexion.cursor() as cursor:
                            consulta = "SELECT nombre, apellido FROM datos WHERE cedula = %s;"
                            cursor.execute(consulta, (data,))
                            # fetchall returns every matching row
                            datos = cursor.fetchall()
                            #print(datos)
                            # iterate and print
                            #for dato in datos:
                            #    print(dato)
                    except psycopg2.Error as e:
                        print("Ocurrió un error al consultar con where: ", e)
                    if datos:
                        tiempo = str(datetime.now())
                        for dato in datos:
                            snombre.set(dato[0])
                            sapellido.set(dato[1])
                            scedula.set(data)
                            sfecha.set(tiempo)
                            print(data, dato[0], tiempo)
                    else:
                        print("No registrado")
                        snombre.set("No registrado")
                    control = data
        #cv2.imshow("Scanner QR", img)
        #if(cv2.waitKey(1) == ord("q")):
        # Note: with the display loop commented out above, only a single
        # frame is captured and processed per button press.
        break
botonEnvio = Button(raiz, text="Enviar", command=codigoBoton)
botonEnvio.pack()

raiz.mainloop()

# Release resources only after the GUI loop has ended; closing the connection
# before mainloop() would leave the button callback with a closed connection.
conexion.close()
cap.release()
cv2.destroyAllWindows()
|
py
|
1a57616474eb326b8e4d4a71f2999cd6599e5d21
|
#-----------------------------------------------------------------------------
# consumer_charwallet.py
# https://github.com/brentnowak/spotmarket
#-----------------------------------------------------------------------------
# Version: 0.1
# - Initial release
#-----------------------------------------------------------------------------
from _charwallet import *
from time import sleep
import sys
import evelink.char
import evelink.api
import evelink.parsing.wallet_transactions
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
# Suppress InsecurePlatformWarning messages
def main():
service = "consumer_charwallet.py"
# Get characters with walletEnabled = 1
characters = json.loads(getcharacters())
for character in characters:
characterName = character['characterName']
characterID = character['characterID']
walletID = character['walletID']
keyID = character['keyID']
vCode = character['vCode']
api_key = (keyID, vCode)
eveapi = evelink.api.API(base_url='api.eveonline.com', api_key=api_key)
charapi = evelink.char.Char(characterID, eveapi)
charresponse = charapi.wallet_transactions()
charresponse = charresponse[0]
insertcount = 0
for row in charresponse:
transactionDateTime = row['timestamp']
transactionDateTime = arrow.get(transactionDateTime)
transactionDateTime = transactionDateTime.format('YYYY-MM-DD HH:mm:ss')
transactionID = row['id']
quantity = row['quantity']
typeName = row['type']['name']
typeID = row['type']['id']
price = row['price']
clientID = row['client']['id']
clientName = row['client']['name']
walletID = walletID
stationID = row['station']['id']
#stationName = row['station']['name'] Not used
transactionType = row['action']
transactionFor = row['for']
journalTransactionID = row['journal_id']
personal = 0 # TODO allow user to true/false switch items for personal use
profit = 0 # TODO profit calculations based on a first-in/first-out movement of items in an inventory table
insertcount += insertwallettransaction(transactionDateTime, transactionID, quantity, typeName, typeID, price, clientID,
clientName, characterID, stationID, transactionType, personal, profit, transactionFor, journalTransactionID)
detail = "[character:" + str(characterName) + "][insert:" + str(insertcount) + "]"
timestamp = arrow.utcnow().format('YYYY-MM-DD HH:mm:ss')
insertlog(service, 0, detail, timestamp)
if __name__ == "__main__":
main()
# Sleep for 1 hour + extra before ending and triggering another run via supervisor
print("[Completed Run:Sleeping for 1 Hour]")
sys.stdout.flush()
sleep(3900)
|