ext (string, 9 classes) | sha (string, length 40) | content (string, 3 to 1.04M chars)
---|---|---|
py
|
1a5d738403e70168a84e389fd0dbc42ca23f4480
|
import validator.BaseValidator as BaseValidator
from builtins import str
from validate_email import validate_email
class EmailValidator(BaseValidator.BaseValidator):
message = "Value is not a correct email address"
def validate(self, value):
#possible null values
if value is None:
return True
value = super(EmailValidator, self).validate(value)
if type(value) is str:
return validate_email(value)
return False
def __init__(self, params):
super(EmailValidator, self).__init__(params)
|
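A minimal usage sketch for the validator above, with the class in scope. It assumes (since BaseValidator is not shown here) that BaseValidator(params) simply stores its params and that BaseValidator.validate() returns the value unchanged.

# Hypothetical usage sketch (not part of the original file); assumes BaseValidator(params)
# just stores params and BaseValidator.validate() returns the value unchanged.
validator = EmailValidator(params={})
print(validator.validate("user@example.com"))  # True for a well-formed address
print(validator.validate(None))                # True: null values are explicitly allowed
print(validator.validate(42))                  # False: non-string input is rejected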
py
|
1a5d73abeccf3c9128120c56e6b8f29df7029e17
|
import click
from chia import __version__
from chia.cmds.configure import configure_cmd
from chia.cmds.farm import farm_cmd
from chia.cmds.init import init_cmd
from chia.cmds.keys import keys_cmd
from chia.cmds.netspace import netspace_cmd
from chia.cmds.plots import plots_cmd
from chia.cmds.show import show_cmd
from chia.cmds.start import start_cmd
from chia.cmds.stop import stop_cmd
from chia.cmds.wallet import wallet_cmd
from chia.cmds.plotnft import plotnft_cmd
from chia.util.default_root import DEFAULT_ROOT_PATH
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
def monkey_patch_click() -> None:
# this hacks around what seems to be an incompatibility between the python from `pyinstaller`
# and `click`
#
# Not 100% sure on the details, but it seems that `click` performs a check on start-up
# that `codecs.lookup(locale.getpreferredencoding()).name != 'ascii'`, and refuses to start
# if it's not. The python that comes with `pyinstaller` fails this check.
#
# This will probably cause problems with the command-line tools that use parameters that
# are not strict ascii. The real fix is likely with the `pyinstaller` python.
import click.core
click.core._verify_python3_env = lambda *args, **kwargs: 0 # type: ignore
@click.group(
help=f"\n Manage lucky blockchain infrastructure ({__version__})\n",
epilog="Try 'lucky start node', 'lucky netspace -d 192', or 'lucky show -s'",
context_settings=CONTEXT_SETTINGS,
)
@click.option("--root-path", default=DEFAULT_ROOT_PATH, help="Config file root", type=click.Path(), show_default=True)
@click.pass_context
def cli(ctx: click.Context, root_path: str) -> None:
from pathlib import Path
ctx.ensure_object(dict)
ctx.obj["root_path"] = Path(root_path)
@cli.command("version", short_help="Show lucky version")
def version_cmd() -> None:
print(__version__)
@cli.command("run_daemon", short_help="Runs lucky daemon")
@click.pass_context
def run_daemon_cmd(ctx: click.Context) -> None:
from chia.daemon.server import async_run_daemon
import asyncio
asyncio.get_event_loop().run_until_complete(async_run_daemon(ctx.obj["root_path"]))
cli.add_command(keys_cmd)
cli.add_command(plots_cmd)
cli.add_command(wallet_cmd)
cli.add_command(plotnft_cmd)
cli.add_command(configure_cmd)
cli.add_command(init_cmd)
cli.add_command(show_cmd)
cli.add_command(start_cmd)
cli.add_command(stop_cmd)
cli.add_command(netspace_cmd)
cli.add_command(farm_cmd)
def main() -> None:
monkey_patch_click()
cli() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
main()
|
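The click group above can be exercised in-process with click's CliRunner instead of spawning the packaged binary. A small sketch, assuming the `cli` group defined above is importable (the import path is an assumption and is therefore left commented out); only the `version` sub-command is touched.

# Sketch only: how one might smoke-test the group with click's test runner.
from click.testing import CliRunner
# from lucky.cmds.lucky import cli  # adjust to wherever the `cli` group actually lives

def smoke_test_version(cli_group):
    """Invoke the `version` sub-command in-process and check it exits cleanly."""
    runner = CliRunner()
    result = runner.invoke(cli_group, ["version"])
    assert result.exit_code == 0
    print(result.output.strip())  # the package __version__

Calling smoke_test_version(cli) would print the version string without monkey-patching click or starting a daemon.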
py
|
1a5d746539de6602934c20eee1a303ff2b4948aa
|
from pytorch_lightning.strategies.ddp import DDPStrategy # noqa: F401
from pytorch_lightning.strategies.ddp2 import DDP2Strategy # noqa: F401
from pytorch_lightning.strategies.ddp_spawn import DDPSpawnStrategy # noqa: F401
from pytorch_lightning.strategies.deepspeed import DeepSpeedStrategy # noqa: F401
from pytorch_lightning.strategies.dp import DataParallelStrategy # noqa: F401
from pytorch_lightning.strategies.fully_sharded import DDPFullyShardedStrategy # noqa: F401
from pytorch_lightning.strategies.horovod import HorovodStrategy # noqa: F401
from pytorch_lightning.strategies.parallel import ParallelStrategy # noqa: F401
from pytorch_lightning.strategies.sharded import DDPShardedStrategy # noqa: F401
from pytorch_lightning.strategies.sharded_spawn import DDPSpawnShardedStrategy # noqa: F401
from pytorch_lightning.strategies.single_device import SingleDeviceStrategy # noqa: F401
from pytorch_lightning.strategies.single_tpu import SingleTPUStrategy # noqa: F401
from pytorch_lightning.strategies.strategy import Strategy # noqa: F401
from pytorch_lightning.strategies.tpu_spawn import TPUSpawnStrategy # noqa: F401
|
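These re-exports let users hand a strategy object directly to the Trainer instead of a string alias. A minimal sketch, assuming a pytorch_lightning release from the 1.6 era (where this strategies package exists) and two available GPUs; the find_unused_parameters tweak is just an illustrative DistributedDataParallel kwarg.

# Sketch: selecting a strategy object explicitly rather than via strategy="ddp".
import pytorch_lightning as pl
from pytorch_lightning.strategies import DDPStrategy

trainer = pl.Trainer(
    accelerator="gpu",
    devices=2,
    strategy=DDPStrategy(find_unused_parameters=False),  # forwarded to DistributedDataParallel
)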
py
|
1a5d74be539aaab31b13ba766f9443bac09fe07e
|
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import numpy as np
import time
import pycocotools.mask as mask_util
import paddlex.utils.logging as logging
from paddlex.utils import is_pic
from .det_metrics.coco_utils import loadRes
def visualize_detection(image,
result,
threshold=0.5,
save_dir='./',
color=None):
"""
Visualize bbox and mask results
"""
if isinstance(image, np.ndarray):
image_name = str(int(time.time() * 1000)) + '.jpg'
else:
image_name = os.path.split(image)[-1]
image = cv2.imread(image)
image = draw_bbox_mask(image, result, threshold=threshold, color_map=color)
if save_dir is not None:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
out_path = os.path.join(save_dir, 'visualize_{}'.format(image_name))
cv2.imwrite(out_path, image)
logging.info('The visualized result is saved at {}'.format(out_path))
else:
return image
def visualize_segmentation(image,
result,
weight=0.6,
save_dir='./',
color=None):
"""
Convert a segmentation result to a color image and save the blended image.
Args:
image: the path of the original image
result: the prediction result for the image
weight: the weight of the original image in the blend; the result weight is (1 - weight)
save_dir: the directory for saving the visualized image
color: a flat list of BGR color values, three per label.
"""
label_map = result['label_map']
color_map = get_color_map_list(256)
if color is not None:
for i in range(len(color) // 3):
color_map[i] = color[i * 3:(i + 1) * 3]
color_map = np.array(color_map).astype("uint8")
# Use OpenCV LUT for color mapping
c1 = cv2.LUT(label_map, color_map[:, 0])
c2 = cv2.LUT(label_map, color_map[:, 1])
c3 = cv2.LUT(label_map, color_map[:, 2])
pseudo_img = np.dstack((c1, c2, c3))
if isinstance(image, np.ndarray):
im = image
image_name = str(int(time.time() * 1000)) + '.jpg'
if image.shape[2] != 3:
logging.info(
"The image is not 3-channel array, so predicted label map is shown as a pseudo color image."
)
weight = 0.
else:
image_name = os.path.split(image)[-1]
if not is_pic(image):
logging.info(
"The image cannot be opened by opencv, so predicted label map is shown as a pseudo color image."
)
image_name = image_name.split('.')[0] + '.jpg'
weight = 0.
else:
im = cv2.imread(image)
if abs(weight) < 1e-5:
vis_result = pseudo_img
else:
vis_result = cv2.addWeighted(im, weight,
pseudo_img.astype(im.dtype), 1 - weight,
0)
if save_dir is not None:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
out_path = os.path.join(save_dir, 'visualize_{}'.format(image_name))
cv2.imwrite(out_path, vis_result)
logging.info('The visualized result is saved as {}'.format(out_path))
else:
return vis_result
def get_color_map_list(num_classes):
""" Returns the color map for visualizing the segmentation mask,
which supports an arbitrary number of classes.
Args:
num_classes: Number of classes
Returns:
The color map
"""
color_map = num_classes * [0, 0, 0]
for i in range(0, num_classes):
j = 0
lab = i
while lab:
color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
j += 1
lab >>= 3
color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
return color_map
def expand_boxes(boxes, scale):
"""Expand an array of boxes by a given scale."""
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
def clip_bbox(bbox):
xmin = max(min(bbox[0], 1.), 0.)
ymin = max(min(bbox[1], 1.), 0.)
xmax = max(min(bbox[2], 1.), 0.)
ymax = max(min(bbox[3], 1.), 0.)
return xmin, ymin, xmax, ymax
def draw_bbox_mask(image, results, threshold=0.5, color_map=None):
_SMALL_OBJECT_AREA_THRESH = 1000
height, width = image.shape[:2]
default_font_scale = max(np.sqrt(height * width) // 900, .5)
linewidth = max(default_font_scale / 40, 2)
labels = list()
for dt in results:
if dt['category'] not in labels:
labels.append(dt['category'])
if color_map is None:
color_map = get_color_map_list(len(labels) + 2)[2:]
else:
color_map = np.asarray(color_map)
if color_map.shape[0] != len(labels) or color_map.shape[1] != 3:
raise Exception(
"The shape for color_map is required to be {}x3, but recieved shape is {}x{}.".
format(len(labels), color_map.shape))
if np.max(color_map) > 255 or np.min(color_map) < 0:
raise ValueError(
" The values in color_map should be within 0-255 range.")
keep_results = []
areas = []
for dt in results:
cname, bbox, score = dt['category'], dt['bbox'], dt['score']
if score < threshold:
continue
keep_results.append(dt)
areas.append(bbox[2] * bbox[3])
areas = np.asarray(areas)
sorted_idxs = np.argsort(-areas).tolist()
keep_results = [keep_results[k]
for k in sorted_idxs] if keep_results else []
for dt in keep_results:
cname, bbox, score = dt['category'], dt['bbox'], dt['score']
bbox = list(map(int, bbox))
xmin, ymin, w, h = bbox
xmax = xmin + w
ymax = ymin + h
color = tuple(color_map[labels.index(cname)])
# draw bbox
image = cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color,
linewidth)
# draw mask
if 'mask' in dt:
mask = mask_util.decode(dt['mask']) * 255
image = image.astype('float32')
alpha = .7
w_ratio = .4
color_mask = np.asarray(color, dtype=int)
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
idx = np.nonzero(mask)
image[idx[0], idx[1], :] *= 1.0 - alpha
image[idx[0], idx[1], :] += alpha * color_mask
image = image.astype("uint8")
contours = cv2.findContours(
mask.astype("uint8"), cv2.RETR_CCOMP,
cv2.CHAIN_APPROX_NONE)[-2]
image = cv2.drawContours(
image,
contours,
contourIdx=-1,
color=color,
thickness=1,
lineType=cv2.LINE_AA)
# draw label
text_pos = (xmin, ymin)
instance_area = w * h
if (instance_area < _SMALL_OBJECT_AREA_THRESH or h < 40):
if ymin >= height - 5:
text_pos = (xmin, ymin)
else:
text_pos = (xmin, ymax)
height_ratio = h / np.sqrt(height * width)
font_scale = (np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2,
2) * 0.5 * default_font_scale)
text = "{} {:.2f}".format(cname, score)
(tw, th), baseline = cv2.getTextSize(
text,
fontFace=cv2.FONT_HERSHEY_DUPLEX,
fontScale=font_scale,
thickness=1)
image = cv2.rectangle(
image,
text_pos, (text_pos[0] + tw, text_pos[1] + th + baseline),
color=color,
thickness=-1)
image = cv2.putText(
image,
text, (text_pos[0], text_pos[1] + th),
fontFace=cv2.FONT_HERSHEY_DUPLEX,
fontScale=font_scale,
color=(255, 255, 255),
thickness=1,
lineType=cv2.LINE_AA)
return image
def draw_pr_curve(eval_details_file=None,
gt=None,
pred_bbox=None,
pred_mask=None,
iou_thresh=0.5,
save_dir='./'):
if eval_details_file is not None:
import json
with open(eval_details_file, 'r') as f:
eval_details = json.load(f)
pred_bbox = eval_details['bbox']
if 'mask' in eval_details:
pred_mask = eval_details['mask']
gt = eval_details['gt']
if gt is None or pred_bbox is None:
raise Exception(
"gt/pred_bbox/pred_mask is None now, please set right eval_details_file or gt/pred_bbox/pred_mask."
)
if pred_bbox is not None and len(pred_bbox) == 0:
raise Exception("There is no predicted bbox.")
if pred_mask is not None and len(pred_mask) == 0:
raise Exception("There is no predicted mask.")
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
coco = COCO()
coco.dataset = gt
coco.createIndex()
def _summarize(coco_gt, ap=1, iouThr=None, areaRng='all', maxDets=100):
p = coco_gt.params
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = coco_gt.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, :, aind, mind]
else:
# dimension of recall: [TxKxAxM]
s = coco_gt.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, aind, mind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
return mean_s
def cal_pr(coco_gt, coco_dt, iou_thresh, save_dir, style='bbox'):
coco_dt = loadRes(coco_gt, coco_dt)
coco_eval = COCOeval(coco_gt, coco_dt, style)
coco_eval.params.iouThrs = np.linspace(
iou_thresh, iou_thresh, 1, endpoint=True)
coco_eval.evaluate()
coco_eval.accumulate()
stats = _summarize(coco_eval, iouThr=iou_thresh)
catIds = coco_gt.getCatIds()
if len(catIds) != coco_eval.eval['precision'].shape[2]:
raise Exception(
"The category number must be same as the third dimension of precisions."
)
x = np.arange(0.0, 1.01, 0.01)
color_map = get_color_map_list(256)[1:256]
plt.subplot(1, 2, 1)
plt.title(style + " precision-recall IoU={}".format(iou_thresh))
plt.xlabel("recall")
plt.ylabel("precision")
plt.xlim(0, 1.01)
plt.ylim(0, 1.01)
plt.grid(linestyle='--', linewidth=1)
plt.plot([0, 1], [0, 1], 'r--', linewidth=1)
my_x_ticks = np.arange(0, 1.01, 0.1)
my_y_ticks = np.arange(0, 1.01, 0.1)
plt.xticks(my_x_ticks, fontsize=5)
plt.yticks(my_y_ticks, fontsize=5)
for idx, catId in enumerate(catIds):
pr_array = coco_eval.eval['precision'][0, :, idx, 0, 2]
precision = pr_array[pr_array > -1]
ap = np.mean(precision) if precision.size else float('nan')
nm = coco_gt.loadCats(catId)[0]['name'] + ' AP={:0.2f}'.format(
float(ap * 100))
color = tuple(color_map[idx])
color = [float(c) / 255 for c in color]
color.append(0.75)
plt.plot(x, pr_array, color=color, label=nm, linewidth=1)
plt.legend(loc="lower left", fontsize=5)
plt.subplot(1, 2, 2)
plt.title(style + " score-recall IoU={}".format(iou_thresh))
plt.xlabel('recall')
plt.ylabel('score')
plt.xlim(0, 1.01)
plt.ylim(0, 1.01)
plt.grid(linestyle='--', linewidth=1)
plt.xticks(my_x_ticks, fontsize=5)
plt.yticks(my_y_ticks, fontsize=5)
for idx, catId in enumerate(catIds):
nm = coco_gt.loadCats(catId)[0]['name']
sr_array = coco_eval.eval['scores'][0, :, idx, 0, 2]
color = tuple(color_map[idx])
color = [float(c) / 255 for c in color]
color.append(0.75)
plt.plot(x, sr_array, color=color, label=nm, linewidth=1)
plt.legend(loc="lower left", fontsize=5)
plt.savefig(
os.path.join(
save_dir,
"./{}_pr_curve(iou-{}).png".format(style, iou_thresh)),
dpi=800)
plt.close()
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cal_pr(coco, pred_bbox, iou_thresh, save_dir, style='bbox')
if pred_mask is not None:
cal_pr(coco, pred_mask, iou_thresh, save_dir, style='segm')
|
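The geometric helpers above are easy to sanity-check in isolation. A small worked example of what expand_boxes computes (NumPy only, no PaddleX needed), reproducing its arithmetic inline so the snippet is standalone: expanding an [x1, y1, x2, y2] box by a factor of 2 doubles its width and height around the same center.

import numpy as np

# One box in [x1, y1, x2, y2] form: 20 wide, 10 tall, centered at (20, 15).
boxes = np.array([[10.0, 10.0, 30.0, 20.0]])
scale = 2.0

w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 * scale
h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 * scale
x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5
y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5
expanded = np.stack([x_c - w_half, y_c - h_half, x_c + w_half, y_c + h_half], axis=1)
print(expanded)  # [[ 0.  5. 40. 25.]] -- now 40 wide, 20 tall, same center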
py
|
1a5d756a4d7cda9738026f88bba108edd2fc4907
|
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
# noinspection SpellCheckingInspection
setuptools.setup(
name='hellpy',
license='MIT',
version='1.1.0',
python_requires=">=3.6",
author='Manan (mentix02)',
long_description=long_description,
description='A connector for HellDB.',
author_email='[email protected]',
packages=['hellpy', 'hellpy.structures'],
url='https://github.com/helldatabase/hellpy',
long_description_content_type='text/markdown',
classifiers=[
"Topic :: Database",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
],
)
|
py
|
1a5d757c8c27f94b07839f790498cf4a0583e119
|
from settings import DB_URL, DB_USER, DB_PASSWORD, DB_NAME
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
class GenericModel(object):
def __init__(self, class_name):
self.base = automap_base()
self.engine = create_engine(
"mysql+pymyql://{0}:{1}@{2}/{3}".format(
DB_USER, DB_PASSWORD, DB_URL, DB_NAME
)
)
self.base.prepare(self.engine, reflect=True)
self.this_class = getattr(self.base.classes, class_name)
def get_fields(self, class_name):
return self.this_class.__dict__
|
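With the connection-string typo and the attribute lookup fixed above, the class reflects an existing table by name. A usage sketch, assuming the pymysql driver is installed, the settings module points at a reachable MySQL database, and a table named `users` exists (all assumptions for illustration).

# Hypothetical usage; requires pymysql, a reachable database, and a real `users` table.
model = GenericModel("users")
fields = model.get_fields("users")
print(sorted(name for name in fields if not name.startswith("_")))  # reflected column attributes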
py
|
1a5d75e4e0682e7f155baff0698ce3938488fdd4
|
import sys
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import cv2
cap = cv2.VideoCapture(0)
count = 0
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
frame = cv2.resize(frame, (1024, 768))
cv2.imshow("capture", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('s'):
cv2.imwrite('./image/'+str(count) + '.jpg', frame)
count = count + 1
if key == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
py
|
1a5d7604dec377a24b1f6e7fe549889a85d27111
|
# LANGUAGE: Python
# AUTHOR: Ajay Bairwa
# GITHUB: https://github.com/bairwa25
print('Hello Hacktober2020')
|
py
|
1a5d7778f0eec41c51583791e352b14dee1903c4
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
self.extra_args = [['-usehd={:d}'.format(i%2==0)] for i in range(4)]
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir, self.extra_args[:3], redirect_stderr=True)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
# Check that there are no UTXOs on any of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 500)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500)
assert_equal(self.nodes[1].getbalance(), 500)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 210 TGIF from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises_message(JSONRPCException, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 1000 TGIF in block rewards plus fees, but
# minus the 210 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 1000-210)
assert_equal(self.nodes[2].getbalance(), 210)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True, False, True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True, False, True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 1000)
assert_equal(self.nodes[2].getbalance("from1"), 1000-210)
# Send 100 TGIF normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.00001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 100, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('900'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('100'))
# Send 100 TGIF with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 100, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 100 TGIF
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('100')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 100 TGIF with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir, self.extra_args[3], redirect_stderr=True))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 499.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 2
#tx should be added to the balance because after restarting the nodes the tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# Mine a block from node0 to an address from node1
cbAddr = self.nodes[1].getnewaddress()
blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
self.sync_all()
# Check that the txid and balance is found by node1
self.nodes[1].gettransaction(cbTxId)
# check if wallet or blockchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for s in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getaccountaddress(s)
label = self.nodes[0].getaccount(addr)
assert_equal(label, s)
assert(s in self.nodes[0].listaccounts().keys())
self.nodes[0].ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
# disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
# '-salvagewallet',
]
chainlimit = 6
for m in maintenance:
print("check " + m)
stop_nodes(self.nodes)
# set lower ancestor limit for later
self.nodes = start_nodes(3, self.options.tmpdir, [[m, "-limitancestorcount="+str(chainlimit)]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
# ==Check that wallet prefers to use coins that don't exceed mempool limits =====
# Get all non-zero utxos together
chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
self.nodes[0].generate(1)
node0_balance = self.nodes[0].getbalance()
# Split into two chains
rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')})
signedtx = self.nodes[0].signrawtransaction(rawtx)
singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
# Make a long chain of unconfirmed payments without hitting mempool limit
# Each tx we make leaves only one output of change on a chain 1 longer
# Since the amount to send is always much less than the outputs, we only ever need one output
# So we should be able to generate exactly chainlimit txs for each original output
sending_addr = self.nodes[1].getnewaddress()
txid_list = []
for i in range(chainlimit*2):
txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2)
assert_equal(len(txid_list), chainlimit*2)
# Without walletrejectlongchains, we will still generate a txid
# The tx will be stored in the wallet but not accepted to the mempool
extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
assert(extra_txid not in self.nodes[0].getrawmempool())
assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()])
self.nodes[0].abandontransaction(extra_txid)
total_txs = len(self.nodes[0].listtransactions("*",99999))
# Try with walletrejectlongchains
# Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
stop_node(self.nodes[0],0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)])
# wait for loadmempool
timeout = 10
while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2):
time.sleep(0.5)
timeout -= 0.5
assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2)
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
assert_raises_message(JSONRPCException, "mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
# Verify nothing new in wallet
assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999)))
if __name__ == '__main__':
WalletTest().main()
|
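check_fee_amount above delegates the actual range check to assert_fee_amount from the test framework's util module. As a rough standalone sketch of that kind of check (the exact tolerance the framework uses may differ; the two-byte leeway below is an assumption for illustration):

from decimal import Decimal

def fee_in_range(fee, tx_size, fee_per_kb, leeway_bytes=2):
    """Rough sketch: fee must cover tx_size bytes at fee_per_kb, with a small leeway."""
    target = fee_per_kb * tx_size / Decimal(1000)
    return target <= fee <= target + fee_per_kb * leeway_bytes / Decimal(1000)

# A 250-byte tx at 0.00001 per kB has a target fee of 0.0000025.
print(fee_in_range(Decimal("0.0000025"), 250, Decimal("0.00001")))  # True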
py
|
1a5d77c94fb55e3d3c37ff2136ed2fd4ccad4a01
|
from collections import OrderedDict
import math
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
def load_weights_sequential(target, source_state):
new_dict = OrderedDict()
for (k1, v1), (k2, v2) in zip(target.state_dict().items(), source_state.items()):
new_dict[k1] = v2
target.load_state_dict(new_dict)
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, dilation=dilation,
padding=dilation, bias=False)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.relu(out)
out = self.conv3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers=(3, 4, 23, 3)):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False)
)
layers = [block(self.inplanes, planes, stride, downsample)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x_3 = self.layer3(x)
x = self.layer4(x_3)
return x, x_3
def resnet18(pretrained=False):
model = ResNet(BasicBlock, [2, 2, 2, 2])
return model
def resnet34(pretrained=False):
model = ResNet(BasicBlock, [3, 4, 6, 3])
return model
def resnet50(pretrained=False):
model = ResNet(Bottleneck, [3, 4, 6, 3])
return model
def resnet101(pretrained=False):
model = ResNet(Bottleneck, [3, 4, 23, 3])
return model
def resnet152(pretrained=False):
model = ResNet(Bottleneck, [3, 8, 36, 3])
return model
|
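Because layer3 and layer4 keep stride 1 and use dilation, this backbone downsamples by 8x overall instead of the usual 32x, a layout commonly used for dilated segmentation backbones. A quick shape check (assuming torch is installed; note the pretrained flag is ignored by the constructors above, so no weights are downloaded):

import torch

model = resnet50()
model.eval()
with torch.no_grad():
    x = torch.randn(1, 3, 224, 224)
    feats, feats_3 = model(x)
print(feats.shape)    # torch.Size([1, 2048, 28, 28]) -- 224 / 8
print(feats_3.shape)  # torch.Size([1, 1024, 28, 28])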
py
|
1a5d78f0eeb786e0f1731a723f8b73f385ab18ac
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related part of the Keras engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import values as ds_values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import callbacks as callbacks_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.distribute import distributed_training_utils as dist_utils
from tensorflow.python.keras.engine import compile_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.engine import network
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer as lso
from tensorflow.python.keras.saving.saved_model import model_serialization
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils import version_utils
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.profiler import trace
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import keras_export
_keras_api_gauge = monitoring.BoolGauge('/tensorflow/api/keras',
'keras api usage', 'method')
def enable_multi_worker(method):
"""Decorator that handles running `method` with multi-worker strategy."""
def _method_wrapper(self, *args, **kwargs):
if not self._in_multi_worker_mode(): # pylint: disable=protected-access
return method(self, *args, **kwargs)
# Running inside `run_distribute_coordinator` already.
if dc_context.get_current_worker_context():
return method(self, *args, **kwargs)
return dc.run_distribute_coordinator(
lambda _: method(self, *args, **kwargs),
self.distribute_strategy,
mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
return tf_decorator.make_decorator(
target=method, decorator_func=_method_wrapper)
def disable_multi_worker(method):
"""Decorator that disallows multi-worker use of `method`."""
def _method_wrapper(self, *args, **kwargs):
if self._in_multi_worker_mode(): # pylint: disable=protected-access
raise ValueError('{} is not supported in multi-worker mode.'.format(
method.__name__))
return method(self, *args, **kwargs)
return tf_decorator.make_decorator(
target=method, decorator_func=_method_wrapper)
@keras_export('keras.Model', 'keras.models.Model')
class Model(network.Network, version_utils.ModelVersionSelector):
"""`Model` groups layers into an object with training and inference features.
There are two ways to instantiate a `Model`:
1 - With the "functional API", where you start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
2 - By subclassing the `Model` class: in that case, you should define your
layers in `__init__` and you should implement the model's forward pass
in `call`.
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call`, which you can use to specify
a different behavior in training and inference:
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
if training:
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
Once the model is created, you can configure the model with losses and metrics
with `model.compile()`, train the model with `model.fit()`, or use the model
to do prediction with `model.predict()`.
Check out the [guide](https://www.tensorflow.org/guide/keras/overview) for
additional details.
"""
_TF_MODULE_IGNORED_PROPERTIES = frozenset(
itertools.chain(('_train_counter', '_test_counter', '_predict_counter',
'_steps_per_execution'),
network.Network._TF_MODULE_IGNORED_PROPERTIES)) # pylint: disable=protected-access
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
_keras_api_gauge.get_cell('model').set(True)
# Model must be created under scope of DistStrat it will be trained with.
if ds_context.has_strategy():
self._distribution_strategy = ds_context.get_strategy()
else:
self._distribution_strategy = None
# Defaults to value of `tf.config.experimental_functions_run_eagerly`.
self._run_eagerly = None
self.stop_training = False
# Initialize cache attrs.
self._reset_compile_cache()
# Fault-tolerance handler. Set in `ModelCheckpoint`.
self._training_state = None
self.history = None
# These objects are used in the default `Model.compile`. They are not
# guaranteed to be set after `Model.compile` is called, as users can
# override compile with custom logic.
self.compiled_loss = None
self.compiled_metrics = None
self._init_batch_counters()
@trackable.no_automatic_dependency_tracking
def _init_batch_counters(self):
# Untracked Variables, used to keep track of mini-batches seen in `fit`,
# `evaluate`, and `predict`.
agg = variables.VariableAggregationV2.ONLY_FIRST_REPLICA
self._train_counter = variables.Variable(0, dtype='int64', aggregation=agg)
self._test_counter = variables.Variable(0, dtype='int64', aggregation=agg)
self._predict_counter = variables.Variable(
0, dtype='int64', aggregation=agg)
def get_weights(self):
"""Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays.
"""
with self.distribute_strategy.scope():
return super(Model, self).get_weights()
def load_weights(self, filepath, by_name=False, skip_mismatch=False):
"""Loads all layer weights, either from a TensorFlow or an HDF5 weight file.
If `by_name` is False weights are loaded based on the network's
topology. This means the architecture should be the same as when the weights
were saved. Note that layers that don't have weights are not taken into
account in the topological ordering, so adding or removing layers is fine as
long as they don't have weights.
If `by_name` is True, weights are loaded into layers only if they share the
same name. This is useful for fine-tuning or transfer-learning models where
some of the layers have changed.
Only topological loading (`by_name=False`) is supported when loading weights
from the TensorFlow format. Note that topological loading differs slightly
between TensorFlow and HDF5 formats for user-defined classes inheriting from
`tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the
TensorFlow format loads based on the object-local names of attributes to
which layers are assigned in the `Model`'s constructor.
Arguments:
filepath: String, path to the weights file to load. For weight files in
TensorFlow format, this is the file prefix (the same as was passed
to `save_weights`).
by_name: Boolean, whether to load weights by name or by topological
order. Only topological loading is supported for weight files in
TensorFlow format.
skip_mismatch: Boolean, whether to skip loading of layers where there is
a mismatch in the number of weights, or a mismatch in the shape of
the weight (only valid when `by_name=True`).
Returns:
When loading a weight file in TensorFlow format, returns the same status
object as `tf.train.Checkpoint.restore`. When graph building, restore
ops are run automatically as soon as the network is built (on first call
for user-defined classes inheriting from `Model`, immediately if it is
already built).
When loading weights in HDF5 format, returns `None`.
Raises:
ImportError: If h5py is not available and the weight file is in HDF5
format.
ValueError: If `skip_mismatch` is set to `True` when `by_name` is
`False`.
"""
if dist_utils.is_tpu_strategy(self._distribution_strategy):
if (self._distribution_strategy.extended.steps_per_run > 1 and
(not network._is_hdf5_filepath(filepath))): # pylint: disable=protected-access
raise ValueError('Load weights is not yet supported with TPUStrategy '
'with steps_per_run greater than 1.')
return super(Model, self).load_weights(filepath, by_name, skip_mismatch)
def compile(self,
optimizer='rmsprop',
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
run_eagerly=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: String (name of optimizer) or optimizer instance. See
`tf.keras.optimizers`.
loss: String (name of objective function), objective function or
`tf.keras.losses.Loss` instance. See `tf.keras.losses`. An objective
function is any callable with the signature `loss = fn(y_true,
y_pred)`, where y_true = ground truth values with shape =
`[batch_size, d0, .. dN]`, except sparse loss functions such as sparse
categorical crossentropy where shape = `[batch_size, d0, .. dN-1]`.
y_pred = predicted values with shape = `[batch_size, d0, .. dN]`. It
returns a weighted loss float tensor. If a custom `Loss` instance is
used and reduction is set to NONE, return value has the shape
[batch_size, d0, .. dN-1] ie. per-sample or per-timestep loss values;
otherwise, it is a scalar. If the model has multiple outputs, you can
use a different loss on each output by passing a dictionary or a list
of losses. The loss value that will be minimized by the model will
then be the sum of all individual losses.
metrics: List of metrics to be evaluated by the model during training
and testing. Each of these can be a string (name of a built-in
function), function or a `tf.keras.metrics.Metric` instance. See
`tf.keras.metrics`. Typically you will use `metrics=['accuracy']`. A
function is any callable with the signature `result = fn(y_true,
y_pred)`. To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary, such as
`metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`.
You can also pass a list (len = len(outputs)) of lists of metrics
such as `metrics=[['accuracy'], ['accuracy', 'mse']]` or
`metrics=['accuracy', ['accuracy', 'mse']]`. When you pass the
strings 'accuracy' or 'acc', we convert this to one of
`tf.keras.metrics.BinaryAccuracy`,
`tf.keras.metrics.CategoricalAccuracy`,
`tf.keras.metrics.SparseCategoricalAccuracy` based on the loss
function used and the model output shape. We do a similar
conversion for the strings 'crossentropy' and 'ce' as well.
loss_weights: Optional list or dictionary specifying scalar coefficients
(Python floats) to weight the loss contributions of different model
outputs. The loss value that will be minimized by the model will then
be the *weighted sum* of all individual losses, weighted by the
`loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping to the model's
outputs. If a dict, it is expected to map output names (strings)
to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise sample weighting (2D
weights), set this to `"temporal"`. `None` defaults to sample-wise
weights (1D). If the model has multiple outputs, you can use a
different `sample_weight_mode` on each output by passing a dictionary
or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted by
sample_weight or class_weight during training and testing.
run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s
logic will not be wrapped in a `tf.function`. Recommended to leave
this as `None` unless your `Model` cannot be run inside a
`tf.function`.
**kwargs: Any additional arguments. Supported arguments:
`experimental_steps_per_execution`: Int. The number of batches to
run during each `tf.function` call. Running multiple batches
inside a single `tf.function` call can greatly improve performance
on TPUs or small models with a large Python overhead. Note that if
this value is set to `N`, `Callback.on_batch` methods will only be
called every `N` batches. This currently defaults to `1`. At most,
one full epoch will be run each execution. If a number larger than
the size of the epoch is passed, the execution will be truncated
to the size of the epoch.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
_keras_api_gauge.get_cell('compile').set(True)
with self.distribute_strategy.scope():
self._validate_compile(optimizer, metrics, **kwargs)
self._run_eagerly = kwargs.pop('run_eagerly', None)
self.optimizer = self._get_optimizer(optimizer)
self.compiled_loss = compile_utils.LossesContainer(
loss, loss_weights, output_names=self.output_names)
self.compiled_metrics = compile_utils.MetricsContainer(
metrics, weighted_metrics, output_names=self.output_names)
experimental_steps_per_execution = kwargs.pop(
'experimental_steps_per_execution', 1)
self._configure_steps_per_execution(experimental_steps_per_execution)
# Initializes attrs that are reset each time `compile` is called.
self._reset_compile_cache()
self._is_compiled = True
self.loss = loss or {} # Backwards compat.
def _get_optimizer(self, optimizer):
"""Wraps `optimizer` in `LossScaleOptimizer` if necessary."""
def _get_single_optimizer(opt):
opt = optimizers.get(opt)
if (self._dtype_policy.loss_scale is not None and
not isinstance(opt, lso.LossScaleOptimizer)):
opt = lso.LossScaleOptimizer(opt, self._dtype_policy.loss_scale)
return opt
return nest.map_structure(_get_single_optimizer, optimizer)
@trackable.no_automatic_dependency_tracking
def _reset_compile_cache(self):
self.train_function = None
self.test_function = None
self.predict_function = None
# Used to cache `trainable` attr of `Layer`s for `fit`.
self._compiled_trainable_state = self._get_trainable_state()
@trackable.no_automatic_dependency_tracking
def _configure_steps_per_execution(self, steps_per_execution):
self._steps_per_execution = variables.Variable(
steps_per_execution,
dtype='int64',
aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA)
@property
def metrics(self):
"""Returns the model's metrics added using `compile`, `add_metric` APIs."""
metrics = []
if self._is_compiled:
# TODO(omalleyt): Track `LossesContainer` and `MetricsContainer` objects
# so that attr names are not load-bearing.
if self.compiled_loss is not None:
metrics += self.compiled_loss.metrics
if self.compiled_metrics is not None:
metrics += self.compiled_metrics.metrics
all_layers = self._gather_unique_layers()
for l in all_layers:
metrics.extend(l._metrics) # pylint: disable=protected-access
return metrics
@property
def metrics_names(self):
"""Returns the model's display labels for all outputs."""
# This property includes all output names including `loss` and per-output
# losses for backward compatibility.
return [m.name for m in self.metrics]
@property
def distribute_strategy(self):
"""The `tf.distribute.Strategy` this model was created under."""
return self._distribution_strategy or ds_context.get_strategy()
@property
def run_eagerly(self):
"""Settable attribute indicating whether the model should run eagerly.
Running eagerly means that your model will be run step by step,
like Python code. Your model might run slower, but it should become easier
for you to debug it by stepping into individual layer calls.
By default, we will attempt to compile your model to a static graph to
deliver the best execution performance.
Returns:
Boolean, whether the model should run eagerly.
"""
if self._run_eagerly is True and not context.executing_eagerly():
raise ValueError('You can only set `run_eagerly=True` if eager execution '
'is enabled.')
if not self.dynamic:
if self._run_eagerly is None:
# Respect `tf.config.experimental_run_functions_eagerly` unless
# `run_eagerly` was explicitly passed to `compile`.
return def_function.RUN_FUNCTIONS_EAGERLY
else:
return self._run_eagerly
else:
if not context.executing_eagerly():
raise ValueError('Your model contains layers that can only be '
'successfully run in eager execution (layers '
'constructed with `dynamic=True`). '
'You must enable eager execution with '
'`tf.enable_eager_execution()`.')
if self._run_eagerly is False:
# TODO(fchollet): consider using py_func to enable this.
raise ValueError('Your model contains layers that can only be '
'successfully run in eager execution (layers '
'constructed with `dynamic=True`). '
'You cannot set `run_eagerly=False`.')
return context.executing_eagerly()
@run_eagerly.setter
def run_eagerly(self, value):
self._run_eagerly = value
def train_step(self, data):
"""The logic for one training step.
This method can be overridden to support custom training logic.
This method is called by `Model._make_train_function`.
This method should contain the mathematical logic for one step of training.
This typically includes the forward pass, loss calculation, backpropagation,
and metric updates.
Configuration details for *how* this logic is run (e.g. `tf.function` and
`tf.distribute.Strategy` settings), should be left to
`Model._make_train_function`, which can also be overridden.
Arguments:
data: A nested structure of `Tensor`s.
Returns:
A `dict` containing values that will be passed to
`tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the
values of the `Model`'s metrics are returned. Example:
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
# These are the only transformations `Model.fit` applies to user-input
# data when a `tf.data.Dataset` is provided. These utilities will be exposed
# publicly.
data = data_adapter.expand_1d(data)
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
with backprop.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
# For custom training steps, users can just write:
# trainable_variables = self.trainable_variables
# gradients = tape.gradient(loss, trainable_variables)
# self.optimizer.apply_gradients(zip(gradients, trainable_variables))
# The _minimize call does a few extra steps unnecessary in most cases,
# such as loss scaling and gradient clipping.
_minimize(self.distribute_strategy, tape, self.optimizer, loss,
self.trainable_variables)
self.compiled_metrics.update_state(y, y_pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
def make_train_function(self):
"""Creates a function that executes one step of training.
This method can be overridden to support custom training logic.
This method is called by `Model.fit` and `Model.train_on_batch`.
Typically, this method directly controls `tf.function` and
`tf.distribute.Strategy` settings, and delegates the actual training
logic to `Model._train_step`.
This function is cached the first time `Model.fit` or
`Model.train_on_batch` is called. The cache is cleared whenever
`Model.compile` is called.
Returns:
Function. The function created by this method should accept a
`tf.data.Iterator`, and return a `dict` containing values that will
be passed to `tf.keras.Callbacks.on_train_batch_end`, such as
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
if self.train_function is not None:
return self.train_function
def step_function(model, iterator):
"""Runs a single training step."""
def run_step(data):
outputs = model.train_step(data)
# Ensure counter is updated only if `train_step` succeeds.
with ops.control_dependencies(_minimum_control_deps(outputs)):
model._train_counter.assign_add(1) # pylint: disable=protected-access
return outputs
data = next(iterator)
outputs = model.distribute_strategy.run(run_step, args=(data,))
outputs = reduce_per_replica(
outputs, self.distribute_strategy, reduction='first')
write_scalar_summaries(outputs, step=model._train_counter) # pylint: disable=protected-access
return outputs
if self._steps_per_execution.numpy().item() == 1:
def train_function(iterator):
"""Runs a training execution with one step."""
return step_function(self, iterator)
else:
def train_function(iterator):
"""Runs a training execution with multiple steps."""
outputs = step_function(self, iterator)
for _ in math_ops.range(self._steps_per_execution - 1):
outputs = step_function(self, iterator)
return outputs
if not self.run_eagerly:
train_function = def_function.function(
train_function, experimental_relax_shapes=True)
self.train_function = train_function
return self.train_function
@enable_multi_worker
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset. Should return a tuple
of either `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A generator or `keras.utils.Sequence` returning `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
A more detailed description of unpacking behavior for iterator types
(Dataset, generator, Sequence) is given below.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset, generator,
or `keras.utils.Sequence` instance, `y` should
not be specified (since targets will be obtained from `x`).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of datasets, generators, or `keras.utils.Sequence` instances
(since they generate batches).
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
Note that the progress bar is not particularly useful when
logged to a file, so verbose=2 is recommended when not running
interactively (eg, in a production environment).
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See `tf.keras.callbacks`.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling. This argument is
not supported when `x` is a dataset, generator or
`keras.utils.Sequence` instance.
validation_data: Data on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data. Note that the validation
loss of data provided using `validation_split` or `validation_data`
is not affected by regularization layers like noise and dropout.
`validation_data` will override `validation_split`.
`validation_data` could be:
- tuple `(x_val, y_val)` of Numpy arrays or tensors
- tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays
- dataset
For the first two cases, `batch_size` must be provided.
For the last case, `validation_steps` could be provided.
Note that `validation_data` does not support all the data types that
are supported in `x`, eg, dict, generator or `keras.utils.Sequence`.
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch'). This argument is ignored
when `x` is a generator. 'batch' is a special option for dealing
with the limitations of HDF5 data; it shuffles in batch-sized
chunks. Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset, generator, or
`keras.utils.Sequence` instance, instead provide the sample_weights
as the third element of `x`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined. If x is a
`tf.data` dataset, and 'steps_per_epoch'
is None, the epoch will run until the input dataset is exhausted.
When passing an infinitely repeating dataset, you must specify the
`steps_per_epoch` argument. This argument is not supported with
array inputs.
validation_steps: Only relevant if `validation_data` is provided and
is a `tf.data` dataset. Total number of steps (batches of
samples) to draw before stopping when performing validation
at the end of every epoch. If 'validation_steps' is None, validation
will run until the `validation_data` dataset is exhausted. In the
case of an infinitely repeated dataset, it will run into an
infinite loop. If 'validation_steps' is specified and only part of
the dataset will be consumed, the evaluation will start from the
beginning of the dataset at each epoch. This ensures that the same
validation samples are used every time.
validation_batch_size: Integer or `None`.
Number of samples per validation batch.
If unspecified, will default to `batch_size`.
Do not specify the `validation_batch_size` if your data is in the
form of datasets, generators, or `keras.utils.Sequence` instances
(since they generate batches).
validation_freq: Only relevant if validation data is provided. Integer
or `collections_abc.Container` instance (e.g. list, tuple, etc.).
If an integer, specifies how many training epochs to run before a
new validation run is performed, e.g. `validation_freq=2` runs
validation every 2 epochs. If a Container, specifies the epochs on
which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up
when using process-based threading. If unspecified, `workers`
will default to 1. If 0, will execute the generator on the main
thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
**kwargs: Used for backwards compatibility.
Unpacking behavior for iterator-like inputs:
A common pattern is to pass a tf.data.Dataset, generator, or
tf.keras.utils.Sequence to the `x` argument of fit, which will in fact
yield not only features (x) but optionally targets (y) and sample weights.
Keras requires that the output of such iterator-likes be unambiguous. The
iterator should return a tuple of length 1, 2, or 3, where the optional
second and third elements will be used for y and sample_weight
respectively. Any other type provided will be wrapped in a length one
tuple, effectively treating everything as 'x'. When yielding dicts, they
should still adhere to the top-level tuple structure.
e.g. `({"x0": x0, "x1": x1}, y)`. Keras will not attempt to separate
features, targets, and weights from the keys of a single dict.
A notable unsupported data type is the namedtuple. The reason is that
it behaves like both an ordered datatype (tuple) and a mapping
datatype (dict). So given a namedtuple of the form:
`namedtuple("example_tuple", ["y", "x"])`
it is ambiguous whether to reverse the order of the elements when
interpreting the value. Even worse is a tuple of the form:
`namedtuple("other_tuple", ["x", "y", "z"])`
where it is unclear if the tuple was intended to be unpacked into x, y,
and sample_weight or passed through as a single element to `x`. As a
result the data processing code will simply raise a ValueError if it
encounters a namedtuple. (Along with instructions to remedy the issue.)
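As an illustration of the supported tuple structure (a sketch only;
`x_train` and `y_train` are hypothetical arrays):
```python
import tensorflow as tf

dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
model.fit(dataset, epochs=3)  # targets are taken from the second element
```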
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: If the model was never compiled.
ValueError: In case of mismatch between the provided input data
and what the model expects.
"""
_keras_api_gauge.get_cell('fit').set(True)
# Legacy graph support is contained in `training_v1.Model`.
version_utils.disallow_legacy_graph('Model', 'fit')
self._assert_compile_was_called()
self._check_call_args('fit')
if validation_split:
# Create the validation data using the training data. Only supported for
# `Tensor` and `NumPy` input.
(x, y, sample_weight), validation_data = (
data_adapter.train_validation_split((x, y, sample_weight),
validation_split=validation_split,
shuffle=False))
with self.distribute_strategy.scope(), \
training_utils.RespectCompiledTrainableState(self):
# Creates a `tf.data.Dataset` and handles batch and epoch iteration.
data_handler = data_adapter.DataHandler(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
initial_epoch=initial_epoch,
epochs=epochs,
shuffle=shuffle,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
model=self,
steps_per_execution=self._steps_per_execution)
# Container that configures and calls `tf.keras.Callback`s.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
model=self,
verbose=verbose,
epochs=epochs,
steps=data_handler.inferred_steps)
self.stop_training = False
train_function = self.make_train_function()
self._train_counter.assign(0)
callbacks.on_train_begin()
# Handle fault-tolerance for multi-worker.
# TODO(omalleyt): Fix the ordering issues that mean this has to
# happen after `callbacks.on_train_begin`.
data_handler._initial_epoch = ( # pylint: disable=protected-access
self._maybe_load_initial_epoch_from_ckpt(initial_epoch))
for epoch, iterator in data_handler.enumerate_epochs():
self.reset_metrics()
callbacks.on_epoch_begin(epoch)
with data_handler.catch_stop_iteration():
for step in data_handler.steps():
with trace.Trace(
'TraceContext',
graph_type='train',
epoch_num=epoch,
step_num=step,
batch_size=batch_size):
callbacks.on_train_batch_begin(step)
tmp_logs = train_function(iterator)
if data_handler.should_sync:
context.async_wait()
logs = tmp_logs # No error, now safe to assign to logs.
end_step = step + data_handler.step_increment
callbacks.on_train_batch_end(end_step, logs)
epoch_logs = copy.copy(logs)
# Run validation.
if validation_data and self._should_eval(epoch, validation_freq):
val_x, val_y, val_sample_weight = (
data_adapter.unpack_x_y_sample_weight(validation_data))
val_logs = self.evaluate(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
steps=validation_steps,
callbacks=callbacks,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
return_dict=True)
val_logs = {'val_' + name: val for name, val in val_logs.items()}
epoch_logs.update(val_logs)
callbacks.on_epoch_end(epoch, epoch_logs)
if self.stop_training:
break
callbacks.on_train_end()
return self.history
def test_step(self, data):
"""The logic for one evaluation step.
This method can be overridden to support custom evaluation logic.
This method is called by `Model._make_test_function`.
This function should contain the mathematical logic for one step of
evaluation.
This typically includes the forward pass, loss calculation, and metrics
updates.
Configuration details for *how* this logic is run (e.g. `tf.function` and
`tf.distribute.Strategy` settings), should be left to
`Model._make_test_function`, which can also be overridden.
Arguments:
data: A nested structure of `Tensor`s.
Returns:
A `dict` containing values that will be passed to
`tf.keras.callbacks.CallbackList.on_test_batch_end`. Typically, the
values of the `Model`'s metrics are returned.
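A minimal sketch of overriding this method (illustrative only; assumes
the dataset yields `(x, y)` tuples):
```python
import tensorflow as tf

class CustomModel(tf.keras.Model):

  def test_step(self, data):
    x, y = data
    y_pred = self(x, training=False)
    self.compiled_loss(y, y_pred, regularization_losses=self.losses)
    self.compiled_metrics.update_state(y, y_pred)
    return {m.name: m.result() for m in self.metrics}
```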
"""
data = data_adapter.expand_1d(data)
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
y_pred = self(x, training=False)
# Updates stateful loss metrics.
self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
self.compiled_metrics.update_state(y, y_pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
def make_test_function(self):
"""Creates a function that executes one step of evaluation.
This method can be overridden to support custom evaluation logic.
This method is called by `Model.evaluate` and `Model.test_on_batch`.
Typically, this method directly controls `tf.function` and
`tf.distribute.Strategy` settings, and delegates the actual evaluation
logic to `Model._test_step`.
This function is cached the first time `Model.evaluate` or
`Model.test_on_batch` is called. The cache is cleared whenever
`Model.compile` is called.
Returns:
Function. The function created by this method should accept a
`tf.data.Iterator`, and return a `dict` containing values that will
be passed to `tf.keras.Callbacks.on_test_batch_end`.
"""
if self.test_function is not None:
return self.test_function
def step_function(model, iterator):
"""Runs a single evaluation step."""
def run_step(data):
outputs = model.test_step(data)
# Ensure counter is updated only if `test_step` succeeds.
with ops.control_dependencies(_minimum_control_deps(outputs)):
model._test_counter.assign_add(1) # pylint: disable=protected-access
return outputs
data = next(iterator)
outputs = model.distribute_strategy.run(run_step, args=(data,))
outputs = reduce_per_replica(
outputs, self.distribute_strategy, reduction='first')
return outputs
if self._steps_per_execution.numpy().item() == 1:
def test_function(iterator):
"""Runs an evaluation execution with one step."""
return step_function(self, iterator)
else:
def test_function(iterator):
"""Runs an evaluation execution with multiple steps."""
outputs = step_function(self, iterator)
for _ in math_ops.range(self._steps_per_execution - 1):
outputs = step_function(self, iterator)
return outputs
if not self.run_eagerly:
test_function = def_function.function(
test_function, experimental_relax_shapes=True)
self.test_function = test_function
return self.test_function
@enable_multi_worker
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
return_dict=False):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset.
- A generator or `keras.utils.Sequence` instance.
A more detailed description of unpacking behavior for iterator types
(Dataset, generator, Sequence) is given in the `Unpacking behavior
for iterator-like inputs` section of `Model.fit`.
y: Target data. Like the input data `x`, it could be either Numpy
array(s) or TensorFlow tensor(s). It should be consistent with `x`
(you cannot have Numpy inputs and tensor targets, or inversely). If
`x` is a dataset, generator or `keras.utils.Sequence` instance, `y`
should not be specified (since targets will be obtained from the
iterator/dataset).
batch_size: Integer or `None`. Number of samples per gradient update. If
unspecified, `batch_size` will default to 32. Do not specify the
`batch_size` if your data is in the form of a dataset, generators,
or `keras.utils.Sequence` instances (since they generate batches).
verbose: 0 or 1. Verbosity mode. 0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for the test samples,
used for weighting the loss function. You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples), or in the case of
temporal data, you can pass a 2D array with shape `(samples,
sequence_length)`, to apply a different weight to every timestep
of every sample. In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is
not supported when `x` is a dataset, instead pass sample weights
as the third element of `x`.
steps: Integer or `None`. Total number of steps (batches of samples)
before declaring the evaluation round finished. Ignored with the
default value of `None`. If x is a `tf.data` dataset and `steps` is
None, 'evaluate' will run until the dataset is exhausted. This
argument is not supported with array inputs.
callbacks: List of `keras.callbacks.Callback` instances. List of
callbacks to apply during evaluation. See
[callbacks](/api_docs/python/tf/keras/callbacks).
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue. If unspecified,
`max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using process-based
threading. If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to the
generator as they can't be passed easily to children processes.
return_dict: If `True`, loss and metric results are returned as a dict,
with each key being the name of the metric. If `False`, they are
returned as a list.
See the discussion of `Unpacking behavior for iterator-like inputs` for
`Model.fit`.
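Example usage (a sketch only; `x_test` and `y_test` are hypothetical
arrays for a compiled model):
```python
results = model.evaluate(x_test, y_test, batch_size=128, return_dict=True)
# e.g. {'loss': 0.32, 'accuracy': 0.91}, depending on the compiled metrics
```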
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: in case of invalid arguments.
"""
_keras_api_gauge.get_cell('evaluate').set(True)
version_utils.disallow_legacy_graph('Model', 'evaluate')
self._assert_compile_was_called()
self._check_call_args('evaluate')
with self.distribute_strategy.scope():
# Creates a `tf.data.Dataset` and handles batch and epoch iteration.
data_handler = data_adapter.DataHandler(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps,
initial_epoch=0,
epochs=1,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
model=self,
steps_per_execution=self._steps_per_execution)
# Container that configures and calls `tf.keras.Callback`s.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
model=self,
verbose=verbose,
epochs=1,
steps=data_handler.inferred_steps)
test_function = self.make_test_function()
self._test_counter.assign(0)
callbacks.on_test_begin()
for _, iterator in data_handler.enumerate_epochs(): # Single epoch.
self.reset_metrics()
with data_handler.catch_stop_iteration():
for step in data_handler.steps():
with trace.Trace('TraceContext', graph_type='test', step_num=step):
callbacks.on_test_batch_begin(step)
tmp_logs = test_function(iterator)
if data_handler.should_sync:
context.async_wait()
logs = tmp_logs # No error, now safe to assign to logs.
end_step = step + data_handler.step_increment
callbacks.on_test_batch_end(end_step, logs)
callbacks.on_test_end()
logs = tf_utils.to_numpy_or_python_type(logs)
if return_dict:
return logs
else:
results = [logs.get(name, None) for name in self.metrics_names]
if len(results) == 1:
return results[0]
return results
def predict_step(self, data):
"""The logic for one inference step.
This method can be overridden to support custom inference logic.
This method is called by `Model._make_predict_function`.
This method should contain the mathematical logic for one step of inference.
This typically includes the forward pass.
Configuration details for *how* this logic is run (e.g. `tf.function` and
`tf.distribute.Strategy` settings), should be left to
`Model._make_predict_function`, which can also be overridden.
Arguments:
data: A nested structure of `Tensor`s.
Returns:
The result of one inference step, typically the output of calling the
`Model` on data.
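A minimal sketch of overriding this method, e.g. to return class ids
instead of raw model outputs (illustrative only; `ArgmaxModel` is a
hypothetical name):
```python
import tensorflow as tf

class ArgmaxModel(tf.keras.Model):

  def predict_step(self, data):
    outputs = super(ArgmaxModel, self).predict_step(data)
    return tf.argmax(outputs, axis=-1)
```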
"""
data = data_adapter.expand_1d(data)
x, _, _ = data_adapter.unpack_x_y_sample_weight(data)
return self(x, training=False)
def make_predict_function(self):
"""Creates a function that executes one step of inference.
This method can be overridden to support custom inference logic.
This method is called by `Model.predict` and `Model.predict_on_batch`.
Typically, this method directly controls `tf.function` and
`tf.distribute.Strategy` settings, and delegates the actual evaluation
logic to `Model._predict_step`.
This function is cached the first time `Model.predict` or
`Model.predict_on_batch` is called. The cache is cleared whenever
`Model.compile` is called.
Returns:
Function. The function created by this method should accept a
`tf.data.Iterator`, and return the outputs of the `Model`.
"""
if self.predict_function is not None:
return self.predict_function
def predict_function(iterator):
"""Runs one call to `self.predict_function`."""
def run_step(data):
outputs = self.predict_step(data)
# Ensure counter is updated only if `predict_step` succeeds.
with ops.control_dependencies(_minimum_control_deps(outputs)):
self._predict_counter.assign_add(1)
return outputs
data = next(iterator)
outputs = self.distribute_strategy.run(run_step, args=(data,))
outputs = reduce_per_replica(
outputs, self.distribute_strategy, reduction='concat')
return outputs
if not self.run_eagerly:
predict_function = def_function.function(
predict_function, experimental_relax_shapes=True)
self.predict_function = predict_function
return self.predict_function
@disable_multi_worker
def predict(self,
x,
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Generates output predictions for the input samples.
Computation is done in batches. This method is designed for performance in
large scale inputs. For a small number of inputs that fit in one batch,
directly using `__call__` is recommended for faster execution, e.g.,
`model(x)`, or `model(x, training=False)` if you have layers such as
`tf.keras.layers.BatchNormalization` that behave differently during
inference. Also, note that test loss is not affected by
regularization layers like noise and dropout.
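A minimal sketch of both options (illustrative only; `x_small` and
`x_large` are hypothetical inputs):
```python
small_preds = model(x_small, training=False)        # single in-memory batch
all_preds = model.predict(x_large, batch_size=256)  # batched inference
```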
Arguments:
x: Input samples. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset.
- A generator or `keras.utils.Sequence` instance.
A more detailed description of unpacking behavior for iterator types
(Dataset, generator, Sequence) is given in the `Unpacking behavior
for iterator-like inputs` section of `Model.fit`.
batch_size: Integer or `None`.
Number of samples per batch.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of dataset, generators, or `keras.utils.Sequence` instances
(since they generate batches).
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`. If x is a `tf.data`
dataset and `steps` is None, `predict` will
run until the input dataset is exhausted.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during prediction.
See [callbacks](/api_docs/python/tf/keras/callbacks).
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
See the discussion of `Unpacking behavior for iterator-like inputs` for
`Model.fit`. Note that Model.predict uses the same interpretation rules as
`Model.fit` and `Model.evaluate`, so inputs must be unambiguous for all
three methods.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
print("=============================================test")
_keras_api_gauge.get_cell('predict').set(True)
version_utils.disallow_legacy_graph('Model', 'predict')
self._check_call_args('predict')
outputs = None
with self.distribute_strategy.scope():
# Creates a `tf.data.Dataset` and handles batch and epoch iteration.
data_handler = data_adapter.DataHandler(
x=x,
batch_size=batch_size,
steps_per_epoch=steps,
initial_epoch=0,
epochs=1,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
model=self)
# Container that configures and calls `tf.keras.Callback`s.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
model=self,
verbose=verbose,
epochs=1,
steps=data_handler.inferred_steps)
predict_function = self.make_predict_function()
self._predict_counter.assign(0)
callbacks.on_predict_begin()
for _, iterator in data_handler.enumerate_epochs(): # Single epoch.
with data_handler.catch_stop_iteration():
for step in data_handler.steps():
callbacks.on_predict_batch_begin(step)
tmp_batch_outputs = predict_function(iterator)
if data_handler.should_sync:
context.async_wait()
batch_outputs = tmp_batch_outputs # No error, now safe to assign.
if outputs is None:
outputs = nest.map_structure(lambda batch_output: [batch_output],
batch_outputs)
else:
nest.map_structure_up_to(
batch_outputs,
lambda output, batch_output: output.append(batch_output),
outputs, batch_outputs)
callbacks.on_predict_batch_end(step, {'outputs': batch_outputs})
callbacks.on_predict_end()
all_outputs = nest.map_structure_up_to(batch_outputs, concat, outputs)
return tf_utils.to_numpy_or_python_type(all_outputs)
def reset_metrics(self):
"""Resets the state of metrics."""
for m in self.metrics:
m.reset_states()
def train_on_batch(self,
x,
y=None,
sample_weight=None,
class_weight=None,
reset_metrics=True,
return_dict=False):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
y: Target data. Like the input data `x`, it could be either Numpy
array(s) or TensorFlow tensor(s). It should be consistent with `x`
(you cannot have Numpy inputs and tensor targets, or inversely).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample. In the case of
temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of
every sample. In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
class_weight: Optional dictionary mapping class indices (integers) to a
weight (float) to apply to the model's loss for the samples from this
class during training. This can be useful to tell the model to "pay
more attention" to samples from an under-represented class.
reset_metrics: If `True`, the metrics returned will be only for this
batch. If `False`, the metrics will be statefully accumulated across
batches.
return_dict: If `True`, loss and metric results are returned as a dict,
with each key being the name of the metric. If `False`, they are
returned as a list.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
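Example usage (a sketch only; `x_batch` and `y_batch` are hypothetical
pre-built batches for a compiled model):
```python
logs = model.train_on_batch(x_batch, y_batch, return_dict=True)
# e.g. {'loss': 0.7, 'accuracy': 0.5}
```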
"""
self._assert_compile_was_called()
self._check_call_args('train_on_batch')
with self.distribute_strategy.scope(), \
training_utils.RespectCompiledTrainableState(self):
iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x,
y, sample_weight,
class_weight)
train_function = self.make_train_function()
logs = train_function(iterator)
if reset_metrics:
self.reset_metrics()
logs = tf_utils.to_numpy_or_python_type(logs)
if return_dict:
return logs
else:
results = [logs.get(name, None) for name in self.metrics_names]
if len(results) == 1:
return results[0]
return results
def test_on_batch(self,
x,
y=None,
sample_weight=None,
reset_metrics=True,
return_dict=False):
"""Test the model on a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
y: Target data. Like the input data `x`, it could be either Numpy
array(s) or TensorFlow tensor(s). It should be consistent with `x`
(you cannot have Numpy inputs and tensor targets, or inversely).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample. In the case of
temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of
every sample. In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
reset_metrics: If `True`, the metrics returned will be only for this
batch. If `False`, the metrics will be statefully accumulated across
batches.
return_dict: If `True`, loss and metric results are returned as a dict,
with each key being the name of the metric. If `False`, they are
returned as a list.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
self._assert_compile_was_called()
self._check_call_args('test_on_batch')
with self.distribute_strategy.scope():
iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x,
y, sample_weight)
test_function = self.make_test_function()
logs = test_function(iterator)
if reset_metrics:
self.reset_metrics()
logs = tf_utils.to_numpy_or_python_type(logs)
if return_dict:
return logs
else:
results = [logs.get(name, None) for name in self.metrics_names]
if len(results) == 1:
return results[0]
return results
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
"""
self._check_call_args('predict_on_batch')
with self.distribute_strategy.scope():
iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x)
predict_function = self.make_predict_function()
outputs = predict_function(iterator)
return tf_utils.to_numpy_or_python_type(outputs)
@deprecation.deprecated(
None, 'Please use Model.fit, which supports generators.')
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
validation_freq=1,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
DEPRECATED:
`Model.fit` now supports generators, so there is no longer any need to use
this endpoint.
"""
_keras_api_gauge.get_cell('fit_generator').set(True)
return self.fit(
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
validation_freq=validation_freq,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
@deprecation.deprecated(
None, 'Please use Model.evaluate, which supports generators.')
def evaluate_generator(self,
generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Evaluates the model on a data generator.
DEPRECATED:
`Model.evaluate` now supports generators, so there is no longer any need
to use this endpoint.
"""
_keras_api_gauge.get_cell('evaluate_generator').set(True)
self._check_call_args('evaluate_generator')
return self.evaluate(
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
callbacks=callbacks)
@deprecation.deprecated(
None, 'Please use Model.predict, which supports generators.')
def predict_generator(self,
generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
DEPRECATED:
`Model.predict` now supports generators, so there is no longer any need
to use this endpoint.
"""
_keras_api_gauge.get_cell('predict_generator').set(True)
return self.predict(
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
callbacks=callbacks)
def _check_call_args(self, method_name):
"""Check that `call` has only one positional arg."""
# Always allow first arg, regardless of arg name.
fullargspec = self._call_full_argspec
if fullargspec.defaults:
positional_args = fullargspec.args[:-len(fullargspec.defaults)]
else:
positional_args = fullargspec.args
if 'training' in positional_args:
positional_args.remove('training')
# self and first arg can be positional.
if len(positional_args) > 2:
extra_args = positional_args[2:]
raise ValueError(
'Models passed to `' + method_name + '` can only have `training` '
'and the first argument in `call` as positional arguments, '
'found: ' + str(extra_args) + '.')
def _validate_compile(self, optimizer, metrics, **kwargs):
"""Performs validation checks for the default `compile`."""
if any(
isinstance(opt, optimizers.Optimizer)
for opt in nest.flatten(optimizer)):
raise ValueError(
'`tf.compat.v1.keras` Optimizer (', optimizer, ') is '
'not supported when eager execution is enabled. Use a '
'`tf.keras` Optimizer instead, or disable eager '
'execution.')
kwargs.pop('cloning', None) # Legacy DistStrat argument, never used.
kwargs.pop('experimental_run_tf_function', None) # Always `True`.
if kwargs.pop('distribute', None) is not None:
raise ValueError(
'Distribute argument in compile is not available in TF 2.0 please '
'create the model under the distribution strategy scope.')
if kwargs.pop('target_tensors', None) is not None:
raise ValueError(
'target_tensors argument is not supported when executing eagerly.')
invalid_kwargs = set(kwargs) - {'experimental_steps_per_execution'}
if invalid_kwargs:
raise TypeError('Invalid keyword argument(s) in `compile`: %s' %
(invalid_kwargs,))
# Model must be created and compiled with the same DistStrat.
if self.built and ds_context.has_strategy():
strategy = ds_context.get_strategy()
for v in self.variables:
if not strategy.extended.variable_created_in_scope(v):
raise ValueError(
'Variable (%s) was not created in the distribution strategy '
'scope of (%s). It is most likely because some layers, the model, '
'or the optimizer were created outside the distribution '
'strategy scope. Try to make sure your code looks similar '
'to the following.\n'
'with strategy.scope():\n'
' model=_create_model()\n'
' model.compile(...)' % (v, strategy))
# Model metrics must be created in the same distribution strategy scope
# as the model.
strategy = self._get_distribution_strategy()
for metric in nest.flatten(metrics):
for v in getattr(metric, 'variables', []):
if not strategy.extended.variable_created_in_scope(v):
raise ValueError(
'Metric (%s) passed to model.compile was created inside of a '
'different distribution strategy scope than the model. All '
'metrics must be created in the same distribution strategy '
'scope as the model (in this case %s). If you pass in a string '
'identifier for a metric to compile the metric will '
'automatically be created in the correct distribution '
'strategy scope.' % (metric, strategy)
)
def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch):
"""Maybe load initial epoch from ckpt considering possible worker recovery.
Refer to tensorflow/python/keras/distribute/multi_worker_training_state.py
for more information.
Arguments:
initial_epoch: The original initial_epoch the user passes in `fit()`.
Returns:
If the training is recovering from previous failure under multi-worker
training setting, return the epoch the training is supposed to continue
at. Otherwise, return the `initial_epoch` the user passes in.
"""
if self._training_state is not None:
return self._training_state.maybe_load_initial_epoch_from_ckpt(
initial_epoch, mode=ModeKeys.TRAIN)
return initial_epoch
def _assert_compile_was_called(self):
# Checks whether `compile` has been called. If it has been called,
# then the optimizer is set. This is different from whether the
# model is compiled
# (i.e. whether the model is built and its inputs/outputs are set).
if not self._is_compiled:
raise RuntimeError('You must compile your model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
def _set_inputs(self, inputs, outputs=None, training=None):
"""This method is for compat with Modelv1. Only inputs are needed here."""
self._set_save_spec(inputs)
@property
def _trackable_saved_model_saver(self):
return model_serialization.ModelSavedModelSaver(self)
def _list_functions_for_serialization(self, serialization_cache):
# SavedModel needs to ignore the execution functions.
train_function = self.train_function
test_function = self.test_function
predict_function = self.predict_function
self.train_function = None
self.test_function = None
self.predict_function = None
functions = super(
Model, self)._list_functions_for_serialization(serialization_cache)
self.train_function = train_function
self.test_function = test_function
self.predict_function = predict_function
return functions
def _should_eval(self, epoch, validation_freq):
epoch = epoch + 1 # one-index the user-facing epoch.
if isinstance(validation_freq, int):
return epoch % validation_freq == 0
elif isinstance(validation_freq, list):
return epoch in validation_freq
else:
raise ValueError('Expected `validation_freq` to be a list or int.')
######################################################################
# Functions below exist only as v1 / v2 compatibility shims.
######################################################################
def _get_compile_args(self):
"""Used for saving or cloning a Model."""
self._assert_compile_was_called()
# pylint: disable=protected-access
compile_args = {
'optimizer': self.optimizer,
'loss': self.compiled_loss._user_losses,
'metrics': self.compiled_metrics._user_metrics,
'weighted_metrics': self.compiled_metrics._user_weighted_metrics,
'loss_weights': self.compiled_loss._user_loss_weights,
'sample_weight_mode': None,
}
# pylint: enable=protected-access
return compile_args
def _get_callback_model(self):
return self
def _in_multi_worker_mode(self):
return self.distribute_strategy.extended._in_multi_worker_mode() # pylint: disable=protected-access
def _get_distribution_strategy(self):
return self.distribute_strategy
@property
def _compile_was_called(self):
return self._is_compiled
def reduce_per_replica(values, strategy, reduction='first'):
"""Reduce PerReplica objects.
Arguments:
values: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are
returned as-is.
strategy: `tf.distribute.Strategy` object.
reduction: One of 'first', 'concat'.
Returns:
Structure of `Tensor`s.
"""
def _reduce(v):
"""Reduce a single `PerReplica` object."""
if not isinstance(v, ds_values.PerReplica):
return v
elif reduction == 'first':
return strategy.unwrap(v)[0]
elif reduction == 'concat':
if _is_tpu_multi_host(strategy):
return _tpu_multi_host_concat(v, strategy)
else:
return concat(strategy.unwrap(v))
else:
raise ValueError('`reduction` must be "first" or "concat".')
return nest.map_structure(_reduce, values)
def concat(tensors, axis=0):
"""Concats `tensor`s along `axis`."""
if isinstance(tensors[0], sparse_tensor.SparseTensor):
return sparse_ops.sparse_concat_v2(axis=axis, sp_inputs=tensors)
if isinstance(tensors[0], ragged_tensor.RaggedTensor):
return ragged_concat_ops.concat(tensors, axis=axis)
return array_ops.concat(tensors, axis=axis)
def _is_tpu_multi_host(strategy):
return (dist_utils.is_tpu_strategy(strategy) and
strategy.extended.num_hosts > 1)
def _tpu_multi_host_concat(v, strategy):
"""Correctly order TPU PerReplica objects."""
replicas = strategy.unwrap(v)
# When distributed datasets are created from Tensors / NumPy,
# TPUStrategy.experimental_distribute_dataset shards data in
# (Replica, Host) order, and TPUStrategy.unwrap returns it in
# (Host, Replica) order.
# TODO(b/150317897): Figure out long-term plan here.
num_replicas_per_host = strategy.extended.num_replicas_per_host
ordered_replicas = []
for replica_id in range(num_replicas_per_host):
ordered_replicas += replicas[replica_id::num_replicas_per_host]
return concat(ordered_replicas)
def _minimize(strategy, tape, optimizer, loss, trainable_variables):
"""Minimizes loss for one step by updating `trainable_variables`.
This is roughly equivalent to
```python
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
```
However, this function also applies gradient clipping and loss scaling if the
optimizer is a LossScaleOptimizer.
Args:
strategy: `tf.distribute.Strategy`.
tape: A gradient tape. The loss must have been computed under this tape.
optimizer: The optimizer used to minimize the loss.
loss: The loss tensor.
trainable_variables: The variables that will be updated in order to minimize
the loss.
"""
with tape:
if isinstance(optimizer, lso.LossScaleOptimizer):
loss = optimizer.get_scaled_loss(loss)
gradients = tape.gradient(loss, trainable_variables)
# Whether to aggregate gradients outside of optimizer. This requires support
# of the optimizer and doesn't work with ParameterServerStrategy and
# CentralStorageStrategy.
aggregate_grads_outside_optimizer = (
optimizer._HAS_AGGREGATE_GRAD and # pylint: disable=protected-access
not isinstance(strategy.extended,
parameter_server_strategy.ParameterServerStrategyExtended))
if aggregate_grads_outside_optimizer:
# We aggregate gradients before unscaling them, in case a subclass of
# LossScaleOptimizer all-reduces in fp16. All-reducing in fp16 can only be
# done on scaled gradients, not unscaled gradients, for numeric stability.
gradients = optimizer._aggregate_gradients(zip(gradients, # pylint: disable=protected-access
trainable_variables))
if isinstance(optimizer, lso.LossScaleOptimizer):
gradients = optimizer.get_unscaled_gradients(gradients)
gradients = optimizer._clip_gradients(gradients) # pylint: disable=protected-access
if trainable_variables:
if aggregate_grads_outside_optimizer:
optimizer.apply_gradients(
zip(gradients, trainable_variables),
experimental_aggregate_gradients=False)
else:
optimizer.apply_gradients(zip(gradients, trainable_variables))
def _is_scalar(x):
return isinstance(x, (ops.Tensor, variables.Variable)) and x.shape.rank == 0
def write_scalar_summaries(logs, step):
for name, value in logs.items():
if _is_scalar(value):
summary_ops_v2.scalar('batch_' + name, value, step=step)
def _minimum_control_deps(outputs):
"""Returns the minimum control dependencies to ensure step succeeded."""
if context.executing_eagerly():
return [] # Control dependencies not needed.
outputs = nest.flatten(outputs, expand_composites=True)
for out in outputs:
# Variables can't be control dependencies.
if not isinstance(out, variables.Variable):
return [out] # Return first Tensor or Op from outputs.
return [] # No viable Tensor or Op to use for control deps.
|
py
|
1a5d7901736e4bc8b30aea70467174d7740f353c
|
'''Class to hold race management variables.'''
class RHRace():
'''Class to hold race management variables.'''
def __init__(self):
self.num_nodes = 0
self.current_heat = 1
self.race_status = 0
self.timer_running = 0
def get_race_state():
'''Returns the race object.'''
return RHRace()
|
py
|
1a5d799d727b605d0a3a4bb0bdba2ddc22a4e4d8
|
# PPO (Percentage Price Oscillator)
# https://school.stockcharts.com/doku.php?id=technical_indicators:price_oscillators_ppo
# The Percentage Price Oscillator (PPO) is a momentum oscillator that measures
# the difference between two moving averages as a percentage of the larger
# moving average.
# Arguments:
# close (pandas.Series): dataset 'Price' column.
# window_slow (int): n periods for the long-term moving average.
# window_fast (int): n periods for the short-term moving average.
# window_sign (int): n periods for the signal line.
# fillna (bool): if True, fill nan values.
import pandas as pd
from _utilities import IndicatorMixin, _ema
class PercentagePriceOscillator(IndicatorMixin):
def __init__(
self,
close: pd.Series,
window_slow: int = 26,
window_fast: int = 12,
window_sign: int = 9,
fillna: bool = False,
):
self._close = close
self._window_slow = window_slow
self._window_fast = window_fast
self._window_sign = window_sign
self._fillna = fillna
self._run()
def _run(self):
_emafast = _ema(self._close, self._window_fast, self._fillna)
_emaslow = _ema(self._close, self._window_slow, self._fillna)
self._ppo = ((_emafast - _emaslow) / _emaslow) * 100
self._ppo_signal = _ema(self._ppo, self._window_sign, self._fillna)
self._ppo_hist = self._ppo - self._ppo_signal
def ppo(self):
ppo_series = self._check_fillna(self._ppo, value=0)
return pd.Series(ppo_series, name=f"PPO_{self._window_fast}_{self._window_slow}")
def ppo_signal(self):
ppo_signal_series = self._check_fillna(self._ppo_signal, value=0)
return pd.Series(ppo_signal_series, name=f"PPO_sign_{self._window_fast}_{self._window_slow}")
def ppo_hist(self):
ppo_hist_series = self._check_fillna(self._ppo_hist, value=0)
return pd.Series(ppo_hist_series, name=f"PPO_hist_{self._window_fast}_{self._window_slow}")
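# Usage sketch (illustrative only; assumes a pandas DataFrame `df` with a "Close" column):
#   indicator = PercentagePriceOscillator(close=df["Close"], window_slow=26, window_fast=12, window_sign=9)
#   df["ppo"] = indicator.ppo()
#   df["ppo_signal"] = indicator.ppo_signal()
#   df["ppo_hist"] = indicator.ppo_hist()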
|
py
|
1a5d799dfa02c7baa1662efcc7d8fe27af96927d
|
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
Tests for general zero-order property package
"""
import pytest
import os
from idaes.core import declare_process_block_class, FlowsheetBlock
from idaes.core.util.model_statistics import degrees_of_freedom
from idaes.core.util.testing import initialization_tester
from idaes.core.solvers import get_solver
import idaes.core.util.scaling as iscale
from pyomo.environ import (
check_optimal_termination,
ConcreteModel,
Constraint,
Param,
Set,
value,
Var,
)
from pyomo.network import Port
from pyomo.util.check_units import assert_units_consistent
from watertap.core import (
Database,
WaterParameterBlock,
WaterStateBlock,
ZeroOrderBaseData,
)
from watertap.core.zero_order_sido_reactive import (
build_sido_reactive,
initialize_sidor,
calculate_scaling_factors_sidor,
_get_Q_sidor,
)
solver = get_solver()
local_path = os.path.dirname(os.path.abspath(__file__))
@declare_process_block_class("DerivedSIDOR")
class DerivedSIDORData(ZeroOrderBaseData):
def build(self):
super().build()
self._tech_type = "test_sidor_data"
build_sido_reactive(self)
class TestSIDOR:
@pytest.fixture(scope="module")
def model(self):
m = ConcreteModel()
m.db = Database(dbpath=local_path)
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(default={"solute_list": ["A", "B", "C"]})
m.fs.unit = DerivedSIDOR(
default={"property_package": m.fs.water_props, "database": m.db}
)
m.fs.unit.inlet.flow_mass_comp[0, "H2O"].fix(1000)
m.fs.unit.inlet.flow_mass_comp[0, "A"].fix(10)
m.fs.unit.inlet.flow_mass_comp[0, "B"].fix(20)
m.fs.unit.inlet.flow_mass_comp[0, "C"].fix(30)
m.fs.unit.load_parameters_from_database(use_default_removal=True)
return m
@pytest.mark.unit
def test_private_attributes(self, model):
assert model.fs.unit._has_recovery_removal is True
assert model.fs.unit._fixed_perf_vars == []
assert model.fs.unit._initialize is initialize_sidor
assert model.fs.unit._scaling is calculate_scaling_factors_sidor
assert model.fs.unit._get_Q is _get_Q_sidor
assert model.fs.unit._stream_table_dict == {
"Inlet": model.fs.unit.inlet,
"Treated": model.fs.unit.treated,
"Byproduct": model.fs.unit.byproduct,
}
assert model.fs.unit._perf_var_dict == {
"Water Recovery": model.fs.unit.recovery_frac_mass_H2O,
"Solute Removal": model.fs.unit.removal_frac_mass_solute,
"Reaction Extent": model.fs.unit.extent_of_reaction,
}
@pytest.mark.unit
def test_build(self, model):
assert isinstance(model.fs.unit.properties_in, WaterStateBlock)
assert isinstance(model.fs.unit.properties_treated, WaterStateBlock)
assert isinstance(model.fs.unit.properties_byproduct, WaterStateBlock)
assert isinstance(model.fs.unit.inlet, Port)
assert isinstance(model.fs.unit.treated, Port)
assert isinstance(model.fs.unit.byproduct, Port)
assert isinstance(model.fs.unit.recovery_frac_mass_H2O, Var)
assert len(model.fs.unit.recovery_frac_mass_H2O) == 1
assert isinstance(model.fs.unit.removal_frac_mass_solute, Var)
assert len(model.fs.unit.removal_frac_mass_solute) == 3
assert isinstance(model.fs.unit.water_recovery_equation, Constraint)
assert len(model.fs.unit.water_recovery_equation) == 1
assert isinstance(model.fs.unit.water_balance, Constraint)
assert len(model.fs.unit.water_balance) == 1
assert isinstance(model.fs.unit.solute_removal_equation, Constraint)
assert len(model.fs.unit.solute_removal_equation) == 3
assert isinstance(model.fs.unit.solute_treated_equation, Constraint)
assert len(model.fs.unit.solute_treated_equation) == 3
assert isinstance(model.fs.unit.reaction_set, Set)
assert isinstance(model.fs.unit.generation_ratio, Var)
assert isinstance(model.fs.unit.reaction_conversion, Var)
assert isinstance(model.fs.unit.extent_of_reaction, Var)
assert isinstance(model.fs.unit.reaction_extent_equation, Constraint)
for r in model.fs.unit.reaction_set:
assert r in ["Rxn1", "Rxn2"]
assert (0, r) in model.fs.unit.reaction_conversion
assert (0, r) in model.fs.unit.extent_of_reaction
assert (0, r) in model.fs.unit.reaction_extent_equation
for j in model.fs.water_props.component_list:
assert (r, j) in model.fs.unit.generation_ratio
@pytest.mark.unit
def test_key_components(self, model):
assert str(model.fs.unit.properties_in[0].flow_mass_comp["A"]) in str(
model.fs.unit.reaction_extent_equation[0, "Rxn1"].body
)
assert str(model.fs.unit.properties_in[0].flow_mass_comp["B"]) in str(
model.fs.unit.reaction_extent_equation[0, "Rxn2"].body
)
@pytest.mark.unit
def test_loading_data(self, model):
# Rxn 1 is all conversion ratios
# Rxn 2 is all stoichiometry (incl. water)
cfactor = {
"Rxn1": {"A": -1, "B": 1, "C": 0, "H2O": 0},
"Rxn2": {"A": 0, "B": -1, "C": 22 * 2 / 20, "H2O": -1 * 18 / 20},
}
for (r, j), p in model.fs.unit.generation_ratio.items():
assert value(p) == cfactor[r][j]
assert model.fs.unit.reaction_conversion[0, "Rxn1"].value == 0.8
assert model.fs.unit.reaction_conversion[0, "Rxn2"].value == 0.1
assert model.fs.unit.recovery_frac_mass_H2O[0].value == 0.85
assert model.fs.unit.removal_frac_mass_solute[0, "A"].value == 0.5
assert model.fs.unit.removal_frac_mass_solute[0, "B"].value == 0.4
assert model.fs.unit.removal_frac_mass_solute[0, "C"].value == 0.0
@pytest.mark.unit
def test_degrees_of_freedom(self, model):
assert degrees_of_freedom(model) == 0
@pytest.mark.component
def test_unit_consistency(self, model):
assert_units_consistent(model)
@pytest.mark.component
def test_scaling(self, model):
iscale.calculate_scaling_factors(model)
assert (
iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.water_recovery_equation[0]
)
== 1e5
)
assert (
iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.water_balance[0]
)
== 1e5
)
assert (
iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "A"]
)
== 1e5
)
assert (
iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "B"]
)
== 1e5
)
assert (
iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "C"]
)
== 1e5
)
assert (
iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_treated_equation[0, "A"]
)
== 1e5
)
assert (
iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_treated_equation[0, "B"]
)
== 1e5
)
assert (
iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_treated_equation[0, "C"]
)
== 1e5
)
assert (
iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.reaction_extent_equation[0, "Rxn1"]
)
== 1e5
)
assert (
iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.reaction_extent_equation[0, "Rxn2"]
)
== 1e5
)
@pytest.mark.component
def test_initialization(self, model):
initialization_tester(model)
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert check_optimal_termination(results)
@pytest.mark.component
def test_solution(self, model):
model.fs.unit.treated.display()
model.fs.unit.byproduct.display()
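        # Expected values below follow the reaction data loaded earlier: Rxn1
        # converts 80% of A to B (conversion ratio 1), while Rxn2 converts 10%
        # of B, generating C (mass ratio 2*22/20) and consuming water (mass
        # ratio 18/20), e.g. treated H2O = (1000 - 0.1*20*18/20) * 0.85.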
assert pytest.approx((1000 - 0.1 * 20 * 18 / 20) * 0.85, rel=1e-5) == value(
model.fs.unit.treated.flow_mass_comp[0, "H2O"]
)
assert pytest.approx((1000 - 0.1 * 20 * 18 / 20) * 0.15, rel=1e-5) == value(
model.fs.unit.byproduct.flow_mass_comp[0, "H2O"]
)
assert pytest.approx((10 * 0.2) * (1 - 0.5), rel=1e-5) == value(
model.fs.unit.treated.flow_mass_comp[0, "A"]
)
assert pytest.approx((10 * 0.2) * 0.5, rel=1e-5) == value(
model.fs.unit.byproduct.flow_mass_comp[0, "A"]
)
assert pytest.approx((20 + 0.8 * 10 - 0.1 * 20) * (1 - 0.4), rel=1e-5) == value(
model.fs.unit.treated.flow_mass_comp[0, "B"]
)
assert pytest.approx((20 + 0.8 * 10 - 0.1 * 20) * 0.4, rel=1e-5) == value(
model.fs.unit.byproduct.flow_mass_comp[0, "B"]
)
assert pytest.approx(30 + 0.1 * 20 * 2 * 22 / 20, rel=1e-5) == value(
model.fs.unit.treated.flow_mass_comp[0, "C"]
)
assert pytest.approx(0, rel=1e-5) == value(
model.fs.unit.byproduct.flow_mass_comp[0, "C"]
)
        # Conservation checks would be similar to the above, as we would need to calculate generation
@pytest.mark.component
def test_report(self, model):
model.fs.unit.report()
class TestSIDORErrors:
@pytest.mark.unit
def test_reaction_list(self):
m = ConcreteModel()
m.db = Database(dbpath=local_path)
# Load data from YAML so that we can modify it
m.db._get_technology("test_sidor_data")
m.db._cached_files["test_sidor_data"]["default"] = {}
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(default={"solute_list": ["A", "B", "C"]})
with pytest.raises(
KeyError,
match="fs.unit - database provided does not contain a list of "
"reactions for this technology.",
):
m.fs.unit = DerivedSIDOR(
default={"property_package": m.fs.water_props, "database": m.db}
)
@pytest.mark.unit
def test_missing_conversion(self):
m = ConcreteModel()
m.db = Database(dbpath=local_path)
# Load data from YAML so that we can modify it
m.db._get_technology("test_sidor_data")
m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"] = {}
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(default={"solute_list": ["A", "B", "C"]})
with pytest.raises(
KeyError,
match="fs.unit - database provided does not "
"contain an entry for conversion for reaction Rxn3.",
):
m.fs.unit = DerivedSIDOR(
default={"property_package": m.fs.water_props, "database": m.db}
)
@pytest.mark.unit
def test_missing_key_reactant(self):
m = ConcreteModel()
m.db = Database(dbpath=local_path)
# Load data from YAML so that we can modify it
m.db._get_technology("test_sidor_data")
m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"] = {}
R3 = m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"]
R3["conversion"] = 0.5
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(default={"solute_list": ["A", "B", "C"]})
with pytest.raises(
KeyError,
match="fs.unit - database provided does not "
"contain an entry for key_reactant for reaction Rxn3.",
):
m.fs.unit = DerivedSIDOR(
default={"property_package": m.fs.water_props, "database": m.db}
)
@pytest.mark.unit
    def test_invalid_key_reactant(self):
m = ConcreteModel()
m.db = Database(dbpath=local_path)
# Load data from YAML so that we can modify it
m.db._get_technology("test_sidor_data")
m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"] = {}
R3 = m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"]
R3["conversion"] = 0.5
R3["key_reactant"] = "foo"
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(default={"solute_list": ["A", "B", "C"]})
with pytest.raises(
ValueError,
match="fs.unit - key_reactant foo for reaction Rxn3 "
"is not in the component list used by the assigned property "
"package.",
):
m.fs.unit = DerivedSIDOR(
default={"property_package": m.fs.water_props, "database": m.db}
)
@pytest.mark.unit
def test_missing_stoichiometry(self):
m = ConcreteModel()
m.db = Database(dbpath=local_path)
# Load data from YAML so that we can modify it
m.db._get_technology("test_sidor_data")
m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"] = {}
R3 = m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"]
R3["conversion"] = 0.5
R3["key_reactant"] = "A"
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(default={"solute_list": ["A", "B", "C"]})
with pytest.raises(
KeyError,
match="fs.unit - database provided does not "
"contain an entry for stoichiometry for reaction Rxn3.",
):
m.fs.unit = DerivedSIDOR(
default={"property_package": m.fs.water_props, "database": m.db}
)
@pytest.mark.unit
def test_ratio_and_order(self):
m = ConcreteModel()
m.db = Database(dbpath=local_path)
# Load data from YAML so that we can modify it
m.db._get_technology("test_sidor_data")
m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"] = {}
R3 = m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"]
R3["conversion"] = 0.5
R3["key_reactant"] = "A"
R3["stoichiometry"] = {"A": {"order": 1, "conversion_ratio": 1}}
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(default={"solute_list": ["A", "B", "C"]})
with pytest.raises(
RuntimeError,
match="fs.unit - database provides entries for both "
"conversion_ratio and reaction order in reaction Rxn3. "
"Please provide only one or the other.",
):
m.fs.unit = DerivedSIDOR(
default={"property_package": m.fs.water_props, "database": m.db}
)
@pytest.mark.unit
def test_no_ratio_or_order(self):
m = ConcreteModel()
m.db = Database(dbpath=local_path)
# Load data from YAML so that we can modify it
m.db._get_technology("test_sidor_data")
m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"] = {}
R3 = m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"]
R3["conversion"] = 0.5
R3["key_reactant"] = "A"
R3["stoichiometry"] = {"A": {}}
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(default={"solute_list": ["A", "B", "C"]})
with pytest.raises(
RuntimeError,
match="fs.unit - database provided does not "
"contain any information for conversion_ratio or reaction "
"order w.r.t. species A in reaction Rxn3.",
):
m.fs.unit = DerivedSIDOR(
default={"property_package": m.fs.water_props, "database": m.db}
)
@pytest.mark.unit
def test_order_no_mw(self):
m = ConcreteModel()
m.db = Database(dbpath=local_path)
# Load data from YAML so that we can modify it
m.db._get_technology("test_sidor_data")
m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"] = {}
R3 = m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"]
R3["conversion"] = 0.5
R3["key_reactant"] = "B"
R3["stoichiometry"] = {
"A": {"order": 1},
"B": {"order": 1, "molecular_weight": 1},
}
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(default={"solute_list": ["A", "B", "C"]})
with pytest.raises(
KeyError,
match="fs.unit - database provided does not "
"contain an entry for molecular_weight w.r.t. "
"species A in reaction Rxn3.",
):
m.fs.unit = DerivedSIDOR(
default={"property_package": m.fs.water_props, "database": m.db}
)
@pytest.mark.unit
def test_order_no_key_order(self):
m = ConcreteModel()
m.db = Database(dbpath=local_path)
# Load data from YAML so that we can modify it
m.db._get_technology("test_sidor_data")
m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"] = {}
R3 = m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"]
R3["conversion"] = 0.5
R3["key_reactant"] = "B"
R3["stoichiometry"] = {"A": {"order": 1, "molecular_weight": 1}}
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(default={"solute_list": ["A", "B", "C"]})
with pytest.raises(
KeyError,
match="fs.unit - database provided does not "
"contain an entry for order w.r.t. species "
"B in reaction Rxn3.",
):
m.fs.unit = DerivedSIDOR(
default={"property_package": m.fs.water_props, "database": m.db}
)
@pytest.mark.unit
def test_order_no_key_mw(self):
m = ConcreteModel()
m.db = Database(dbpath=local_path)
# Load data from YAML so that we can modify it
m.db._get_technology("test_sidor_data")
m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"] = {}
R3 = m.db._cached_files["test_sidor_data"]["default"]["reactions"]["Rxn3"]
R3["conversion"] = 0.5
R3["key_reactant"] = "B"
R3["stoichiometry"] = {
"A": {"order": 1, "molecular_weight": 1},
"B": {"order": 1},
}
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(default={"solute_list": ["A", "B", "C"]})
with pytest.raises(
KeyError,
match="fs.unit - database provided does not "
"contain an entry for molecular_weight w.r.t. "
"species B in reaction Rxn3.",
):
m.fs.unit = DerivedSIDOR(
default={"property_package": m.fs.water_props, "database": m.db}
)
|
py
|
1a5d79d12179996c3fb198fa1b3c125881622091
|
from decoder import Parser
from extract_training_data import FeatureExtractor
from conll_reader import conll_reader
import sys
def compare_parser(target, predict):
target_unlabeled = set((d.id,d.head) for d in target.deprels.values())
target_labeled = set((d.id,d.head,d.deprel) for d in target.deprels.values())
predict_unlabeled = set((d.id,d.head) for d in predict.deprels.values())
predict_labeled = set((d.id,d.head,d.deprel) for d in predict.deprels.values())
labeled_correct = len(predict_labeled.intersection(target_labeled))
unlabeled_correct = len(predict_unlabeled.intersection(target_unlabeled))
num_words = len(predict_labeled)
return labeled_correct, unlabeled_correct, num_words
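# Illustrative sketch (not part of the original script): micro averaging pools the
# correct/total counts over all sentences before dividing, while macro averaging
# averages the per-sentence scores, so short sentences carry relatively more weight
# under macro averaging. The numbers below are made up purely for demonstration.
def _example_micro_vs_macro():
    per_sentence = [(1, 2), (9, 10)]  # (labeled_correct, num_words) per sentence
    micro = sum(c for c, _ in per_sentence) / float(sum(n for _, n in per_sentence))  # 10/12 ~= 0.83
    macro = sum(c / float(n) for c, n in per_sentence) / len(per_sentence)  # (0.5 + 0.9) / 2 = 0.7
    return micro, macro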
if __name__ == "__main__":
WORD_VOCAB_FILE = 'data/words.vocab'
POS_VOCAB_FILE = 'data/pos.vocab'
try:
word_vocab_f = open(WORD_VOCAB_FILE,'r')
pos_vocab_f = open(POS_VOCAB_FILE,'r')
except FileNotFoundError:
print("Could not find vocabulary files {} and {}".format(WORD_VOCAB_FILE, POS_VOCAB_FILE))
sys.exit(1)
extractor = FeatureExtractor(word_vocab_f, pos_vocab_f)
parser = Parser(extractor, sys.argv[1])
total_labeled_correct = 0
total_unlabeled_correct = 0
total_words = 0
las_list = []
uas_list = []
count = 0
with open(sys.argv[2],'r') as in_file:
print("Evaluating. (Each . represents 100 test dependency trees)")
for dtree in conll_reader(in_file):
words = dtree.words()
pos = dtree.pos()
predict = parser.parse_sentence(words, pos)
labeled_correct, unlabeled_correct, num_words = compare_parser(dtree, predict)
las_s = labeled_correct / float(num_words)
uas_s = unlabeled_correct / float(num_words)
las_list.append(las_s)
uas_list.append(uas_s)
total_labeled_correct += labeled_correct
total_unlabeled_correct += unlabeled_correct
total_words += num_words
count +=1
if count % 100 == 0:
print(".",end="")
sys.stdout.flush()
print()
las_micro = total_labeled_correct / float(total_words)
uas_micro = total_unlabeled_correct / float(total_words)
las_macro = sum(las_list) / len(las_list)
uas_macro = sum(uas_list) / len(uas_list)
print("{} sentence.\n".format(len(las_list)))
print("Micro Avg. Labeled Attachment Score: {}".format(las_micro))
print("Micro Avg. Unlabeled Attachment Score: {}\n".format(uas_micro))
print("Macro Avg. Labeled Attachment Score: {}".format(las_macro))
print("Macro Avg. Unlabeled Attachment Score: {}".format(uas_macro))
|
py
|
1a5d7ad2a73d8646da049a4752418272fd9dde9f
|
from .analyzer import JSHintAnalyzer
from .issues_data import issues_data
analyzers = {
'jshint' :
{
'title' : 'JSHint',
'class' : JSHintAnalyzer,
'language' : 'javascript',
'issues_data' : issues_data,
},
}
|
py
|
1a5d7ad514b2d272e4abd3957dbec46e0db4c47d
|
from urllib.parse import urlparse
from django.contrib.sites.models import Site
from cms.api import add_plugin
from djangocms_url_manager.compat import get_page_placeholders
from .base import BaseUrlTestCase
class UrlManagerModelsTestCase(BaseUrlTestCase):
def test_get_absolute_url_page(self):
url = self._create_url(content_object=self.page)
parsed = urlparse(url.get_absolute_url())
self.assertEqual(parsed.netloc, "example.com")
self.assertEqual(parsed.path, "/en/test/")
def test_str_page(self):
url = self._create_url(content_object=self.page)
parsed = urlparse(str(url))
self.assertEqual(parsed.netloc, "example.com")
self.assertEqual(parsed.path, "/en/test/")
def test_get_absolute_url_manual_url(self):
url = self._create_url(manual_url="https://google.com")
self.assertEqual(url.get_absolute_url(), "https://google.com")
def test_str_manual_url(self):
url = self._create_url(manual_url="https://google.com")
self.assertEqual(str(url), "https://google.com")
def test_get_absolute_url_phone(self):
url = self._create_url(phone="555555555")
self.assertEqual(url.get_absolute_url(), "tel:555555555")
def test_str_phone(self):
url = self._create_url(phone="555555555")
self.assertEqual(str(url), "tel:555555555")
def test_get_absolute_url_mailto(self):
url = self._create_url(mailto="[email protected]")
self.assertEqual(url.get_absolute_url(), "mailto:[email protected]")
def test_str_mailto(self):
url = self._create_url(mailto="[email protected]")
self.assertEqual(str(url), "mailto:[email protected]")
def test_get_absolute_url_anchor(self):
url = self._create_url(anchor="foo")
self.assertEqual(url.get_absolute_url(), "#foo")
def test_str_anchor(self):
url = self._create_url(anchor="foo")
self.assertEqual(str(url), "#foo")
def test_url_str(self):
self.assertEqual(str(self.url), "//example.com/en/test/")
def test_urlplugin_str(self):
placeholder = get_page_placeholders(self.page, self.language).get(
slot="content"
)
plugin = add_plugin(
placeholder,
"HtmlLink",
language=self.language,
url=self.url,
label="Test URL plugin",
)
self.assertEqual(str(plugin), plugin.label)
class GetUrlTestCase(BaseUrlTestCase):
def _compare_page_get_url_result(self, url):
parsed = urlparse(url.get_url(url.site))
self.assertEqual(parsed.netloc, "example.com")
self.assertEqual(parsed.path, "/en/test/")
def test_get_url_obj(self):
self.assertEqual(self.url._get_url_obj(self.url.site), self.url)
def test_get_url_obj_other_site(self):
urloverride = self._create_url_override(self.url, self.site2, self.page2)
self.assertEqual(self.url._get_url_obj(self.site2), urloverride)
def test_get_url_obj_other_site_with_no_override(self):
site3 = Site.objects.create(name="bar.com", domain="bar.com")
self.assertEqual(self.url._get_url_obj(site3), self.url)
def test_get_url_page(self):
url = self._create_url(content_object=self.page)
parsed = urlparse(url.get_url(url.site))
self.assertEqual(parsed.netloc, "example.com")
self.assertEqual(parsed.path, "/en/test/")
def test_get_url_manual_url(self):
url = self._create_url(manual_url="https://google.com")
self.assertEqual(url.get_url(url.site), "https://google.com")
def test_get_url_relative_path(self):
url = self._create_url(manual_url="/some/random/path")
self.assertEqual(url.get_url(url.site), "/some/random/path")
def test_get_url_phone(self):
url = self._create_url(phone="555555555")
self.assertEqual(url.get_url(url.site), "tel:555555555")
def test_get_url_mailto(self):
url = self._create_url(mailto="[email protected]")
self.assertEqual(url.get_url(url.site), "mailto:[email protected]")
def test_get_url_no_data(self):
url = self._create_url()
self.assertEqual(url.get_url(url.site), "")
def test_get_url_anchor(self):
url = self._create_url(anchor="foo")
self.assertEqual(url.get_url(url.site), "#foo")
def test_get_url_page_combined_with_anchor(self):
url = self._create_url(content_object=self.page, anchor="foo")
parsed = urlparse(url.get_url(url.site))
self.assertEqual(parsed.netloc, "example.com")
self.assertEqual(parsed.path, "/en/test/")
self.assertEqual(parsed.fragment, "foo")
def test_get_url_phone_not_combined_with_anchor(self):
url = self._create_url(phone="555555555", anchor="foo")
self.assertEqual(url.get_url(url.site), "tel:555555555")
def test_get_url_phone_shadows_mailto(self):
url = self._create_url(
phone="555555555", mailto="[email protected]", anchor="foo"
)
self.assertEqual(url.get_url(url.site), "tel:555555555")
def test_get_url_manual_url_shadows_phone_and_mailto(self):
url = self._create_url(
manual_url="https://google.com", phone="555555555", anchor="foo"
)
self.assertEqual(url.get_url(url.site), "https://google.com")
def test_get_url_relative_path_shadows_phone_and_mailto(self):
url = self._create_url(
relative_path="/some/random/path", phone="555555555", anchor="foo"
)
self.assertEqual(url.get_url(url.site), "/some/random/path")
def test_get_url_page_shadows_manual_url_phone_and_mailto(self):
url = self._create_url(
content_object=self.page, manual_url="https://google.com", anchor="foo"
)
parsed = urlparse(url.get_url(url.site))
self.assertEqual(parsed.netloc, "example.com")
self.assertEqual(parsed.path, "/en/test/")
self.assertEqual(parsed.fragment, "foo")
|
py
|
1a5d7b661653c7b4b68c09f37640cd8d363c4260
|
# File: A (Python 2.4)
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
class AwardMaker(DistributedObjectGlobal):
pass
|
py
|
1a5d7cdb76e9f90afc31545465e87f20e9fa7fa1
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import numpy as np
import logging
import pandas as pd
def get_summary_fn(key):
summary_fn_map = {
"count": get_count,
"distance": get_distance,
"duration": get_duration,
"median_speed": get_median_speed,
"mean_speed": get_mean_speed
}
return summary_fn_map[key]
def get_count(mode_section_grouped_df):
ret_dict = {}
for (mode, mode_section_df) in mode_section_grouped_df:
ret_dict[mode] = len(mode_section_df)
return ret_dict
def get_distance(mode_section_grouped_df):
ret_dict = {}
for (mode, mode_section_df) in mode_section_grouped_df:
ret_dict[mode] = float(mode_section_df.distance.sum())
return ret_dict
def get_duration(mode_section_grouped_df):
ret_dict = {}
for (mode, mode_section_df) in mode_section_grouped_df:
ret_dict[mode] = float(mode_section_df.duration.sum())
return ret_dict
# Redirect from median to mean for backwards compatibility
# TODO: Remove in Dec 2022
def get_median_speed(mode_section_grouped_df):
return get_mean_speed(mode_section_grouped_df)
def get_mean_speed(mode_section_grouped_df):
ret_dict = {}
for (mode, mode_section_df) in mode_section_grouped_df:
# mean_speeds is a series with one row per section/trip where the
# value is the mean speed (distance/duration) for that section/trip
mean_speeds = mode_section_df.distance / mode_section_df.duration
mode_mean = mean_speeds.dropna().mean()
if np.isnan(mode_mean):
logging.debug("still found nan for mode %s, skipping")
else:
ret_dict[mode] = float(mode_mean)
return ret_dict
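# Illustrative sketch (not part of the original module): how the summary functions
# above could be applied to a pandas DataFrame of sections grouped by mode. The
# grouping column name and the sample values are assumptions for demonstration only.
def _example_usage():
    df = pd.DataFrame({
        "mode": ["WALKING", "WALKING", "BICYCLING"],
        "distance": [100.0, 250.0, 4000.0],
        "duration": [80.0, 200.0, 900.0],
    })
    grouped = df.groupby("mode")
    # e.g. {"count": {"WALKING": 2, "BICYCLING": 1}, "distance": {...}, ...}
    return {key: get_summary_fn(key)(grouped)
            for key in ("count", "distance", "duration", "mean_speed")}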
|
py
|
1a5d7cef4a4357dbee8fb80dadfd7cc30b929233
|
import os
from pint import UnitRegistry
# load up the registry to be used everywhere
ureg = UnitRegistry()
# add currency as a type of unit since it is not part of the default
ureg.define('usd = [currency]')
Q_ = ureg.Quantity
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CREDIT = "CREDIT"
CHECKING = "CHECKING"
VALID_ACCT_TYPES = [CREDIT, CHECKING]
DEBT_RATIO = "DEBT/CREDIT RATIO HIGH"
OVERDRAFT = "CHECKING ACCOUNT OVERDRAFT"
OVERCREDIT = "BALANCE HIGHER THAN CREDIT LIMIT"
ISSUE_NOTES = {DEBT_RATIO: "FICO recommends keeping a debt to limit ratio of under 25%",
OVERDRAFT: "Spent more money than you have in this checking account",
OVERCREDIT: "Credit card balance exceeds your limit."}
FOREVER_RECURRING = "recurring payment like forever"
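# Illustrative sketch (not part of the original module): the custom "usd" unit
# defined above behaves like any other pint unit, so currency quantities can be
# created and combined; the amounts here are made up for demonstration only.
_EXAMPLE_BALANCE = Q_(125.50, "usd") + Q_(10, "usd")  # -> 135.5 usd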
|
py
|
1a5d7d384fe0bad9e2c8babfcf2fd7481da4d345
|
import logging
from abc import ABC
from abc import abstractmethod
from typing import Tuple
import transformers
import tensorflow as tf
from tensorflow.keras import layers
logger = logging.getLogger('absa.model')
class ABSClassifier(tf.keras.Model, ABC):
"""
The model's aim is to classify the sentiment. The model contains the
fine-tuned language model, which holds most parameters. The classifier
itself is a tiny linear layer on top of a language model.
    We use the BERT language model because we can benefit from BERT's
next-sentence prediction and formulate the task as the sequence-pair
classification. Each example is described as one sequence in the format:
"[CLS] text subtokens [SEP] aspect subtokens [SEP]". The relation between
the text and aspect is encoded into the CLS token. The classifier just
makes a linear transformation of the final special CLS token representation.
The pipeline applies the softmax to get distribution over sentiment classes.
    A note on how to train the model: we start with the original BERT version
    as a basis, and we divide the training into two stages. Firstly, because
    BERT is pretrained on dry Wikipedia texts, we wish to bias the language
    model towards more informal language or a specific domain. To do so, we
    select texts close to the target domain and do the self-supervised
    **language model** post-training. The routine is the same as for the
    pre-training, but we need to carefully set up the optimization parameters.
    Secondly, we do regular supervised training. We train the whole model
using a labeled dataset to classify a sentiment.
    Please note that the package contains the submodule `absa.training`, where
    you can find complete routines to tune or train either the language model
    or the classifier. Check out the examples on the package website.
References:
[BERT: Pre-training of Deep Bidirectional Transformers for Language
Understanding](https://arxiv.org/abs/1810.04805)
[Utilizing BERT for Aspect-Based Sentiment Analysis via Constructing
Auxiliary Sentence](http://arxiv.org/abs/1903.09588)
[BERT Post-Training for Review Reading Comprehension and Aspect-based
Sentiment Analysis](http://arxiv.org/abs/1904.02232)
[Adapt or Get Left Behind: Domain Adaptation through BERT Language
Model Finetuning for Aspect-Target Sentiment Classification]
(http://arxiv.org/abs/1908.11860)
"""
@abstractmethod
def call(
self,
token_ids: tf.Tensor,
attention_mask: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
training: bool = False,
**bert_kwargs
) -> Tuple[tf.Tensor, Tuple[tf.Tensor, ...], Tuple[tf.Tensor, ...]]:
"""
Perform the sentiment classification. We formulate the task as the
sequence-pair classification. Each example is described as one
sequence in the format:
"[CLS] text subtokens [SEP] aspect subtokens [SEP]".
Parameters
----------
token_ids
Indices of input sequence subtokens in the vocabulary.
attention_mask
Bool mask used to avoid performing attention on padding token
indices in a batch (this is not related with masks from the
language modeling task).
token_type_ids
Segment token indices to indicate first and second portions
of the inputs, zeros and ones.
training
            Whether to activate dropout (True) during training or
            to de-activate it (False) for evaluation.
bert_kwargs
Auxiliary parameters which we forward directly to
the **transformers** language model implementation.
Returns
-------
logits
The classifier final outputs.
hidden_states
Tuple of tensors: one for the output of the embeddings and one
for the output of each layer.
attentions
Tuple of tensors: Attentions weights after the attention softmax,
used to compute the weighted average in the self-attention heads.
"""
def force_to_return_details(kwargs: dict):
""" Force a model to output attentions and hidden states due to the fixed
definition of the output batch (the well-defined interface). """
condition = not kwargs.get('output_attentions', False) or \
not kwargs.get('output_hidden_states', False)
if condition:
logger.info('Model should output attentions and hidden states.')
kwargs['output_attentions'] = True
kwargs['output_hidden_states'] = True
class BertABSCConfig(transformers.BertConfig):
def __init__(self, num_polarities: int = 3, **kwargs):
force_to_return_details(kwargs)
super().__init__(**kwargs)
self.num_polarities = num_polarities
class BertABSClassifier(ABSClassifier, transformers.TFBertPreTrainedModel):
def __init__(self, config: BertABSCConfig, **kwargs):
super().__init__(config, **kwargs)
self.bert = transformers.TFBertMainLayer(
config, name="bert")
initializer = transformers.modeling_tf_utils.get_initializer(
config.initializer_range)
self.dropout = layers.Dropout(config.hidden_dropout_prob)
self.classifier = layers.Dense(
config.num_polarities,
kernel_initializer=initializer,
name='classifier'
)
def call(
self,
token_ids: tf.Tensor,
attention_mask: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
training: bool = False,
**bert_kwargs
) -> Tuple[tf.Tensor, Tuple[tf.Tensor, ...], Tuple[tf.Tensor, ...]]:
outputs = self.bert(
inputs=token_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
training=training,
**bert_kwargs
)
sequence_output, pooled_output, hidden_states, attentions = outputs
pooled_output = self.dropout(pooled_output, training=training)
logits = self.classifier(pooled_output)
return logits, hidden_states, attentions
|
py
|
1a5d7d94d77146d00c1e1b90c4bb89fd0c512df5
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import hashlib
import logging
import os
import subprocess
from dataclasses import dataclass
from enum import Enum
from textwrap import dedent
from typing import Iterable, Sequence
from pants.core.subsystems import python_bootstrap
from pants.core.subsystems.python_bootstrap import PythonBootstrap
from pants.engine.collection import DeduplicatedCollection
from pants.engine.engine_aware import EngineAwareReturnType
from pants.engine.fs import CreateDigest, FileContent
from pants.engine.internals.native_engine import Digest
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import FallibleProcessResult, Process, ProcessCacheScope, ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.util.logging import LogLevel
from pants.util.meta import frozen_after_init
from pants.util.ordered_set import OrderedSet
from pants.util.strutil import create_path_env_var, pluralize
logger = logging.getLogger(__name__)
# -------------------------------------------------------------------------------------------
# `BinaryPath` types
# -------------------------------------------------------------------------------------------
# TODO(#14492): This should be configurable via `[system-binaries]` subsystem, likely per-binary.
SEARCH_PATHS = ("/usr/bin", "/bin", "/usr/local/bin")
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPath:
path: str
fingerprint: str
def __init__(self, path: str, fingerprint: str | None = None) -> None:
self.path = path
self.fingerprint = self._fingerprint() if fingerprint is None else fingerprint
@staticmethod
def _fingerprint(content: bytes | bytearray | memoryview | None = None) -> str:
hasher = hashlib.sha256() if content is None else hashlib.sha256(content)
return hasher.hexdigest()
@classmethod
def fingerprinted(
cls, path: str, representative_content: bytes | bytearray | memoryview
) -> BinaryPath:
return cls(path, fingerprint=cls._fingerprint(representative_content))
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPathTest:
args: tuple[str, ...]
fingerprint_stdout: bool
def __init__(self, args: Iterable[str], fingerprint_stdout: bool = True) -> None:
self.args = tuple(args)
self.fingerprint_stdout = fingerprint_stdout
class SearchPath(DeduplicatedCollection[str]):
"""The search path for binaries; i.e.: the $PATH."""
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPathRequest:
"""Request to find a binary of a given name.
If `check_file_entries` is `True` a BinaryPathRequest will consider any entries in the
`search_path` that are file paths in addition to traditional directory paths.
If a `test` is specified all binaries that are found will be executed with the test args and
only those binaries whose test executions exit with return code 0 will be retained.
Additionally, if test execution includes stdout content, that will be used to fingerprint the
binary path so that upgrades and downgrades can be detected. A reasonable test for many programs
might be `BinaryPathTest(args=["--version"])` since it will both ensure the program runs and
also produce stdout text that changes upon upgrade or downgrade of the binary at the discovered
path.
"""
search_path: SearchPath
binary_name: str
check_file_entries: bool
test: BinaryPathTest | None
def __init__(
self,
*,
search_path: Iterable[str],
binary_name: str,
check_file_entries: bool = False,
test: BinaryPathTest | None = None,
) -> None:
self.search_path = SearchPath(search_path)
self.binary_name = binary_name
self.check_file_entries = check_file_entries
self.test = test
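# Illustrative sketch (not part of the original module): a request that searches the
# standard locations for `sed` and keeps only executables whose `--version` run
# succeeds, fingerprinting each by that output. The binary name is an assumption
# chosen purely for demonstration.
_EXAMPLE_SED_REQUEST = BinaryPathRequest(
    binary_name="sed",
    search_path=SEARCH_PATHS,
    test=BinaryPathTest(args=["--version"]),
)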
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPaths(EngineAwareReturnType):
binary_name: str
paths: tuple[BinaryPath, ...]
def __init__(self, binary_name: str, paths: Iterable[BinaryPath] | None = None):
self.binary_name = binary_name
self.paths = tuple(OrderedSet(paths) if paths else ())
def message(self) -> str:
if not self.paths:
return f"failed to find {self.binary_name}"
found_msg = f"found {self.binary_name} at {self.paths[0]}"
if len(self.paths) > 1:
found_msg = f"{found_msg} and {pluralize(len(self.paths) - 1, 'other location')}"
return found_msg
@property
def first_path(self) -> BinaryPath | None:
"""Return the first path to the binary that was discovered, if any."""
return next(iter(self.paths), None)
def first_path_or_raise(self, request: BinaryPathRequest, *, rationale: str) -> BinaryPath:
"""Return the first path to the binary that was discovered, if any."""
first_path = self.first_path
if not first_path:
raise BinaryNotFoundError.from_request(request, rationale=rationale)
return first_path
class BinaryNotFoundError(EnvironmentError):
@classmethod
def from_request(
cls,
request: BinaryPathRequest,
*,
rationale: str | None = None,
alternative_solution: str | None = None,
) -> BinaryNotFoundError:
"""When no binary is found via `BinaryPaths`, and it is not recoverable.
:param rationale: A short description of why this binary is needed, e.g.
"download the tools Pants needs" or "run Python programs".
:param alternative_solution: A description of what else users can do to fix the issue,
beyond installing the program. For example, "Alternatively, you can set the option
`--python-bootstrap-search-path` to change the paths searched."
"""
msg = (
f"Cannot find `{request.binary_name}` on `{sorted(request.search_path)}`. Please "
"ensure that it is installed"
)
msg += f" so that Pants can {rationale}." if rationale else "."
if alternative_solution:
msg += f"\n\n{alternative_solution}"
return BinaryNotFoundError(msg)
# -------------------------------------------------------------------------------------------
# Binary shims
# Creates a Digest with a shim for each requested binary in a directory suitable for PATH.
# -------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class BinaryShimsRequest:
"""Request to create shims for one or more system binaries."""
output_directory: str
rationale: str = dataclasses.field(compare=False)
# Create shims for provided binary paths
paths: tuple[BinaryPath, ...] = tuple()
# Create shims for the provided binary names after looking up the paths.
requests: tuple[BinaryPathRequest, ...] = tuple()
@classmethod
def for_binaries(
cls, *names: str, rationale: str, output_directory: str, search_path: Sequence[str]
) -> BinaryShimsRequest:
return cls(
requests=tuple(
BinaryPathRequest(binary_name=binary_name, search_path=search_path)
for binary_name in names
),
rationale=rationale,
output_directory=output_directory,
)
@classmethod
def for_paths(
cls, *paths: BinaryPath, rationale: str, output_directory: str
) -> BinaryShimsRequest:
return cls(paths=paths, rationale=rationale, output_directory=output_directory)
@dataclass(frozen=True)
class BinaryShims:
"""The shims created for a BinaryShimsRequest is placed in `bin_directory` of the `digest`.
The purpose of these shims is so that a Process may be executed with `bin_directory` added to
PATH so that the binaries are available for execution.
The alternative is to add the directories hosting the binaries directly, but that opens up for
many more unrelated binaries to also be executable from PATH, leaking into the sandbox
unnecessarily.
"""
bin_directory: str
digest: Digest
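# Illustrative sketch (not part of the original module): how a rule body could place
# the shim directory on PATH for a Process. The shim request, output directory and
# the command being run are assumptions for demonstration only.
async def _example_process_with_shims() -> Process:
    shims = await Get(
        BinaryShims,
        BinaryShimsRequest,
        BinaryShimsRequest.for_binaries(
            "mkdir",
            "chmod",
            rationale="demonstrate binary shims",
            output_directory=".bin",
            search_path=SEARCH_PATHS,
        ),
    )
    return Process(
        argv=("mkdir", "-p", "example-out"),
        input_digest=shims.digest,
        env={"PATH": shims.bin_directory},
        description="Example process that resolves binaries via shims",
    )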
# -------------------------------------------------------------------------------------------
# Binaries
# -------------------------------------------------------------------------------------------
class BashBinary(BinaryPath):
"""The `bash` binary."""
DEFAULT_SEARCH_PATH = SearchPath(("/usr/bin", "/bin", "/usr/local/bin"))
@dataclass(frozen=True)
class BashBinaryRequest:
search_path: SearchPath = BashBinary.DEFAULT_SEARCH_PATH
class PythonBinary(BinaryPath):
"""A Python3 interpreter for use by `@rule` code as an alternative to BashBinary scripts.
Python is usable for `@rule` scripting independently of `pants.backend.python`, but currently
thirdparty dependencies are not supported, because PEX lives in that backend.
TODO: Consider extracting PEX out into the core in order to support thirdparty dependencies.
"""
# Note that updating this will impact the `archive` target defined in `core/target_types.py`.
class ArchiveFormat(Enum):
TAR = "tar"
TGZ = "tar.gz"
TBZ2 = "tar.bz2"
TXZ = "tar.xz"
ZIP = "zip"
class ZipBinary(BinaryPath):
def create_archive_argv(
self, output_filename: str, input_files: Sequence[str]
) -> tuple[str, ...]:
return (self.path, output_filename, *input_files)
class UnzipBinary(BinaryPath):
def extract_archive_argv(self, archive_path: str, extract_path: str) -> tuple[str, ...]:
# Note that the `output_dir` does not need to already exist.
# The caller should validate that it's a valid `.zip` file.
return (self.path, archive_path, "-d", extract_path)
@dataclass(frozen=True)
class GunzipBinary:
python: PythonBinary
def extract_archive_argv(self, archive_path: str, extract_path: str) -> tuple[str, ...]:
archive_name = os.path.basename(archive_path)
dest_file_name = os.path.splitext(archive_name)[0]
dest_path = os.path.join(extract_path, dest_file_name)
script = dedent(
f"""
import gzip
import shutil
with gzip.GzipFile(filename={archive_path!r}, mode="rb") as source:
with open({dest_path!r}, "wb") as dest:
shutil.copyfileobj(source, dest)
"""
)
return (self.python.path, "-c", script)
class TarBinary(BinaryPath):
def create_archive_argv(
self, output_filename: str, input_files: Sequence[str], tar_format: ArchiveFormat
) -> tuple[str, ...]:
# Note that the parent directory for the output_filename must already exist.
#
# We do not use `-a` (auto-set compression) because it does not work with older tar
# versions. Not all tar implementations will support these compression formats - in that
# case, the user will need to choose a different format.
compression = {ArchiveFormat.TGZ: "z", ArchiveFormat.TBZ2: "j", ArchiveFormat.TXZ: "J"}.get(
tar_format, ""
)
return (self.path, f"c{compression}f", output_filename, *input_files)
def extract_archive_argv(
self, archive_path: str, extract_path: str, *, archive_suffix: str
) -> tuple[str, ...]:
# Note that the `output_dir` must already exist.
# The caller should validate that it's a valid `.tar` file.
prog_args = ("-Ilz4",) if archive_suffix == ".tar.lz4" else ()
return (self.path, *prog_args, "-xf", archive_path, "-C", extract_path)
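# Illustrative sketch (not part of the original module): the argv that
# TarBinary.create_archive_argv produces for a gzip-compressed archive; the tar
# path used here is an assumption for demonstration only.
_EXAMPLE_TAR_ARGV = TarBinary("/usr/bin/tar").create_archive_argv(
    "out.tar.gz", ["a.txt"], ArchiveFormat.TGZ
)  # == ("/usr/bin/tar", "czf", "out.tar.gz", "a.txt")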
class MkdirBinary(BinaryPath):
pass
class ChmodBinary(BinaryPath):
pass
class DiffBinary(BinaryPath):
pass
class ReadlinkBinary(BinaryPath):
pass
class GitBinaryException(Exception):
pass
class GitBinary(BinaryPath):
def _invoke_unsandboxed(self, cmd: list[str]) -> str:
"""Invoke the given git command, _without_ the sandboxing provided by the `Process` API.
This API is for internal use only: users should prefer to consume methods of the
`GitWorktree` class.
"""
cmd = [self.path, *cmd]
self._log_call(cmd)
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
# Binary DNE or is not executable
cmd_str = " ".join(cmd)
raise GitBinaryException(f"Failed to execute command {cmd_str}: {e!r}")
out, err = process.communicate()
self._check_result(cmd, process.returncode, err.decode())
return out.decode().strip()
def _check_result(
self, cmd: Iterable[str], result: int, failure_msg: str | None = None
) -> None:
if result != 0:
cmd_str = " ".join(cmd)
raise GitBinaryException(failure_msg or f"{cmd_str} failed with exit code {result}")
def _log_call(self, cmd: Iterable[str]) -> None:
logger.debug("Executing: " + " ".join(cmd))
# -------------------------------------------------------------------------------------------
# Binaries Rules
# -------------------------------------------------------------------------------------------
@rule
async def create_binary_shims(
binary_shims_request: BinaryShimsRequest,
bash: BashBinary,
mkdir: MkdirBinary,
chmod: ChmodBinary,
) -> BinaryShims:
"""Creates a bin directory with shims for all requested binaries.
Useful as input digest for a Process to setup a `bin` directory for PATH.
"""
paths = binary_shims_request.paths
requests = binary_shims_request.requests
if requests:
all_binary_paths = await MultiGet(
Get(BinaryPaths, BinaryPathRequest, request) for request in requests
)
first_paths = tuple(
binary_paths.first_path_or_raise(request, rationale=binary_shims_request.rationale)
for binary_paths, request in zip(all_binary_paths, requests)
)
paths += first_paths
all_paths = (binary.path for binary in paths)
bin_relpath = binary_shims_request.output_directory
script = ";".join(
(
f"{mkdir.path} -p {bin_relpath}",
*(
" && ".join(
[
(
# The `printf` cmd is a bash builtin, so always available.
f"printf '{_create_shim(bash.path, binary_path)}'"
f" > '{bin_relpath}/{os.path.basename(binary_path)}'"
),
f"{chmod.path} +x '{bin_relpath}/{os.path.basename(binary_path)}'",
]
)
for binary_path in all_paths
),
)
)
result = await Get(
ProcessResult,
Process(
argv=(bash.path, "-c", script),
description=f"Setup binary shims so that Pants can {binary_shims_request.rationale}.",
output_directories=(bin_relpath,),
level=LogLevel.DEBUG,
),
)
return BinaryShims(bin_relpath, result.output_digest)
def _create_shim(bash: str, binary: str) -> str:
"""The binary shim script to be placed in the output directory for the digest."""
return dedent(
f"""\
#!{bash}
exec "{binary}" "$@"
"""
)
@rule(desc="Finding the `bash` binary", level=LogLevel.DEBUG)
async def find_bash(bash_request: BashBinaryRequest) -> BashBinary:
request = BinaryPathRequest(
binary_name="bash",
search_path=bash_request.search_path,
test=BinaryPathTest(args=["--version"]),
)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path
if not first_path:
raise BinaryNotFoundError.from_request(request)
return BashBinary(first_path.path, first_path.fingerprint)
@rule
async def get_bash() -> BashBinary:
# Expose bash to external consumers.
return await Get(BashBinary, BashBinaryRequest())
@rule
async def find_binary(request: BinaryPathRequest) -> BinaryPaths:
# If we are not already locating bash, recurse to locate bash to use it as an absolute path in
# our shebang. This avoids mixing locations that we would search for bash into the search paths
# of the request we are servicing.
# TODO(#10769): Replace this script with a statically linked native binary so we don't
    # depend on /bin/bash being available on the Process host.
if request.binary_name == "bash":
shebang = "#!/usr/bin/env bash"
else:
bash = await Get(BashBinary, BashBinaryRequest())
shebang = f"#!{bash.path}"
script_path = "./find_binary.sh"
script_header = dedent(
f"""\
{shebang}
set -euox pipefail
CHECK_FILE_ENTRIES={'1' if request.check_file_entries else ''}
"""
)
script_body = dedent(
"""\
for path in ${PATH//:/ }; do
if [[ -d "${path}" ]]; then
# Handle traditional directory PATH element.
maybe_exe="${path}/$1"
elif [[ -n "${CHECK_FILE_ENTRIES}" ]]; then
# Handle PATH elements that are filenames to allow for precise selection.
maybe_exe="${path}"
else
maybe_exe=
fi
if [[ "$1" == "${maybe_exe##*/}" && -f "${maybe_exe}" && -x "${maybe_exe}" ]]
then
echo "${maybe_exe}"
fi
done
"""
)
script_content = script_header + script_body
script_digest = await Get(
Digest,
CreateDigest([FileContent(script_path, script_content.encode(), is_executable=True)]),
)
# Some subtle notes about executing this script:
#
# - We run the script with `ProcessResult` instead of `FallibleProcessResult` so that we
# can catch bugs in the script itself, given an earlier silent failure.
# - We set `ProcessCacheScope.PER_RESTART_SUCCESSFUL` to force re-run since any binary found
# on the host system today could be gone tomorrow. Ideally we'd only do this for local
# processes since all known remoting configurations include a static container image as
# part of their cache key which automatically avoids this problem. See #10769 for a
# solution that is less of a tradeoff.
search_path = create_path_env_var(request.search_path)
result = await Get(
ProcessResult,
Process(
description=f"Searching for `{request.binary_name}` on PATH={search_path}",
level=LogLevel.DEBUG,
input_digest=script_digest,
argv=[script_path, request.binary_name],
env={"PATH": search_path},
cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
),
)
binary_paths = BinaryPaths(binary_name=request.binary_name)
found_paths = result.stdout.decode().splitlines()
if not request.test:
return dataclasses.replace(binary_paths, paths=[BinaryPath(path) for path in found_paths])
results = await MultiGet(
Get(
FallibleProcessResult,
Process(
description=f"Test binary {path}.",
level=LogLevel.DEBUG,
argv=[path, *request.test.args],
# NB: Since a failure is a valid result for this script, we always cache it for
# `pantsd`'s lifetime, regardless of success or failure.
cache_scope=ProcessCacheScope.PER_RESTART_ALWAYS,
),
)
for path in found_paths
)
return dataclasses.replace(
binary_paths,
paths=[
(
BinaryPath.fingerprinted(path, result.stdout)
if request.test.fingerprint_stdout
else BinaryPath(path, result.stdout.decode())
)
for path, result in zip(found_paths, results)
if result.exit_code == 0
],
)
@rule(desc="Finding a `python` binary", level=LogLevel.TRACE)
async def find_python(python_bootstrap: PythonBootstrap) -> PythonBinary:
# PEX files are compatible with bootstrapping via Python 2.7 or Python 3.5+, but we select 3.6+
# for maximum compatibility with internal scripts.
interpreter_search_paths = python_bootstrap.interpreter_search_paths()
all_python_binary_paths = await MultiGet(
Get(
BinaryPaths,
BinaryPathRequest(
search_path=interpreter_search_paths,
binary_name=binary_name,
check_file_entries=True,
test=BinaryPathTest(
args=[
"-c",
# N.B.: The following code snippet must be compatible with Python 3.6+.
#
# We hash the underlying Python interpreter executable to ensure we detect
# changes in the real interpreter that might otherwise be masked by Pyenv
# shim scripts found on the search path. Naively, just printing out the full
# version_info would be enough, but that does not account for supported abi
# changes (e.g.: a pyenv switch from a py27mu interpreter to a py27m
# interpreter.)
#
# When hashing, we pick 8192 for efficiency of reads and fingerprint updates
# (writes) since it's a common OS buffer size and an even multiple of the
# hash block size.
dedent(
"""\
import sys
major, minor = sys.version_info[:2]
if not (major == 3 and minor >= 6):
sys.exit(1)
import hashlib
hasher = hashlib.sha256()
with open(sys.executable, "rb") as fp:
for chunk in iter(lambda: fp.read(8192), b""):
hasher.update(chunk)
sys.stdout.write(hasher.hexdigest())
"""
),
],
fingerprint_stdout=False, # We already emit a usable fingerprint to stdout.
),
),
)
for binary_name in python_bootstrap.interpreter_names
)
for binary_paths in all_python_binary_paths:
path = binary_paths.first_path
if path:
return PythonBinary(
path=path.path,
fingerprint=path.fingerprint,
)
raise BinaryNotFoundError(
"Was not able to locate a Python interpreter to execute rule code.\n"
"Please ensure that Python is available in one of the locations identified by "
"`[python-bootstrap] search_path`, which currently expands to:\n"
f" {interpreter_search_paths}"
)
@rule(desc="Finding the `zip` binary", level=LogLevel.DEBUG)
async def find_zip() -> ZipBinary:
request = BinaryPathRequest(
binary_name="zip", search_path=SEARCH_PATHS, test=BinaryPathTest(args=["-v"])
)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(request, rationale="create `.zip` archives")
return ZipBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `unzip` binary", level=LogLevel.DEBUG)
async def find_unzip() -> UnzipBinary:
request = BinaryPathRequest(
binary_name="unzip", search_path=SEARCH_PATHS, test=BinaryPathTest(args=["-v"])
)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(
request, rationale="download the tools Pants needs to run"
)
return UnzipBinary(first_path.path, first_path.fingerprint)
@rule
def find_gunzip(python: PythonBinary) -> GunzipBinary:
return GunzipBinary(python)
@rule(desc="Finding the `tar` binary", level=LogLevel.DEBUG)
async def find_tar() -> TarBinary:
request = BinaryPathRequest(
binary_name="tar", search_path=SEARCH_PATHS, test=BinaryPathTest(args=["--version"])
)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(
request, rationale="download the tools Pants needs to run"
)
return TarBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `mkdir` binary", level=LogLevel.DEBUG)
async def find_mkdir() -> MkdirBinary:
request = BinaryPathRequest(binary_name="mkdir", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(request, rationale="create directories")
return MkdirBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `chmod` binary", level=LogLevel.DEBUG)
async def find_chmod() -> ChmodBinary:
request = BinaryPathRequest(binary_name="chmod", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(
request, rationale="change file modes or Access Control Lists"
)
return ChmodBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `diff` binary", level=LogLevel.DEBUG)
async def find_diff() -> DiffBinary:
request = BinaryPathRequest(binary_name="diff", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(request, rationale="compare files line by line")
return DiffBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `readlink` binary", level=LogLevel.DEBUG)
async def find_readlink() -> ReadlinkBinary:
request = BinaryPathRequest(binary_name="readlink", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
    first_path = paths.first_path_or_raise(request, rationale="dereference symlinks")
return ReadlinkBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `git` binary", level=LogLevel.DEBUG)
async def find_git() -> GitBinary:
request = BinaryPathRequest(binary_name="git", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(
request, rationale="track changes to files in your build environment"
)
return GitBinary(first_path.path, first_path.fingerprint)
# -------------------------------------------------------------------------------------------
# Rules for lazy requests
# TODO(#12946): Get rid of this when it becomes possible to use `Get()` with only one arg.
# -------------------------------------------------------------------------------------------
class ZipBinaryRequest:
pass
class UnzipBinaryRequest:
pass
class GunzipBinaryRequest:
pass
class TarBinaryRequest:
pass
class MkdirBinaryRequest:
pass
class ChmodBinaryRequest:
pass
class DiffBinaryRequest:
pass
class ReadlinkBinaryRequest:
pass
class GitBinaryRequest:
pass
@rule
async def find_zip_wrapper(_: ZipBinaryRequest, zip_binary: ZipBinary) -> ZipBinary:
return zip_binary
@rule
async def find_unzip_wrapper(_: UnzipBinaryRequest, unzip_binary: UnzipBinary) -> UnzipBinary:
return unzip_binary
@rule
async def find_gunzip_wrapper(_: GunzipBinaryRequest, gunzip: GunzipBinary) -> GunzipBinary:
return gunzip
@rule
async def find_tar_wrapper(_: TarBinaryRequest, tar_binary: TarBinary) -> TarBinary:
return tar_binary
@rule
async def find_mkdir_wrapper(_: MkdirBinaryRequest, mkdir_binary: MkdirBinary) -> MkdirBinary:
return mkdir_binary
@rule
async def find_readlink_wrapper(
_: ReadlinkBinaryRequest, readlink_binary: ReadlinkBinary
) -> ReadlinkBinary:
return readlink_binary
@rule
async def find_chmod_wrapper(_: ChmodBinaryRequest, chmod_binary: ChmodBinary) -> ChmodBinary:
return chmod_binary
@rule
async def find_diff_wrapper(_: DiffBinaryRequest, diff_binary: DiffBinary) -> DiffBinary:
return diff_binary
@rule
async def find_git_wrapper(_: GitBinaryRequest, git_binary: GitBinary) -> GitBinary:
return git_binary
def rules():
return [*collect_rules(), *python_bootstrap.rules()]
# -------------------------------------------------------------------------------------------
# Rules for fallible binaries
# -------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class MaybeGitBinary:
git_binary: GitBinary | None = None
@rule(desc="Finding the `git` binary", level=LogLevel.DEBUG)
async def maybe_find_git() -> MaybeGitBinary:
request = BinaryPathRequest(binary_name="git", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path
if not first_path:
return MaybeGitBinary()
return MaybeGitBinary(GitBinary(first_path.path, first_path.fingerprint))
class MaybeGitBinaryRequest:
pass
@rule
async def maybe_find_git_wrapper(
_: MaybeGitBinaryRequest, maybe_git_binary: MaybeGitBinary
) -> MaybeGitBinary:
return maybe_git_binary
|
py
|
1a5d7d9f85bc01e4397091fce3024bc1ed0282eb
|
# Filename: zbnode.py
# Description: This contains all the values, code (and static data structures) needed to act as a ZigBee node
# and to communicate with AlertMe nodes
from pyalertme.node import Node
import time
import threading
from xbee import ZigBee
import copy
import struct
import pprint
# ZigBee Addressing
BROADCAST_LONG = b'\x00\x00\x00\x00\x00\x00\xff\xff'
BROADCAST_SHORT = b'\xff\xfe'
# ZigBee Profile IDs
PROFILE_ID_ZDP = b'\x00\x00' # ZigBee Device Profile
PROFILE_ID_HA = b'\x01\x04' # HA Device Profile
PROFILE_ID_LL = b'\xc0\x5e' # Light Link Profile
PROFILE_ID_ALERTME = b'\xc2\x16' # AlertMe Private Profile
# ZigBee Endpoints
ENDPOINT_ZDO = b'\x00' # ZigBee Device Objects Endpoint
ENDPOINT_ALERTME = b'\x02' # AlertMe / Iris Endpoint
# ZDP Status
ZDP_STATUS_OK = b'\x00'
ZDP_STATUS_INVALID = b'\x80'
ZDP_STATUS_NOT_FOUND = b'\x81'
# ZDO Clusters
# See:
# http://ftp1.digi.com/support/images/APP_NOTE_XBee_ZigBee_Device_Profile.pdf
# http://www.cel.com/pdf/misc/zic09_zdp_api.pdf
CLUSTER_ID_ZDO_NWK_ADDR_REQ = b'\x00\x00' # Network (16-bit) Address Request
CLUSTER_ID_ZDO_NWK_ADDR_RSP = b'\x80\x00' # Network (16-bit) Address Response
CLUSTER_ID_ZDO_SIMPLE_DESC_REQ = b'\x00\x04' # Simple Descriptor Request
CLUSTER_ID_ZDO_ACTIVE_EP_REQ = b'\x00\x05' # Active Endpoints Request
CLUSTER_ID_ZDO_ACTIVE_EP_RSP = b'\x80\x05' # Active Endpoints Response
CLUSTER_ID_ZDO_MATCH_DESC_REQ = b'\x00\x06' # Match Descriptor Request
CLUSTER_ID_ZDO_MATCH_DESC_RSP = b'\x80\x06' # Match Descriptor Response
CLUSTER_ID_ZDO_END_DEVICE_ANNCE = b'\x00\x13' # End Device Announce
CLUSTER_ID_ZDO_MGMT_RTG_REQ = b'\x00\x32' # Management Routing Request
CLUSTER_ID_ZDO_MGMT_RTG_RSP = b'\x80\x32' # Management Routing Response (seen in outputs as x802 but the '2' char is really 0x32 printed as a char)
CLUSTER_ID_ZDO_MGMT_PERMIT_JOIN_REQ = b'\x00\x36' # Permit Join Request Request
CLUSTER_ID_ZDO_MGMT_NETWORK_UPDATE = b'\x80\x36' # Management Network Update (seen in outputs as x806 but the '6' char is really 0x36 printed as a char)
# AlertMe Clusters
# See:
# http://www.desert-home.com/2015/06/hacking-into-iris-door-sensor-part-4.html
CLUSTER_ID_AM_SWITCH = b'\x00\xee' # SmartPlug Switch Cluster
CLUSTER_ID_AM_POWER = b'\x00\xef' # Power Information
CLUSTER_ID_AM_STATUS = b'\x00\xf0' # Device Status
CLUSTER_ID_AM_TAMPER = b'\x00\xf2' # Device Tamper Cluster
CLUSTER_ID_AM_BUTTON = b'\x00\xf3' # Keyfob / Button
CLUSTER_ID_AM_DISCOVERY = b'\x00\xf6' # Device Discovery
CLUSTER_ID_AM_SECURITY = b'\x05\x00' # Security
# AlertMe Cluster Commands
# Security IasZoneCluster commands cluster b'\x05\x00' = 1280
CLUSTER_CMD_AM_SEC_STATUS_CHANGE = b'\x00' # Security Event (Sensors)
CLUSTER_CMD_AM_SEC_ENROLL_REQ = b'\x01' #
# AmGeneralCluster commands cluster b'\x00\xf0' = 240
CLUSTER_CMD_AM_SET_RTC_CMD = b'\x00' # SET_RTC_CMD = 0
CLUSTER_CMD_AM_RTC_CMD_REQ = b'\x80' # REQUEST_RTC_CMD = 128
CLUSTER_CMD_AM_LIFESIGN_CMD = b'\xfb' # LIFESIGN_CMD = 251
CLUSTER_CMD_AM_SET_MODE_CMD = b'\xfa' # SET_MODE_CMD = 250
CLUSTER_CMD_AM_STOP_POLLING_CMD = b'\xfd' # STOP_POLLING_CMD = 253
DEVICE_MODE_NORMAL_OPS = 0
DEVICE_MODE_RANGE_TEST = 1
DEVICE_MODE_TEST = 2
DEVICE_MODE_SEEKING = 3
DEVICE_MODE_IDLE = 4
DEVICE_MODE_QUIESCENT = 5
DEVICE_MODE_OPT_NONE = 0
DEVICE_MODE_OPT_SET_HNF = 1
DEVICE_MODE_OPT_CLEAR_HNF = 2
# AmPowerCtrlCluster commands cluster b'\x00\xee' = 238
CLUSTER_CMD_AM_STATE_REQ = b'\x01' # CMD_SET_OPERATING_MODE = 1 # State Request (SmartPlug)
CLUSTER_CMD_AM_STATE_CHANGE = b'\x02' # CMD_SET_RELAY_STATE = 2 # Change State (SmartPlug)
CLUSTER_CMD_AM_STATE_REPORT_REQ = b'\x03' # CMD_REQUEST_REPORT = 3
CLUSTER_CMD_AM_STATE_RESP = b'\x80' # CMD_STATUS_REPORT = 128 # Switch Status Update
# AmPowerMonCluster commands cluster b'\x00\xef = 239
CLUSTER_CMD_AM_PWR_SET_REPT_PARAMS = b'\x00' # CMD_SET_REPT_PARAMS = 0
CLUSTER_CMD_AM_PWR_REQUEST_REPORT = b'\x03' # CMD_REQUEST_REPORT = 3
CLUSTER_CMD_AM_PWR_SET_REPORT_RATE = b'\x04' # CMD_SET_REPORT_RATE= 4
CLUSTER_CMD_AM_PWR_DEMAND = b'\x81' # CMD_POWER_REPORT = 129 # Power Demand Update
CLUSTER_CMD_AM_PWR_CONSUMPTION = b'\x82' # CMD_ENERGY_REPORT = 130 # Power Consumption & Uptime Update
CLUSTER_CMD_AM_PWD_BATCH_POWER_REPORT = b'\x84' # CMD_BATCH_POWER_REPORT = 132
CLUSTER_CMD_AM_PWD_BATCH_ENERGY_REPORT = b'\x85' # CMD_BATCH_ENERGY_REPORT = 133
CLUSTER_CMD_AM_PWD_POWER_ENERGY_REPORT = b'\x86' # CMD_POWER_ENERGY_REPORT = 134
CLUSTER_CMD_AM_PWD_BATCH_POWER_ENERGY_REPORT = b'\x87' # CMD_BATCH_POWER_ENERGY_REPORT = 135
CLUSTER_CMD_AM_PWR_UNKNOWN = b'\x86' # Unknown British Gas Power Meter Update
# AmMaintenanceCluster commands cluster b'\x00\xf6' = 246
CLUSTER_CMD_AM_MAINT_HELLO_REQ = b'\xfc' # HELLO_WORLD_REQ = 252
CLUSTER_CMD_AM_MAINT_HELLO_RESP = b'\xfe' # HELLO_WORLD_RESP = 254
CLUSTER_CMD_AM_MAINT_RANGE_TEST_REQ = b'\xfd' # RANGE_TEST_SEND_CMD = 253
CLUSTER_CMD_AM_MAINT_RANGE_TEST_RESP = b'\xfd' # RANGE_TEST_RECV_CMD = 253
CLUSTER_CMD_AM_MODE_REQ = b'\xfa' # Mode Change Request
CLUSTER_CMD_AM_STATUS = b'\xfb' # Status Update
CLUSTER_CMD_AM_VERSION_REQ = b'\xfc' # Version Information Request
CLUSTER_CMD_AM_RSSI = b'\xfd' # RSSI Range Test Update
CLUSTER_CMD_AM_VERSION_RESP = b'\xfe' # Version Information Response
# It is not yet clear whether, or how, the following dictionary will be used.
# It is here to describe the relationship between Cluster ID and Cmd.
# One day it may be used by the parse_message() function to link Cluster IDs with the parse_xxxxx() functions.
alertme_cluster_cmds = {
CLUSTER_ID_AM_SWITCH: {
CLUSTER_CMD_AM_STATE_CHANGE: "Relay State Change (SmartPlug)",
CLUSTER_CMD_AM_STATE_RESP: "Switch Status Update"
},
CLUSTER_ID_AM_POWER: {
CLUSTER_CMD_AM_PWR_DEMAND: "Power Demand Update",
CLUSTER_CMD_AM_PWR_CONSUMPTION: "Power Consumption & Uptime Update",
CLUSTER_CMD_AM_PWR_UNKNOWN: "Unknown British Gas Power Meter Update"
},
CLUSTER_ID_AM_STATUS: {
CLUSTER_CMD_AM_MODE_REQ: "Mode Change Request",
CLUSTER_CMD_AM_STATUS: "Status Update"
},
CLUSTER_ID_AM_TAMPER: {},
CLUSTER_ID_AM_BUTTON: {},
CLUSTER_ID_AM_DISCOVERY: {
CLUSTER_CMD_AM_RSSI: "RSSI Range Test Update",
CLUSTER_CMD_AM_VERSION_REQ: "Version Information Request",
CLUSTER_CMD_AM_VERSION_RESP: "Version Information Response"
},
CLUSTER_ID_AM_SECURITY: {
CLUSTER_CMD_AM_SEC_ENROLL_REQ: "Security Command"
}
}
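# Illustrative sketch (not part of the original module): one way the mapping
# above could be used to turn a received cluster ID / command pair into a
# human-readable name. The helper name is hypothetical.
def describe_cluster_cmd(cluster_id, cluster_cmd):
    """Return the descriptive name for an AlertMe cluster command, or 'Unknown'."""
    return alertme_cluster_cmds.get(cluster_id, {}).get(cluster_cmd, 'Unknown')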
# This messages dict holds the skeletons for the various ZDO and AlertMe messages.
# It is used in conjunction with generate_message() to generate the messages.
# Those with a lambda in the data key make use of the generate_xxxx() functions
# to generate the data based on the parameters passed.
messages = {
'version_info_request': {
'name': 'Version Info Request',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_DISCOVERY,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_version_info_request(params)
}
},
'version_info_update': {
'name': 'Version Info Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_DISCOVERY,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_version_info_update(params)
},
'expected_params': ['hwMajorVersion', 'hwMinorVersion', 'type', 'manu_string', 'manu_date']
},
'range_update': {
'name': 'Range Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_DISCOVERY,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_range_update(params)
},
'expected_params': ['rssi']
},
'switch_state_request': {
'name': 'Relay State Request',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_SWITCH,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_switch_state_request(params)
},
'expected_params': ['switch_state']
},
'switch_state_update': {
'name': 'Relay State Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_SWITCH,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_switch_state_update(params)
},
'expected_params': ['switch_state']
},
'mode_change_request': {
'name': 'Mode Change Request',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_STATUS,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_mode_change_request(params)
},
'expected_params': ['mode']
},
'status_update': {
'name': 'Status Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_STATUS,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_status_update(params)
}
},
'power_demand_update': {
'name': 'Power Demand Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_POWER,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_power_demand_update(params)
},
'expected_params': ['power_demand']
},
'power_consumption_update': {
'name': 'Power Consumption Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_POWER,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_power_consumption_update(params)
},
'expected_params': ['power_consumption', 'up_time']
},
'button_press': {
'name': 'Button Press',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_BUTTON,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_button_press(params)
},
'params': ['button_state', 'counter']
},
'security_init': {
'name': 'Security Initialization',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_SECURITY,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_security_init(params)
}
},
'active_endpoints_request': {
'name': 'Active Endpoints Request',
'frame': {
'profile': PROFILE_ID_ZDP,
'cluster': CLUSTER_ID_ZDO_ACTIVE_EP_REQ,
'src_endpoint': ENDPOINT_ZDO,
'dest_endpoint': ENDPOINT_ZDO,
'data': lambda self, params: self.generate_active_endpoints_request(params)
},
'expected_params': ['zdo_sequence', 'addr_short']
},
'match_descriptor_request': {
'name': 'Match Descriptor Request',
'frame': {
'profile': PROFILE_ID_ZDP,
'cluster': CLUSTER_ID_ZDO_MATCH_DESC_REQ,
'src_endpoint': ENDPOINT_ZDO,
'dest_endpoint': ENDPOINT_ZDO,
'data': lambda self, params: self.generate_match_descriptor_request(params)
},
'expected_params': ['zdo_sequence', 'addr_short', 'profile_id', 'in_cluster_list', 'out_cluster_list']
},
'match_descriptor_response': {
'name': 'Match Descriptor Response',
'frame': {
'profile': PROFILE_ID_ZDP,
'cluster': CLUSTER_ID_ZDO_MATCH_DESC_RSP,
'src_endpoint': ENDPOINT_ZDO,
'dest_endpoint': ENDPOINT_ZDO,
'data': lambda self, params: self.generate_match_descriptor_response(params)
},
'expected_params': ['zdo_sequence', 'addr_short', 'endpoint_list']
},
'routing_table_request': {
'name': 'Management Routing Table Request',
'frame': {
'profile': PROFILE_ID_ZDP,
'cluster': CLUSTER_ID_ZDO_MGMT_RTG_REQ,
'src_endpoint': ENDPOINT_ZDO,
'dest_endpoint': ENDPOINT_ZDO,
'data': b'\x12\x01'
}
},
'permit_join_request': {
'name': 'Management Permit Join Request',
'frame': {
'profile': PROFILE_ID_ZDP,
'cluster': CLUSTER_ID_ZDO_MGMT_PERMIT_JOIN_REQ,
'src_endpoint': ENDPOINT_ZDO,
'dest_endpoint': ENDPOINT_ZDO,
'data': b'\xff\x00'
}
}
}
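# Illustrative sketch (not part of the original module): how one of the message
# skeletons above becomes a sendable frame. This mirrors what
# ZBNode.generate_message() does further down; 'node' is assumed to be a ZBNode
# instance, 'params' a dict covering the message's expected_params, and the
# module-level 'copy' import used elsewhere in this file is assumed present.
def build_frame_sketch(node, message_id, params):
    frame = copy.deepcopy(messages[message_id]['frame'])
    if callable(frame['data']):
        frame['data'] = frame['data'](node, params)  # resolve the data lambda
    return frame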
class ZBNode(Node):
"""
ZigBee Node object.
"""
def __init__(self, serial, callback=None):
"""
ZigBee Node Constructor.
:param serial: Serial Object
:param callback: Optional
"""
Node.__init__(self, callback)
# Type Info
self.type = 'ZBNode'
self.hwMajorVersion = 123
self.hwMinorVersion = 45
self.manu_string = 'PyAlertMe'
self.manu_date = '2017-01-01'
# Start up Serial and ZigBee
self._serial = serial
self._xbee = ZigBee(ser=self._serial, callback=self.receive_message, error_callback=self.xbee_error, escaped=True)
# My addresses
self.addr_long = None
self.addr_short = None
# Fire off messages to discover own addresses
self._addr_long_list = [b'', b'']
self.read_addresses()
# Scheduler Thread
self._started = True
self._schedule_thread = threading.Thread(target=self._schedule_loop)
self._schedule_interval = 2
self._schedule_thread.start()
self.endpoint_list = [ENDPOINT_ZDO, ENDPOINT_ALERTME]
def _schedule_loop(self):
"""
Continual updates thread; calls the _schedule_event() function at
intervals set by self._schedule_interval.
"""
while self._started:
if self.associated:
self._schedule_event()
# The following for loop is being used in place of a simple:
# time.sleep(self._schedule_interval)
# This is done so we can interrupt the thread more quickly and
# unit tests finish faster.
for i in range(self._schedule_interval * 10):
if self._started:
time.sleep(0.1)
def _schedule_event(self):
"""
The _schedule_event() function is called by the _schedule_loop()
thread function called at regular intervals.
Stub, to be overwritten by ZBHub or ZBDevice.
"""
self._logger.debug('[STUB] schedule_event: Continual Update')
def halt(self):
"""
Halt Device.
Close XBee and Serial.
:return:
"""
self._started = False # This should kill the updates thread
self._schedule_thread.join() # Wait for updates thread to finish
self._xbee.halt()
self._serial.close()
def generate_message(self, message_id, params=None):
"""
Generate Message.
:param message_id: Message ID
:param params: Optional parameter dictionary; if omitted, values are read from the device attributes
:return: Message frame dictionary
"""
self._logger.debug('Generating message %s' % message_id)
if message_id in messages.keys():
# Take a deep copy of the message
message = copy.deepcopy(messages[message_id])
if params:
# If we have manually been provided any params then use these
if 'expected_params' in message.keys():
expected_params = sorted(message['expected_params'])
provided_params = sorted(params.keys())
missing_params = sorted(set(expected_params).difference(set(provided_params)))
# We need to check if there are any missing
if len(missing_params) > 0:
raise Exception("Missing Parameters: %s" % missing_params)
else:
# Otherwise attempt to auto calculate params from the device object
params = {}
if 'expected_params' in message.keys():
for param in message['expected_params']:
params[param] = self.get_attribute(param)
# If 'data' is a lambda, then call it and replace with the return value
data = message['frame']['data']
if callable(message['frame']['data']):
message['frame']['data'] = data(self, params)
# Return processed message
return message['frame']
else:
raise Exception("Message '%s' does not exist" % message_id)
def list_messages(self):
"""
List messages.
:return:
"""
actions = {}
for message_id, message in messages.items():
actions[message_id] = {'name': message['name']}
if 'expected_params' in message.keys():
actions[message_id]['expected_params'] = message['expected_params']
return actions
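# Usage sketch (hypothetical, not in the original file): how list_messages(),
# generate_message() and send_message() below fit together for a node that is
# already connected; the destination addresses are made up.
#     frame = node.generate_message('version_info_request')
#     node.send_message(frame, dest_addr_long, dest_addr_short)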
def xbee_error(self, error):
"""
On XBee error this function is called.
:param error:
:return:
"""
self._logger.critical('XBee Error: %s', error)
def read_addresses(self):
"""
Work out own address.
"""
self._logger.debug('Requesting own addresses')
self._xbee.send('at', command='MY')
time.sleep(0.05)
self._xbee.send('at', command='SH')
time.sleep(0.05)
self._xbee.send('at', command='SL')
time.sleep(0.05)
def send_message(self, message, dest_addr_long, dest_addr_short):
"""
Send message to XBee.
:param message: Dict message
:param dest_addr_long: 64-bit Long Address
:param dest_addr_short: 16-bit Short Address
:return:
"""
# Tack on destination addresses
message['dest_addr_long'] = dest_addr_long
message['dest_addr'] = dest_addr_short
self._logger.debug('Sending Message: %s', message)
self._xbee.send('tx_explicit', **message)
def receive_message(self, message):
"""
Receive message from XBee.
Parse incoming message.
Process parsed result.
:param message: Dict of message
:return:
"""
ret = self.parse_message(message)
if message['id'] == 'rx_explicit':
source_addr_long = message['source_addr_long']
source_addr_short = message['source_addr']
# Send any replies which may need sending
for reply in ret['replies']:
message_id = reply['message_id']
if 'params' in reply.keys():
params = reply['params']
else:
params = {}
reply = self.generate_message(message_id, params)
self.send_message(reply, source_addr_long, source_addr_short)
time.sleep(0.5)
# Update any attributes which may need updating
self.process_message(source_addr_long, source_addr_short, ret['attributes'])
def parse_message(self, message):
"""
Parse ZigBee message. Work out any attribute changes and reply messages.
:param message: Dict of message
:return:
"""
self._logger.debug('Received Message: %s ', message)
try:
nicestring = ' '.join(('%#04x' % ord(c) for c in message['rf_data']))
self._logger.debug('RF_data: %s ', nicestring)
except:
self._logger.debug('no RF_data')
try:
nicestring = ' '.join(('%#04x' % ord(c) for c in message['cluster']))
self._logger.debug('Cluster: %s ', nicestring)
except:
self._logger.debug('Issue with decoding cluster')
attributes = {}
replies = []
# AT Packets
if message['id'] == 'at_response':
if message['command'] == 'MY':
self.addr_short = message['parameter']
if message['command'] == 'SH':
self._addr_long_list[0] = message['parameter']
if message['command'] == 'SL':
self._addr_long_list[1] = message['parameter']
# If we have worked out both the High and Low addresses then calculate the full addr_long
if self._addr_long_list[0] and self._addr_long_list[1]:
self.addr_long = b''.join(self._addr_long_list)
# ZigBee Explicit Packets
if message['id'] == 'rx_explicit':
source_addr_long = message['source_addr_long']
source_addr_short = message['source_addr']
profile_id = message['profile']
cluster_id = message['cluster']
if profile_id == PROFILE_ID_ZDP:
# ZigBee Device Profile ID
self._logger.debug('Received ZigBee Device Profile Packet')
zdo_sequence = message['rf_data'][0:1]
if cluster_id == CLUSTER_ID_ZDO_NWK_ADDR_REQ:
# Network (16-bit) Address Request
self._logger.debug('Received Network (16-bit) Address Request')
elif cluster_id == CLUSTER_ID_ZDO_NWK_ADDR_RSP:
# Network (16-bit) Address Response
self._logger.debug('Received Network (16-bit) Address Response')
elif cluster_id == CLUSTER_ID_ZDO_MGMT_RTG_REQ:
# Management Routing Table Request
self._logger.debug('Received Management Routing Table Request')
elif cluster_id == CLUSTER_ID_ZDO_MGMT_RTG_RSP:
# Management Routing Response
self._logger.debug('Received Management Routing Response')
elif cluster_id == CLUSTER_ID_ZDO_SIMPLE_DESC_REQ:
# Simple Descriptor Request
self._logger.debug('Received Simple Descriptor Request')
elif cluster_id == CLUSTER_ID_ZDO_ACTIVE_EP_REQ: #0x0005
# Active Endpoint Request
self._logger.debug('Received Active Endpoint Request')
elif cluster_id == CLUSTER_ID_ZDO_ACTIVE_EP_RSP: #0x8005
# Active Endpoints Response
# This message tells us what the device can do, but it isn't
# constructed correctly to match what the switch can do according
# to the spec. This is another message that gets its response
# after we receive the Match Descriptor below.
self._logger.debug('Received Active Endpoint Response')
elif cluster_id == CLUSTER_ID_ZDO_MATCH_DESC_REQ: #0x0006
# Match Descriptor Request
self._logger.debug('Received Match Descriptor Request')
# This is the point where we finally respond to the switch.
# A couple of messages are sent to cause the switch to join with
# the controller at a network level and to cause it to regard
# this controller as valid.
# First send the Match Descriptor Response
params = {
'zdo_sequence': zdo_sequence,
'addr_short': source_addr_short,
'endpoint_list': self.endpoint_list
}
replies.append({'message_id': 'match_descriptor_response', 'params': params})
elif cluster_id == CLUSTER_ID_ZDO_MATCH_DESC_RSP:
# Match Descriptor Response
self._logger.debug('Received Match Descriptor Response')
elif cluster_id == CLUSTER_ID_ZDO_END_DEVICE_ANNCE: #0x0013
# Device Announce Message
self._logger.debug('Received Device Announce Message')
# This will tell me the address of the new thing,
# so we're going to send an Active Endpoint Request.
params = {
'zdo_sequence': zdo_sequence,
'addr_short': source_addr_short
}
replies.append({'message_id': 'active_endpoints_request', 'params': params})
elif cluster_id == CLUSTER_ID_ZDO_MGMT_NETWORK_UPDATE:
# Management Network Update Notify
self._logger.debug('Received Management Network Update Notify')
else:
self._logger.error('Unrecognised Cluster ID: %r', cluster_id)
elif profile_id == PROFILE_ID_ALERTME:
# AlertMe Profile ID
self._logger.debug('Received AlertMe Specific Profile Packet')
cluster_cmd = message['rf_data'][2:3]
if cluster_id == CLUSTER_ID_AM_SWITCH:
if cluster_cmd == CLUSTER_CMD_AM_STATE_REQ:
# Switch State Request
# b'\x11\x00\x01\x01'
self._logger.debug('Received Switch State Request')
replies.append({'message_id': 'switch_state_update'})
elif cluster_cmd == CLUSTER_CMD_AM_STATE_RESP:
self._logger.debug('Received Switch State Update')
attributes = self.parse_switch_state_update(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_STATE_CHANGE:
# Switch Change State
# b'\x11\x00\x02\x01\x01' On
# b'\x11\x00\x02\x00\x01' Off
self._logger.debug('Received Switch State Change')
attributes = self.parse_switch_state_request(message['rf_data'])
replies.append({'message_id': 'switch_state_update'})
else:
self._logger.error('Unrecognised Cluster Command: %r', cluster_cmd)
elif cluster_id == CLUSTER_ID_AM_POWER:
if cluster_cmd == CLUSTER_CMD_AM_PWR_DEMAND:
self._logger.debug('Received Power Demand Update')
attributes = self.parse_power_demand(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_PWR_CONSUMPTION:
self._logger.debug('Received Power Consumption & Uptime Update')
attributes = self.parse_power_consumption(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_PWR_UNKNOWN:
self._logger.debug('Unknown Power Update')
attributes = self.parse_power_unknown(message['rf_data'])
else:
self._logger.error('Unrecognised Cluster Command: %r', cluster_cmd)
elif cluster_id == CLUSTER_ID_AM_TAMPER:
self._logger.debug('Received Tamper Switch Triggered')
attributes = self.parse_tamper_state(message['rf_data'])
elif cluster_id == CLUSTER_ID_AM_BUTTON:
self._logger.debug('Received Button Pressed')
attributes = self.parse_button_press(message['rf_data'])
elif cluster_id == CLUSTER_ID_AM_SECURITY:
self._logger.debug('Received Security Event')
# Security Cluster
# When the device first connects, it comes up in a state that
# needs initialization; this command seems to take care of that.
# So, look at the value of the data and send the command.
if message['rf_data'][3:7] == b'\x15\x00\x39\x10':
replies.append({'message_id': 'security_init'})
attributes = self.parse_security_state(message['rf_data'])
elif cluster_id == CLUSTER_ID_AM_DISCOVERY:
if cluster_cmd == CLUSTER_CMD_AM_RSSI:
self._logger.debug('Received RSSI Range Update')
attributes = self.parse_range_info_update(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_VERSION_RESP:
self._logger.debug('Received Version Information')
attributes = self.parse_version_info_update(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_VERSION_REQ:
# b'\x11\x00\xfc\x00\x01'
self._logger.debug('Received Version Request')
replies.append({'message_id': 'version_info_update'})
else:
self._logger.error('Unrecognised Cluster Command: %r', cluster_cmd)
elif cluster_id == CLUSTER_ID_AM_STATUS:
if cluster_cmd == CLUSTER_CMD_AM_STATUS:
self._logger.debug('Received Status Update')
attributes = self.parse_status_update(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_MODE_REQ:
self._logger.debug('Received Mode Change Request')
mode_cmd = message['rf_data'][3] + message['rf_data'][4]
mode = 'normal'
if mode_cmd == b'\x00\x01':
# Normal
# b'\x11\x00\xfa\x00\x01'
self._logger.debug('Normal Mode')
mode = 'normal'
elif mode_cmd == b'\x01\x01':
# Range Test
# b'\x11\x00\xfa\x01\x01'
self._logger.debug('Range Test Mode')
mode = 'range'
elif mode_cmd == b'\x02\x01':
# Locked
# b'\x11\x00\xfa\x02\x01'
self._logger.debug('Locked Mode')
mode = 'locked'
elif mode_cmd == b'\x03\x01':
# Silent
# b'\x11\x00\xfa\x03\x01'
self._logger.debug('Silent Mode')
mode = 'silent'
attributes = {'mode': mode}
else:
self._logger.error('Unrecognised Cluster Command: %r', cluster_cmd)
elif profile_id == PROFILE_ID_HA:
# HA Profile ID
self._logger.debug('Received HA Profile Packet')
else:
self._logger.error('Unrecognised Profile ID: %r', profile_id)
return {'attributes': attributes, 'replies': replies}
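# Shape sketch (made-up values, not part of the original code): parse_message()
# returns a dict of this form, which receive_message() then consumes:
#     {'attributes': {'switch_state': 1},
#      'replies': [{'message_id': 'switch_state_update'}]}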
def process_message(self, addr_long, addr_short, attributes):
"""
Process after message received. Stub, to be overwritten by ZBHub or ZBDevice.
:param addr_long: Long Address
:param addr_short: Short Address
:param attributes: Dict of message
:return:
"""
self._logger.debug('[STUB] process_message: %s', attributes)
def generate_version_info_request(self, params=None):
"""
Generate Version Info Request.
This message is sent FROM the Hub TO the SmartPlug requesting version information.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Version Information Request (b'\xfc')
:param params: Parameter dictionary (none required)
:return: Message data
"""
preamble = b'\x11\x00'
cluster_cmd = CLUSTER_CMD_AM_VERSION_REQ
payload = b'' # No data required in request
data = preamble + cluster_cmd + payload
return data
def generate_version_info_update(self, params):
"""
Generate Version Info Update.
This message is sent TO the Hub FROM the SmartPlug advertising version information.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Version Information Response (b'\xfe')
Unknown 17 Unknown Values TBC. There may be more interesting stuff in here?
HW Version 2 Hardware Version
Type Info Variable Type Information (b'AlertMe.com\nSmartPlug\n2013-09-26')
:param params: Parameter dictionary of version info
:return: Message data
"""
preamble = b'\x09\x71' # b'\tq'
cluster_cmd = CLUSTER_CMD_AM_VERSION_RESP
payload = b'\x48\x41' + b'\xd2\x1b\x19\x00\x00\x6f\x0d\x00' + b'\x39\x10' \
+ struct.pack('<HBBBB', 7, 1, 28, params['hwMinorVersion'], params['hwMajorVersion']) \
+ struct.pack('B', len(params['manu_string'])) \
+ params['manu_string'] \
+ struct.pack('B', len(params['type'])) \
+ params['type'] \
+ struct.pack('B', len(params['manu_date'])) \
+ params['manu_date']
data = preamble + cluster_cmd + payload
return data
def parse_version_info_update(self, data):
"""
Process message, parse for version information:
Version, Type, Manufacturer, Date
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Version Information Response (b'\xfe')
NodeID 2 unsigned short (H)
EUI64Str 8 8x Char (8s)
mfgID 2 unsigned short (H)
DeviceType 2 unsigned short (H)
AppRelease 1 unsigned integer (B)
AppVersion 1 unsigned integer (B)
HWMinor 1 unsigned integer (B)
HWMajor 1 unsigned integer (B)
Type Info Variable Type Information (b'AlertMe.com\nSmartPlug\n2013-09-26')
:param data: Message data
:return: Parameter dictionary of version info
"""
ret = dict()
ret['nodeId'], Eui64str, ret['mfgId'], ret['deviceType'], ret['appRelease'], ret['appVersion'], ret['hwMinorVersion'], ret['hwMajorVersion'] = struct.unpack('<H8sHHBBBB', data[3:21])
# In ZCL strings the first byte is the length of that string field, followed by further string fields
ret['manu_string'], message = self.getZclString(data[21:])
ret['type'], message = self.getZclString(message)
ret['manu_date'], message = self.getZclString(message)
return ret
def generate_range_update(self, params):
"""
Generate range message.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - RSSI Range Test Update (b'\xfd')
RSSI Value 1 RSSI Range Test Value
Unknown 1 ???
:param params: Parameter dictionary of RSSI value
:return: Message data
"""
preamble = b'\x09\x2b' # b'\t+'
cluster_cmd = CLUSTER_CMD_AM_RSSI
payload = struct.pack('B 1x', params['rssi'])
data = preamble + cluster_cmd + payload
return data
def parse_range_info_update(self, data):
"""
Process message, parse for RSSI range test value.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - RSSI Range Test Update (b'\xfd')
RSSI Value 1 RSSI Range Test Value
Unknown 1 ???
:param data: Message data
:return: Parameter dictionary of RSSI value
"""
values = dict(zip(
('cluster_cmd', 'rssi'),
struct.unpack('< 2x s B 1x', data)
))
rssi = values['rssi']
return {'rssi': rssi}
def generate_power_demand_update(self, params):
"""
Generate Power Demand Update message data.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Power Demand Update (b'\x81')
Power Value 2 Power Demand Value (kW)
:param params: Parameter dictionary of power demand value
:return: Message data
"""
preamble = b'\x09\x6a' # b'\tj'
cluster_cmd = CLUSTER_CMD_AM_PWR_DEMAND
payload = struct.pack('H', params['power_demand'])
data = preamble + cluster_cmd + payload
return data
def generate_power_consumption_update(self, params):
"""
Power Consumption & Uptime Update.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Power Consumption & Uptime Update (b'\x82')
Power Value 4 Power Consumption Value (kWh)
Up Time 4 Up Time Value (seconds)
Unknown 1 ???
:return: Message
"""
params = {
'power_consumption': 19973,
'up_time': 33207
}
# At the moment this just generates a hard coded message.
# Also see parse_power_consumption().
data = b'\tn\x82\x05N\x00\x00\xb7\x81\x00\x00\x01'
return data
def parse_power_demand(self, data):
"""
Process message, parse for power demand value.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Power Demand Update (b'\x81')
Power Value 2 Power Demand Value (kW)
Examples:
b'\tj\x81\x00\x00' {'PowerDemand': 0}
b'\tj\x81%\x00' {'PowerDemand': 37}
b'\tj\x81\x16\x00' {'PowerDemand': 22}
:param data: Message data
:return: Parameter dictionary of power demand value
"""
ret = dict(zip(
('cluster_cmd', 'power_demand'),
struct.unpack('< 2x s H', data)
))
del ret['cluster_cmd']
return ret
def parse_power_unknown(self, data):
"""
Parse unknown power message seen from British Gas (AlertMe) power monitor.
Could this be the same or merged with parse_power_demand() or parse_power_consumption()?
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC (b'\t\x00')
Cluster Command 1 Cluster Command - Unknown Power (b'\x86')
Unknown 11 ?? TODO Work out what power values this message contains!
Examples:
b'\t\x00\x86\x00\x00\x00\x00\x00\x00/\x00\x00\x00\x00' = 0
b'\t\x00\x86\x91\x012"\x00\x00M\x00\x00\x00\x00' = ?
b'\t\x00\x86F\x01{\xc9\x02\x007\x02\x00\x00\x00' = ?
:param data: Message data
:return: Parameter dictionary of power demand value
"""
value = struct.unpack('<H', data[3:5])[0] # TBC
return {'power_demand': value}
def parse_power_consumption(self, data):
"""
Process message, parse for power consumption value.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Power Consumption & Uptime Update (b'\x82')
Power Value 4 Power Consumption Value (kWh)
Up Time 4 Up Time Value (seconds)
Unknown 1 ???
:param data: Message data
:return: Parameter dictionary of usage stats
"""
ret = dict(zip(
('cluster_cmd', 'power_consumption', 'up_time'),
struct.unpack('< 2x s I I 1x', data)
))
del ret['cluster_cmd']
return ret
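# Illustrative sketch (not part of the original code): the 12-byte layout that
# parse_power_consumption() expects can be rebuilt with struct for made-up readings, e.g.
#     b'\x09\x6e' + CLUSTER_CMD_AM_PWR_CONSUMPTION + struct.pack('< I I x', 19973, 33207)
# which parses back to {'power_consumption': 19973, 'up_time': 33207}.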
def generate_mode_change_request(self, params=None):
"""
Generate Mode Change Request.
Available Modes: 'Normal', 'RangeTest', 'Locked', 'Silent'
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Mode Change Request (b'\xfa')
Mode 2 Requested Mode (1: Normal, 257: Range Test, 513: Locked, 769: Silent)
:param params: Parameter dictionary of requested mode
:return: Message data
"""
preamble = b'\x11\x00'
cluster_cmd = CLUSTER_CMD_AM_MODE_REQ
payload = b'\x00\x01' # Default normal if no mode
if not params:
mode = 'normal'
else:
mode = params['mode']
if mode == 'normal':
payload = b'\x00\x01'
elif mode == 'range':
payload = b'\x01\x01'
elif mode == 'locked':
payload = b'\x02\x01'
elif mode == 'silent':
payload = b'\x03\x01'
elif mode == 'idle':
payload = b'\x04\x01'
else:
self._logger.error('Invalid mode request %s', mode)
data = preamble + cluster_cmd + payload
return data
def generate_switch_state_request(self, params):
"""
Generate Switch State Change request data.
This message is sent FROM the Hub TO the SmartPlug requesting state change.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Change State (SmartPlug) (b'\x01' / b'\x02')
Requested Relay State 2* b'\x01' = Check Only, b'\x01\x01' = On, b'\x00\x01' = Off
* Size = 1 if check only
:param params: Parameter dictionary of switch relay state
:return: Message data
"""
preamble = b'\x11\x00'
if params['switch_state'] != '':
cluster_cmd = CLUSTER_CMD_AM_STATE_CHANGE
if int(params['switch_state']) == 1:
payload = b'\x01\x01' # On
else:
payload = b'\x00\x01' # Off
else:
# Check Only
cluster_cmd = CLUSTER_CMD_AM_STATE_REQ
payload = b'\x01'
data = preamble + cluster_cmd + payload
return data
def parse_switch_state_request(self, data):
"""
Process message, parse for switch relay state change request.
This message is sent FROM the Hub TO the SmartPlug requesting state change.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Change State (SmartPlug) (b'\x02')
Requested Relay State 2 b'\x01\x01' = On, b'\x00\x01' = Off
:param data: Message data
:return: Parameter dictionary of switch relay state
"""
# Parse Switch State Request
if data == b'\x11\x00\x02\x01\x01':
return {'switch_state': 1}
elif data == b'\x11\x00\x02\x00\x01':
return {'switch_state': 0}
else:
self._logger.error('Unknown State Request')
def generate_switch_state_update(self, params):
"""
Generate Switch State update message data.
This message is sent TO the Hub FROM the SmartPlug advertising state change.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Switch Status Update (b'\x80')
Relay State 2 b'\x07\x01' = On, b'\x06\x00' = Off
:param params: Parameter dictionary of switch relay state
:return: Message data
"""
preamble = b'\x09\x68' # b'\th'
cluster_cmd = CLUSTER_CMD_AM_STATE_RESP
payload = b'\x07\x01' if params['switch_state'] else b'\x06\x00'
data = preamble + cluster_cmd + payload
return data
def parse_switch_state_update(self, data):
"""
Process message, parse for switch status.
This message is sent TO the Hub FROM the SmartPlug advertising state change.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Switch Status Update (b'\x80')
Relay State 2 b'\x07\x01' = On, b'\x06\x00' = Off
Examples:
b'\th\x80\x07\x01'
b'\th\x80\x06\x00'
:param data: Message data
:return: Parameter dictionary of switch status
"""
values = struct.unpack('< 2x b b b', data)
if values[2] & 0x01:
return {'switch_state': 1}
else:
return {'switch_state': 0}
def generate_button_press(self, params=None):
"""
Button Press Update.
Field Name Size Description
---------- ---- -----------
Preamble 1 Unknown Preamble TBC (b'\t')
Cluster Command 1 Cluster Command - Security Event (b'\x00')
Button State 1 Button State (b'\x01' = On, b'\x00' = Off)
Unknown 1 ??? (b'\x00')
Unknown 1 ??? (b'\x01')
Counter 2 Counter (milliseconds) (b'X\xf4')
Unknown 2 ??? (b'\x00\x00')
:return: Message
"""
params = {
'button_state': 1,
'counter': 62552
}
# At the moment this just generates a hard coded message.
# Also see parse_button_press().
data = b'\t\x00\x01\x00\x01X\xf4\x00\x00'
return data
def parse_button_press(self, data):
"""
Process message, parse for button press status.
Field Name Size Description
---------- ---- -----------
Preamble 1 Unknown Preamble TBC (b'\t')
Cluster Command 1 Cluster Command - Security Event (b'\x00')
Button State 1 Button State (b'\x01' = On, b'\x00' = Off)
Unknown 1 ??? (b'\x00')
Unknown 1 ??? (b'\x01', b'\x02')
Counter 2 Counter (milliseconds) (b'\xbf\xc3', b\x12\xca)
Unknown 2 ??? (b'\x00\x00')
Examples:
b'\t\x00\x00\x00\x02\xbf\xc3\x00\x00' {'State': 0, 'Counter': 50111}
b'\t\x00\x01\x00\x01\x12\xca\x00\x00' {'State': 1, 'Counter': 51730}
:param data: Message data
:return: Parameter dictionary of button status
"""
ret = {}
if ord(data[2]) == 0x00:
ret['button_state'] = 0
elif ord(data[2]) == 0x01:
ret['button_state'] = 1
ret['counter'] = struct.unpack('<H', data[5:7])[0]
return ret
def parse_tamper_state(self, data):
"""
Process message, parse for Tamper Switch State Change.
Field Name Size Description
---------- ---- -----------
Preamble 1 Unknown Preamble TBC (b'\t')
Cluster Command 1 Cluster Command - Security Event (b'\x00')
Unknown 1 ??? (b'\x00', b'\x01')
Tamper State 1 Tamper State (b'\x01' = Closed, b'\x02' = Open)
Counter 2 Counter (milliseconds) (b'\xe8\xa6')
Unknown 2 ??? (b'\x00\x00')
Examples:
b'\t\x00\x00\x02\xe8\xa6\x00\x00' {'Counter': 42728, 'TamperState': 1}
b'\t\x00\x01\x01+\xab\x00\x00' {'Counter': 43819, 'TamperState': 0}
:param data: Message data
:return: Parameter dictionary of tamper status
"""
ret = {}
if ord(data[3]) == 0x02:
ret['tamper_state'] = 1 # Open
else:
ret['tamper_state'] = 0 # Closed
ret['counter'] = struct.unpack('<H', data[4:6])[0]
return ret
def parse_security_state(self, data):
"""
Process message, parse for security state.
TODO: Is this the SAME AS parse_tamper_state!?!
Field Name Size Description
---------- ---- -----------
Preamble 1 Unknown Preamble TBC (b'\t')
Cluster Command 1 Cluster Command - Security Event (b'\x00')
Unknown 1 ??? (b'\x00')
Button State 1 Security States Bitfield (b'\00', b'\01', b'\04', b'\05')
Unknown 2 ??? (b'\x00\x00')
Examples:
b'\t\x00\x00\x00\x00\x00' {'TriggerState': 0, 'TamperState': 0}
b'\t\x00\x00\x01\x00\x00' {'TriggerState': 1, 'TamperState': 0}
b'\t\x00\x00\x04\x00\x00' {'TriggerState': 0, 'TamperState': 1}
b'\t\x00\x00\x05\x00\x00' {'TriggerState': 1, 'TamperState': 1}
:param data: Message data
:return: Parameter dictionary of security state
"""
ret = {}
# The security states are in byte [3] and is a bitfield:
# bit 0 is the magnetic reed switch state
# bit 3 is the tamper switch state
state = ord(data[3])
if state & 0x01:
ret['trigger_state'] = 1 # Open
else:
ret['trigger_state'] = 0 # Closed
if state & 0x04:
ret['tamper_state'] = 1 # Open
else:
ret['tamper_state'] = 0 # Closed
return ret
def generate_security_init(self, params=None):
"""
Generate Security Initialisation.
Keeps security devices joined?
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC (b'\x11\x80')
Cluster Command 1 Cluster Command - Security Event (b'\x00')
Unknown 2 ??? (b'\x00\x05')
:param params: Parameter dictionary (none required)
:return: Message data
"""
preamble = b'\x11\x80'
cluster_cmd = CLUSTER_CMD_AM_SEC_STATUS_CHANGE
payload = b'\x00\x05'
data = preamble + cluster_cmd + payload
return data
def parse_status_update(self, data):
"""
Process message, parse for status update.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC (b'\t\x89')
Cluster Command 1 Cluster Command - Status Update (b'\xfb')
Type 1 b'\x1b' Clamp, b'\x1c' Switch, b'\x1d' Key Fob, b'\x1e', b'\x1f' Door
Counter 4 Counter (b'\xdb2\x00\x00')
TempFahrenheit 2 Temperature (Fahrenheit) (b'\xf0\x0b')
Unknown 6 ??? (b'na\xd3\xff\x03\x00')
Examples:
b'\t\x89\xfb\x1d\xdb2\x00\x00\xf0\x0bna\xd3\xff\x03\x00' {'Temperature': 87.008, 'Counter': 13019}
b'\t\r\xfb\x1f<\xf1\x08\x02/\x10D\x02\xcf\xff\x01\x00' {'Temperature': 106.574, 'TriggerState': 0, 'TamperState': 1}
:param data: Message data
:return: Parameter dictionary of state
"""
ret = {}
_type = data[3]
if _type == b'\x1b':
# Power Clamp
# Unknown
pass
elif _type == b'\x1c':
# Power Switch
# Unknown
pass
elif _type == b'\x1d':
# Key Fob
ret['temperature'] = float(struct.unpack("<h", data[8:10])[0]) / 100.0 * 1.8 + 32
ret['counter'] = struct.unpack('<I', data[4:8])[0]
elif _type == b'\x1e' or _type == b'\x1f':
# Door Sensor
ret['temperature'] = float(struct.unpack("<h", data[8:10])[0]) / 100.0 * 1.8 + 32
if (ord(data[-1]) & 0x01) == 1:
ret['trigger_state'] = 1 # Open
else:
ret['trigger_state'] = 0 # Closed
if (ord(data[-1]) & 0x02) == 0: # parentheses matter: '&' binds more loosely than '=='
ret['tamper_state'] = 1 # Open
else:
ret['tamper_state'] = 0 # Closed
else:
self._logger.error('Unrecognised Device Status %r %r', _type, data)
return ret
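# Worked example (not part of the original code): the temperature bytes
# b'\xf0\x0b' from the Key Fob docstring above unpack to 3056, i.e. 30.56 degC,
# and 30.56 * 1.8 + 32 = 87.008 degF, matching the documented value.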
def generate_status_update(self, params):
"""
Generate Status Update
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC b'\t\r'
Cluster Command 1 Cluster Command - Status Update (b'\xfb')
Type 1 b'\x1b' Clamp, b'\x1c' Switch, b'\x1d' Key Fob, b'\x1e', b'\x1f' Door
Counter 4 Counter
TempFahrenheit 2 Temperature (Fahrenheit)
Unknown 6 ???
:return: Message
"""
params = {
'trigger_state': 0,
'temperature': 106.574,
'tamper_state': 1
}
# At the moment this just generates a hard coded message.
# The below is just one type of status update, see parse_status_update() for more.
data = b'\t\r\xfb\x1f<\xf1\x08\x02/\x10D\x02\xcf\xff\x01\x00'
return data
def generate_active_endpoints_request(self, params):
"""
Generate Active Endpoints Request.
The active endpoint request needs the short address of the device
in the payload. Remember, it needs to be little endian (byte-reversed).
The first byte in the payload is simply a sequence number used to identify
the message; the response will carry the same number.
Field Name Size Description
---------- ---- -----------
Sequence 1 Frame Sequence
Network Address 2 16-bit address of a device in the network whose active endpoint list is being requested.
:param params:
:return: Message data
Example:
b'\xaa\x9f\x88'
"""
zdo_sequence = params['zdo_sequence'] # b'\xaa'
net_addr = params['addr_short'][1] + params['addr_short'][0] # b'\x9f\x88'
data = zdo_sequence + net_addr
return data
def generate_match_descriptor_request(self, params):
"""
Generate Match Descriptor Request.
Broadcast or unicast transmission used to discover the device(s) that support
a specified profile ID and/or clusters.
Field Name Size Description
---------- ---- -----------
Sequence 1 Frame Sequence
Network Address 2 16-bit address of a device in the network whose power descriptor is being requested.
Profile ID 2 Profile ID to be matched at the destination.
Number of Input Clusters 1 The number of input clusters in the In Cluster List for matching. Set to 0 if no clusters supplied.
Input Cluster List 2* List of input cluster IDs to be used for matching.
Number of Output Clusters 1 The number of output clusters in the Output Cluster List for matching. Set to 0 if no clusters supplied.
Output Cluster List 2* List of output cluster IDs to be used for matching.
* Number of Input Clusters
Example:
b'\x01\xfd\xff\x16\xc2\x00\x01\xf0\x00'
:param params:
:return: Message data
"""
zdo_sequence = params['zdo_sequence'] # b'\x01'
net_addr = params['addr_short'][1] + params['addr_short'][0] # b'\xfd\xff'
profile_id = params['profile_id'][1] + params['profile_id'][0] # b'\x16\xc2' PROFILE_ID_ALERTME (reversed)
num_input_clusters = struct.pack('B', len(params['in_cluster_list']) / 2) # b'\x00'
input_cluster_list = params['in_cluster_list'] # b''
num_output_clusters = struct.pack('B', len(params['out_cluster_list']) / 2) # b'\x01'
output_cluster_list = params['out_cluster_list'][1] + params['out_cluster_list'][0] # b'\xf0\x00' CLUSTER_ID_AM_STATUS (reversed)
# TODO Finish this off! At the moment this does not support multiple clusters, it just supports one!
data = zdo_sequence + net_addr + profile_id + num_input_clusters + input_cluster_list + num_output_clusters + output_cluster_list
return data
def generate_match_descriptor_response(self, params):
"""
Generate Match Descriptor Response.
If a descriptor match is found on the device, this response contains a list of endpoints that
support the request criteria.
Field Name Size Description
---------- ---- -----------
Sequence 1 Frame Sequence
Status 1 Response Status
Network Address 2 Indicates the 16-bit address of the responding device.
Length 1 The number of endpoints on the remote device that match the request criteria.
Match List Variable List of endpoints on the remote that match the request criteria.
Example:
b'\x01\x00\x00\xe1\x02\x00\x02'
:param params:
:return: Message data
"""
zdo_sequence = params['zdo_sequence'] # b'\x04'
status = ZDP_STATUS_OK # b'\x00'
net_addr = params['addr_short'][1] + params['addr_short'][0] # b'\x00\x00'
length = struct.pack('B', len(params['endpoint_list'])) # b'\x02'
match_list = b''.join(params['endpoint_list']) # b'\x00\x02'
data = zdo_sequence + status + net_addr + length + match_list
return data
def getZclString(self, message):
zclStringLength = ord(message[0])
zclString = message[1:1 + zclStringLength]
remainder = message[1 + zclStringLength:]
return zclString, remainder
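# Usage sketch (not part of the original file): getZclString() walks a buffer of
# length-prefixed ZCL strings, like the one produced by generate_version_info_update().
# The buffer below is hand-built from made-up values; ZBNode.__new__ is used only
# so the helper can be called without opening a serial port.
def zcl_string_sketch():
    buf = b'\x0bAlertMe.com' + b'\x09SmartPlug' + b'\x0a2013-09-26'
    helper = ZBNode.__new__(ZBNode)
    manu, rest = helper.getZclString(buf)
    device_type, rest = helper.getZclString(rest)
    manu_date, _ = helper.getZclString(rest)
    return manu, device_type, manu_date  # ('AlertMe.com', 'SmartPlug', '2013-09-26')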
|
py
|
1a5d7ddee7e0d00f19d71a4db2926724da25e762
|
from .entity_interface import EntityInterface
class ItemRank(EntityInterface):
def __init__(self, id: int, name: str):
super().__init__(id)
self.name = name
self.is_rare = False
|
py
|
1a5d7ec8db74f11d2bf3cdb8916ddc86a3b8a263
|
# Generated by Django 2.2.2 on 2020-02-04 10:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py
|
1a5d7edf63f89bd24357deac6f2240e214619618
|
# -*- coding:utf-8 -*-
#!/bin/bash
import string
'''
1.1 Process one character at a time
'''
thestring = "wertyuiopasdfghjklzxcvbnm,1234567890-"
thelist = list(thestring)
print thelist
for c in thestring:
print c,
print "\n"
results_toUpper = [ c.upper() for c in thestring ]
print results_toUpper
results_map = map(string.upper, thestring)
print results_map
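# Note (sketch, not in the original script): string.upper() only exists in
# Python 2; the unbound str method works the same way and also survives in Python 3:
results_map_compat = list(map(str.upper, thestring))
print results_map_compat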
|
py
|
1a5d7f6b43b338e615e03f5dde706660954c9a41
|
#!/usr/bin/env python
try:
import ipdb
ist = ipdb.set_trace
except ImportError:
ist = None
import argparse
import cPickle
parser = argparse.ArgumentParser()
parser.add_argument('pkl', type=str)
parser.add_argument('-s','--set',nargs=3, type=str)
if ist is not None:
parser.add_argument('-i','--iset',action="store_true")
args = parser.parse_args()
def printJob():
for (key,val) in vars(job).items():
print "{:20s}: {:10s}".format(key,str(val))
print '>>> Reading job file: {}'.format(args.pkl)
with open(args.pkl,'rb') as f:
job = cPickle.load(f)
if getattr(args, 'iset', False): # '-i' is only defined when ipdb imported successfully
if ist is None:
print ">>> typyQ wasn't able to import ipdb."
print ">>> Interactive job modification not supported without ipdb."
exit(1)
else:
print "-----------------------------------------------------------------"
printJob()
print "-----------------------------------------------------------------"
print ">>> Run \"printJob()\" to see current contents of job"
print ">>> Job attributes can be set via \"job.<attribute>=value\""
print ">>> Run \"q\" to quit modification without saving"
print ">>> Run \"c\" to write modifications to the pkl."
print "-----------------------------------------------------------------"
ist()
print ">>> Writing job modifications to disk!"
elif args.set:
attr = args.set[0]
val = args.set[1]
valType = args.set[2]
if valType=="int":
val = int(val)
elif valType=="float":
val = float(val)
elif valType=="bool":
val = val=="True"
elif valType=="str":
pass
elif valType=="None":
val = None
else:
print ">>> Variable type must be int, float, bool, None, or str, but not",valType
exit(1)
print ">>> Setting {} to {}".format(attr,val)
setattr(job,attr,val)
else:
print ">>> JobClass:",job.__class__.__name__
print "-----------------------------------------------------------------"
printJob()
print "-----------------------------------------------------------------"
if getattr(args, 'iset', False) or args.set:
with open(args.pkl,'wb') as f:
cPickle.dump(job,f,-1)
|
py
|
1a5d7f8383de8cb55e311ad857d7e5921e8a0a0a
|
import math
import numpy as np
import matplotlib.pyplot as plt
def angular_momentum(posArray, velArray, massArray):
'''calculates the total angular momentum for a system of objects
'''
numObjects=len(massArray)
numDimensions=len(posArray[0])
momentum=[[0.0]*numDimensions for j in range(numObjects)]
angMomen=[[0.0]*numDimensions for j in range(numObjects)]
angMomenTot=[0.0]*numDimensions
for j in range(numObjects):
#take cross product of postion and velocity
angMomen[j]=np.cross(posArray[j],velArray[j])
#multiply each component by mass
for i in range(numDimensions):
angMomen[j][i]=massArray[j]*angMomen[j][i]
#print angMomen[j]
#sum up all angular momenta
for j in range(numObjects):
for i in range(numDimensions):
angMomenTot[i]=angMomen[j][i]+angMomenTot[i]
return angMomenTot
def total_energy(posArray, velArray, massArray):
'''calculates the total energy (gravitational and kinetic) for a system of objects
'''
numObjects=len(massArray)
numDimensions=len(posArray[0])
G=math.pow(2.0*math.pi,2.0)
gravPotential=0.0
kineticEnergy=0.0
speedArray=np.linalg.norm(velArray,axis=1)
for j in range(numObjects):
for k in range(j+1,numObjects):
r = pythag_pos_diff(posArray[j],posArray[k])
gravPotential+=(-G*massArray[j]*massArray[k])/r
for j in range(numObjects):
kineticEnergy+=(1.0/2.0)*massArray[j]*(speedArray[j]**2.0)
energyTot=gravPotential+kineticEnergy
return energyTot
def pythag_pos_diff(objPosArray1,objPosArray2):
#calculates the Pythagorean position difference between two n-dimensional position arrays
pythagSum=0
for l in range(len(objPosArray1)):
pythagSum+=math.pow(objPosArray1[l]-objPosArray2[l],2)
return math.pow(pythagSum,1.0/2.0)
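# Note (sketch, not in the original script): pythag_pos_diff() is just the
# Euclidean distance between the two position vectors; an equivalent one-liner
# using the NumPy import above would be:
def pythag_pos_diff_np(objPosArray1, objPosArray2):
    return np.linalg.norm(np.subtract(objPosArray1, objPosArray2))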
def nbody_sim_euler(posArray,velArray, massArray,stepSize,stepCount):
'''Returns an array of computed position steps for the input objects
'''
numObjects=len(massArray)
numDimensions=len(posArray[0])
angStart=angular_momentum(posArray, velArray, massArray)
energyStart=total_energy(posArray, velArray, massArray)
#simulation loop using forward Euler
#1. update position
#2. compute acceleration
#3. update velocity
G=math.pow(2.0*math.pi,2.0)
historicPosArray=[[[]for j in range(numDimensions)] for j in range(numObjects)]
historicVelArray=[[[]for j in range(numDimensions)] for j in range(numObjects)]
#start with forward Euler
for i in range(stepCount):
#update positon
for j in range(numObjects):
for l in range(numDimensions):
historicPosArray[j][l].append(posArray[j][l])
posArray[j][l]+=stepSize*velArray[j][l]
#compute acceleration
accelArray=[[0]*numDimensions for j in range(numObjects)]
for j in range(numObjects):
for l in range(numDimensions):
for k in range(j+1,numObjects):
#print posArray[j],posArray[k]
newAccel=G*massArray[k]*(posArray[k][l]-posArray[j][l]) / math.pow(pythag_pos_diff(posArray[j],posArray[k]),3.0)
oppositeNewAccel=-newAccel*massArray[j]/massArray[k]
accelArray[j][l]+=newAccel
accelArray[k][l]+=oppositeNewAccel
#print accelArray[1]
#update velocity
for j in range(numObjects):
for l in range(numDimensions):
velArray[j][l]+=stepSize*accelArray[j][l]
ang=angular_momentum(posArray, velArray, massArray)
energy=total_energy(posArray, velArray, massArray)
#print ang, energy
#print velArray[1]
print 'total drift'
print np.subtract(ang,angStart), (energy-energyStart)
return historicPosArray
numObjects=2
numDimensions=3
#Quantity | Units
#time | Year
#distance | AU
#velocity | AU/Year
#mass | Solar Mass
#Number Object
#0 | Sun
#1 | Jupiter
#2 | Saturn
#generate velocity and position vectors
#the arrays are defined as ____Array[jth object][dimensions x,y,z: 0,1,2]
#velocity array is half a timestep ahead of the position and acceleration arrays
posArray=[[0.0]*numDimensions for j in range(numObjects)]
velArray=[[0.0]*numDimensions for j in range(numObjects)]
#generate massArray[jth object]
massArray=[1.0, 0.0009543]#,0.0002857
#test center of mass coords
# x_sun = -M_jup*x_jup
# T = ((x_jup - x_sun) / (M_jup+M_sun))^(1/2)
# v_jup=2*pi*x_jup / T
# v_sun = -M_jup*v_jup
jupOrbit=5.0
sunOrbit=-jupOrbit*massArray[1]
period= (((jupOrbit-sunOrbit)**3)/(1.0+massArray[1]))**(1.0/2.0)
jupSpeed=2.0*math.pi*jupOrbit / period
sunSpeed=-massArray[1]*jupSpeed
posArray[1]=[jupOrbit,0.0,0.0]
velArray[1]=[0.0,jupSpeed,0.0]
posArray[0]=[sunOrbit,0.0,0.0]
velArray[0]=[0.0,sunSpeed,0.0]
#positions and velocities of solar system bodies in barycentric coords
#all starting values at A.D. 2016-Feb-29 00:00:00.0000
#posArray[0]=[3.769360061446780E-03, 1.812773374712511E-03, -1.624014729877611E-04]
#velArray[0]=[2.116907618849916E-07*365, 6.980633760687217E-06*365, -1.203681274902981E-08*365]
#posArray[1]=[-5.291527066741596E+00, 1.182309344528905E+00, 1.134242057706169E-01]
#velArray[1]=[-1.733768918092524E-03*365, -7.008096404329502E-03*365, 6.792695786000429E-05*365]
#posArray[2]=[-3.421132666518896E+00, -9.407865807544542E+00, 2.997343479755205E-01]
#velArray[2]=[4.937187945581996E-03*365, -1.922883900630874E-03*365, -1.633640215930448E-04*365]
print 'Start Postions',posArray
print 'Start Velocities',velArray
print 'Masses',massArray
stepSize=10/365.25 #10 days, expressed in years
stepCount=2000
print 'Duration: '+str(stepSize*stepCount) +' years'
historicPosArray = nbody_sim_euler(posArray,velArray,massArray,stepSize,stepCount)
plt.plot(historicPosArray[0][0],historicPosArray[0][1],'.')
plt.plot(historicPosArray[1][0],historicPosArray[1][1],'.--')
#plt.plot(historicPosArray[2][0],historicPosArray[1][1],'.--')
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
#print historicPosArray[1][0]
|
py
|
1a5d8005261d2357ca2dabea6694b60e12b98dcd
|
import re, select, socket, traceback
class IRCBot:
def __init__(self, ip, port, nick, **kwargs):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((ip, port))
self._recv()
ready = select.select([self._socket], [], [], 1)
if ready[0]:
self._recv()
self.nick = nick
if 'realname' in kwargs:
self.realname = kwargs['realname']
else:
self.realname = 'Python IRCBot'
if 'username' in kwargs:
self.username = kwargs['username']
else:
self.username = self.nick
if 'password' in kwargs:
self._password = kwargs['password']
self._send('PASS %s' % self._password)
self._send('NICK %s' % self.nick)
self._send('USER %s 8 * :%s' % (self.username, self.realname))
ping = self._recv()
self._send('PONG %s' % ping.split(' ')[1])
self._eventlist = {}
def __setattr__(self, name, value):
if name == 'nick':
pass
elif name == 'realname':
pass
self.__dict__[name] = value
def _send(self, raw):
print('> %s' % raw.rstrip())
self._socket.send(bytes(''.join([raw, '\n']), 'UTF-8'))
def _recv(self):
data = self._socket.recv(4096).decode('UTF-8')
for line in data.split('\n'):
if len(line) > 0:
print('< %s' % line.rstrip())
return data
# def _convert_wildcard(self, search):
# search = search.replace('\\', '\\\\')
# search = re.escape(search)
# return search.replace('\\*', '.+').replace('\\?', '.')
def user_array(self, user):
nick = user.split('!')[0]
username = user.split('!')[1].split('@')[0]
address = user.split('@')[1]
return (nick, username, address)
def _hook_generic(self, command, channel, match, func):
try:
self._eventlist[''.join([command, '.', channel])].append((re.compile(match), func))
except:
self._eventlist[''.join([command, '.', channel])] = []
self._eventlist[''.join([command, '.', channel])].append((re.compile(match), func))
def hook_join(self, channel, user, func): self._hook_generic('JOIN', channel, user, func)
def hook_msg(self, channel, message, func): self._hook_generic('PRIVMSG', channel, message, func)
def hook_part(self, channel, user, func): self._hook_generic('PART', channel, user, func)
def hook_quit(self, user, func): self._hook_generic('QUIT', '*', '.*', func)
def join(self, channel):
self._send('JOIN %s' % channel)
def msg(self, channel, message):
self._send('PRIVMSG %s :%s' % (channel, message))
def _process_event(self, line):
if len(line) > 0:
command = line.split(' ')[1]
if command == 'JOIN':
self._process_join(line)
elif command == 'PART':
self._process_part(line)
elif command == 'PRIVMSG':
self._process_privmsg(line)
elif command == 'QUIT':
self._process_quit(line)
else:
self._process_generic(line)
def _process_join(self, line):
source = line.split(' ')[0][1:]
channel = line.split(' ')[2][1:].rstrip()
args = {'line': line, 'source': source, 'channel': channel}
self._fire_event('JOIN', args, channel, channel=channel)
def _process_part(self, line):
source = line.split(' ')[0][1:]
channel = line.split(' ')[2].rstrip()
if len(line.split(' ')) > 2:
msg = ' '.join(line.split(' ')[3:])[1:].rstrip()
else:
msg = ''
args = {'line': line, 'source': source, 'channel': channel, 'message': msg}
self._fire_event('PART', args, channel, channel=channel)
def _process_privmsg(self, line):
source = line.split(' ')[0][1:]
channel = line.split(' ')[2]
msg = ' '.join(line.split(' ')[3:])[1:]
args = {'line': line, 'source': source, 'channel': channel, 'message': msg}
self._fire_event('PRIVMSG', args, msg, channel=channel)
def _process_generic(self, line):
command = line.split(' ')[1]
args = {'line': line}
self._fire_event(command, args, line)
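# The dispatcher above calls self._process_quit() for QUIT lines, but the
# original file never defines it. A minimal sketch is added here, mirroring
# _process_part(); the exact argument fields are assumptions.
def _process_quit(self, line):
    source = line.split(' ')[0][1:]
    if len(line.split(' ')) > 2:
        msg = ' '.join(line.split(' ')[2:])[1:].rstrip()
    else:
        msg = ''
    args = {'line': line, 'source': source, 'message': msg}
    self._fire_event('QUIT', args, source)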
def _fire_event(self, event, args, match, **kwargs):
    # Use a separate loop variable ('hook') so the 'event' command string is not
    # shadowed by the (pattern, callback) tuples; the original shadowing meant
    # channel-specific hooks were silently skipped once a wildcard hook existed.
    try:
        for hook in self._eventlist[''.join([event, '.*'])]:
            if re.match(hook[0], match):
                try:
                    hook[1](self, args)
                except Exception:
                    traceback.print_exc()
    except Exception:
        pass
    try:
        for hook in self._eventlist[''.join([event, '.', kwargs['channel']])]:
            if re.match(hook[0], match):
                try:
                    hook[1](self, args)
                except Exception:
                    traceback.print_exc()
    except Exception:
        pass
def loop(self):
while True:
data = self._recv()
dataArray = data.split('\n')
for line in dataArray:
self._process_event(line)
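# Usage sketch (hypothetical server, channel and nick), not part of the original
# module: wiring a simple echo handler into the hooks above.
if __name__ == '__main__':
    def echo(bot, args):
        bot.msg(args['channel'], 'heard: %s' % args['message'].rstrip())
    client = IRCBot('irc.example.net', 6667, 'pybot')
    client.hook_msg('#test', '.*', echo)
    client.join('#test')
    client.loop()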
|
py
|
1a5d802f6f0a7d491d22668f6608a0a5335b9e8e
|
from datetime import datetime
from typing import NamedTuple, Dict
Item = str
Count = int
class Ride(NamedTuple):
start_time: datetime
end_time: datetime
items: Dict[Item, Count]
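# Usage sketch (made-up values), not part of the original stub: constructing a
# Ride record from the types above.
example_ride = Ride(
    start_time=datetime(2020, 1, 1, 9, 0),
    end_time=datetime(2020, 1, 1, 9, 30),
    items={'water bottle': 2},
)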
|
py
|
1a5d84b543222013dfb0b37d2e29a51bd035a0cd
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class Email(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'anchor_allow_white_space_in_characters': 'str',
'anchor_allow_white_space_in_characters_metadata': 'PropertyMetadata',
'anchor_case_sensitive': 'str',
'anchor_case_sensitive_metadata': 'PropertyMetadata',
'anchor_horizontal_alignment': 'str',
'anchor_horizontal_alignment_metadata': 'PropertyMetadata',
'anchor_ignore_if_not_present': 'str',
'anchor_ignore_if_not_present_metadata': 'PropertyMetadata',
'anchor_match_whole_word': 'str',
'anchor_match_whole_word_metadata': 'PropertyMetadata',
'anchor_string': 'str',
'anchor_string_metadata': 'PropertyMetadata',
'anchor_tab_processor_version': 'str',
'anchor_tab_processor_version_metadata': 'PropertyMetadata',
'anchor_units': 'str',
'anchor_units_metadata': 'PropertyMetadata',
'anchor_x_offset': 'str',
'anchor_x_offset_metadata': 'PropertyMetadata',
'anchor_y_offset': 'str',
'anchor_y_offset_metadata': 'PropertyMetadata',
'bold': 'str',
'bold_metadata': 'PropertyMetadata',
'conceal_value_on_document': 'str',
'conceal_value_on_document_metadata': 'PropertyMetadata',
'conditional_parent_label': 'str',
'conditional_parent_label_metadata': 'PropertyMetadata',
'conditional_parent_value': 'str',
'conditional_parent_value_metadata': 'PropertyMetadata',
'custom_tab_id': 'str',
'custom_tab_id_metadata': 'PropertyMetadata',
'disable_auto_size': 'str',
'disable_auto_size_metadata': 'PropertyMetadata',
'document_id': 'str',
'document_id_metadata': 'PropertyMetadata',
'error_details': 'ErrorDetails',
'font': 'str',
'font_color': 'str',
'font_color_metadata': 'PropertyMetadata',
'font_metadata': 'PropertyMetadata',
'font_size': 'str',
'font_size_metadata': 'PropertyMetadata',
'form_order': 'str',
'form_order_metadata': 'PropertyMetadata',
'form_page_label': 'str',
'form_page_label_metadata': 'PropertyMetadata',
'form_page_number': 'str',
'form_page_number_metadata': 'PropertyMetadata',
'height': 'str',
'height_metadata': 'PropertyMetadata',
'italic': 'str',
'italic_metadata': 'PropertyMetadata',
'locale_policy': 'LocalePolicyTab',
'locked': 'str',
'locked_metadata': 'PropertyMetadata',
'max_length': 'str',
'max_length_metadata': 'PropertyMetadata',
'merge_field': 'MergeField',
'merge_field_xml': 'str',
'name': 'str',
'name_metadata': 'PropertyMetadata',
'original_value': 'str',
'original_value_metadata': 'PropertyMetadata',
'page_number': 'str',
'page_number_metadata': 'PropertyMetadata',
'recipient_id': 'str',
'recipient_id_guid': 'str',
'recipient_id_guid_metadata': 'PropertyMetadata',
'recipient_id_metadata': 'PropertyMetadata',
'require_all': 'str',
'require_all_metadata': 'PropertyMetadata',
'required': 'str',
'required_metadata': 'PropertyMetadata',
'require_initial_on_shared_change': 'str',
'require_initial_on_shared_change_metadata': 'PropertyMetadata',
'sender_required': 'str',
'sender_required_metadata': 'PropertyMetadata',
'shared': 'str',
'shared_metadata': 'PropertyMetadata',
'share_to_recipients': 'str',
'share_to_recipients_metadata': 'PropertyMetadata',
'smart_contract_information': 'SmartContractInformation',
'source': 'str',
'status': 'str',
'status_metadata': 'PropertyMetadata',
'tab_group_labels': 'list[str]',
'tab_group_labels_metadata': 'PropertyMetadata',
'tab_id': 'str',
'tab_id_metadata': 'PropertyMetadata',
'tab_label': 'str',
'tab_label_metadata': 'PropertyMetadata',
'tab_order': 'str',
'tab_order_metadata': 'PropertyMetadata',
'tab_type': 'str',
'tab_type_metadata': 'PropertyMetadata',
'template_locked': 'str',
'template_locked_metadata': 'PropertyMetadata',
'template_required': 'str',
'template_required_metadata': 'PropertyMetadata',
'tooltip': 'str',
'tool_tip_metadata': 'PropertyMetadata',
'underline': 'str',
'underline_metadata': 'PropertyMetadata',
'validation_message': 'str',
'validation_message_metadata': 'PropertyMetadata',
'validation_pattern': 'str',
'validation_pattern_metadata': 'PropertyMetadata',
'value': 'str',
'value_metadata': 'PropertyMetadata',
'width': 'str',
'width_metadata': 'PropertyMetadata',
'x_position': 'str',
'x_position_metadata': 'PropertyMetadata',
'y_position': 'str',
'y_position_metadata': 'PropertyMetadata'
}
attribute_map = {
'anchor_allow_white_space_in_characters': 'anchorAllowWhiteSpaceInCharacters',
'anchor_allow_white_space_in_characters_metadata': 'anchorAllowWhiteSpaceInCharactersMetadata',
'anchor_case_sensitive': 'anchorCaseSensitive',
'anchor_case_sensitive_metadata': 'anchorCaseSensitiveMetadata',
'anchor_horizontal_alignment': 'anchorHorizontalAlignment',
'anchor_horizontal_alignment_metadata': 'anchorHorizontalAlignmentMetadata',
'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent',
'anchor_ignore_if_not_present_metadata': 'anchorIgnoreIfNotPresentMetadata',
'anchor_match_whole_word': 'anchorMatchWholeWord',
'anchor_match_whole_word_metadata': 'anchorMatchWholeWordMetadata',
'anchor_string': 'anchorString',
'anchor_string_metadata': 'anchorStringMetadata',
'anchor_tab_processor_version': 'anchorTabProcessorVersion',
'anchor_tab_processor_version_metadata': 'anchorTabProcessorVersionMetadata',
'anchor_units': 'anchorUnits',
'anchor_units_metadata': 'anchorUnitsMetadata',
'anchor_x_offset': 'anchorXOffset',
'anchor_x_offset_metadata': 'anchorXOffsetMetadata',
'anchor_y_offset': 'anchorYOffset',
'anchor_y_offset_metadata': 'anchorYOffsetMetadata',
'bold': 'bold',
'bold_metadata': 'boldMetadata',
'conceal_value_on_document': 'concealValueOnDocument',
'conceal_value_on_document_metadata': 'concealValueOnDocumentMetadata',
'conditional_parent_label': 'conditionalParentLabel',
'conditional_parent_label_metadata': 'conditionalParentLabelMetadata',
'conditional_parent_value': 'conditionalParentValue',
'conditional_parent_value_metadata': 'conditionalParentValueMetadata',
'custom_tab_id': 'customTabId',
'custom_tab_id_metadata': 'customTabIdMetadata',
'disable_auto_size': 'disableAutoSize',
'disable_auto_size_metadata': 'disableAutoSizeMetadata',
'document_id': 'documentId',
'document_id_metadata': 'documentIdMetadata',
'error_details': 'errorDetails',
'font': 'font',
'font_color': 'fontColor',
'font_color_metadata': 'fontColorMetadata',
'font_metadata': 'fontMetadata',
'font_size': 'fontSize',
'font_size_metadata': 'fontSizeMetadata',
'form_order': 'formOrder',
'form_order_metadata': 'formOrderMetadata',
'form_page_label': 'formPageLabel',
'form_page_label_metadata': 'formPageLabelMetadata',
'form_page_number': 'formPageNumber',
'form_page_number_metadata': 'formPageNumberMetadata',
'height': 'height',
'height_metadata': 'heightMetadata',
'italic': 'italic',
'italic_metadata': 'italicMetadata',
'locale_policy': 'localePolicy',
'locked': 'locked',
'locked_metadata': 'lockedMetadata',
'max_length': 'maxLength',
'max_length_metadata': 'maxLengthMetadata',
'merge_field': 'mergeField',
'merge_field_xml': 'mergeFieldXml',
'name': 'name',
'name_metadata': 'nameMetadata',
'original_value': 'originalValue',
'original_value_metadata': 'originalValueMetadata',
'page_number': 'pageNumber',
'page_number_metadata': 'pageNumberMetadata',
'recipient_id': 'recipientId',
'recipient_id_guid': 'recipientIdGuid',
'recipient_id_guid_metadata': 'recipientIdGuidMetadata',
'recipient_id_metadata': 'recipientIdMetadata',
'require_all': 'requireAll',
'require_all_metadata': 'requireAllMetadata',
'required': 'required',
'required_metadata': 'requiredMetadata',
'require_initial_on_shared_change': 'requireInitialOnSharedChange',
'require_initial_on_shared_change_metadata': 'requireInitialOnSharedChangeMetadata',
'sender_required': 'senderRequired',
'sender_required_metadata': 'senderRequiredMetadata',
'shared': 'shared',
'shared_metadata': 'sharedMetadata',
'share_to_recipients': 'shareToRecipients',
'share_to_recipients_metadata': 'shareToRecipientsMetadata',
'smart_contract_information': 'smartContractInformation',
'source': 'source',
'status': 'status',
'status_metadata': 'statusMetadata',
'tab_group_labels': 'tabGroupLabels',
'tab_group_labels_metadata': 'tabGroupLabelsMetadata',
'tab_id': 'tabId',
'tab_id_metadata': 'tabIdMetadata',
'tab_label': 'tabLabel',
'tab_label_metadata': 'tabLabelMetadata',
'tab_order': 'tabOrder',
'tab_order_metadata': 'tabOrderMetadata',
'tab_type': 'tabType',
'tab_type_metadata': 'tabTypeMetadata',
'template_locked': 'templateLocked',
'template_locked_metadata': 'templateLockedMetadata',
'template_required': 'templateRequired',
'template_required_metadata': 'templateRequiredMetadata',
'tooltip': 'tooltip',
'tool_tip_metadata': 'toolTipMetadata',
'underline': 'underline',
'underline_metadata': 'underlineMetadata',
'validation_message': 'validationMessage',
'validation_message_metadata': 'validationMessageMetadata',
'validation_pattern': 'validationPattern',
'validation_pattern_metadata': 'validationPatternMetadata',
'value': 'value',
'value_metadata': 'valueMetadata',
'width': 'width',
'width_metadata': 'widthMetadata',
'x_position': 'xPosition',
'x_position_metadata': 'xPositionMetadata',
'y_position': 'yPosition',
'y_position_metadata': 'yPositionMetadata'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""Email - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._anchor_allow_white_space_in_characters = None
self._anchor_allow_white_space_in_characters_metadata = None
self._anchor_case_sensitive = None
self._anchor_case_sensitive_metadata = None
self._anchor_horizontal_alignment = None
self._anchor_horizontal_alignment_metadata = None
self._anchor_ignore_if_not_present = None
self._anchor_ignore_if_not_present_metadata = None
self._anchor_match_whole_word = None
self._anchor_match_whole_word_metadata = None
self._anchor_string = None
self._anchor_string_metadata = None
self._anchor_tab_processor_version = None
self._anchor_tab_processor_version_metadata = None
self._anchor_units = None
self._anchor_units_metadata = None
self._anchor_x_offset = None
self._anchor_x_offset_metadata = None
self._anchor_y_offset = None
self._anchor_y_offset_metadata = None
self._bold = None
self._bold_metadata = None
self._conceal_value_on_document = None
self._conceal_value_on_document_metadata = None
self._conditional_parent_label = None
self._conditional_parent_label_metadata = None
self._conditional_parent_value = None
self._conditional_parent_value_metadata = None
self._custom_tab_id = None
self._custom_tab_id_metadata = None
self._disable_auto_size = None
self._disable_auto_size_metadata = None
self._document_id = None
self._document_id_metadata = None
self._error_details = None
self._font = None
self._font_color = None
self._font_color_metadata = None
self._font_metadata = None
self._font_size = None
self._font_size_metadata = None
self._form_order = None
self._form_order_metadata = None
self._form_page_label = None
self._form_page_label_metadata = None
self._form_page_number = None
self._form_page_number_metadata = None
self._height = None
self._height_metadata = None
self._italic = None
self._italic_metadata = None
self._locale_policy = None
self._locked = None
self._locked_metadata = None
self._max_length = None
self._max_length_metadata = None
self._merge_field = None
self._merge_field_xml = None
self._name = None
self._name_metadata = None
self._original_value = None
self._original_value_metadata = None
self._page_number = None
self._page_number_metadata = None
self._recipient_id = None
self._recipient_id_guid = None
self._recipient_id_guid_metadata = None
self._recipient_id_metadata = None
self._require_all = None
self._require_all_metadata = None
self._required = None
self._required_metadata = None
self._require_initial_on_shared_change = None
self._require_initial_on_shared_change_metadata = None
self._sender_required = None
self._sender_required_metadata = None
self._shared = None
self._shared_metadata = None
self._share_to_recipients = None
self._share_to_recipients_metadata = None
self._smart_contract_information = None
self._source = None
self._status = None
self._status_metadata = None
self._tab_group_labels = None
self._tab_group_labels_metadata = None
self._tab_id = None
self._tab_id_metadata = None
self._tab_label = None
self._tab_label_metadata = None
self._tab_order = None
self._tab_order_metadata = None
self._tab_type = None
self._tab_type_metadata = None
self._template_locked = None
self._template_locked_metadata = None
self._template_required = None
self._template_required_metadata = None
self._tooltip = None
self._tool_tip_metadata = None
self._underline = None
self._underline_metadata = None
self._validation_message = None
self._validation_message_metadata = None
self._validation_pattern = None
self._validation_pattern_metadata = None
self._value = None
self._value_metadata = None
self._width = None
self._width_metadata = None
self._x_position = None
self._x_position_metadata = None
self._y_position = None
self._y_position_metadata = None
self.discriminator = None
setattr(self, "_{}".format('anchor_allow_white_space_in_characters'), kwargs.get('anchor_allow_white_space_in_characters', None))
setattr(self, "_{}".format('anchor_allow_white_space_in_characters_metadata'), kwargs.get('anchor_allow_white_space_in_characters_metadata', None))
setattr(self, "_{}".format('anchor_case_sensitive'), kwargs.get('anchor_case_sensitive', None))
setattr(self, "_{}".format('anchor_case_sensitive_metadata'), kwargs.get('anchor_case_sensitive_metadata', None))
setattr(self, "_{}".format('anchor_horizontal_alignment'), kwargs.get('anchor_horizontal_alignment', None))
setattr(self, "_{}".format('anchor_horizontal_alignment_metadata'), kwargs.get('anchor_horizontal_alignment_metadata', None))
setattr(self, "_{}".format('anchor_ignore_if_not_present'), kwargs.get('anchor_ignore_if_not_present', None))
setattr(self, "_{}".format('anchor_ignore_if_not_present_metadata'), kwargs.get('anchor_ignore_if_not_present_metadata', None))
setattr(self, "_{}".format('anchor_match_whole_word'), kwargs.get('anchor_match_whole_word', None))
setattr(self, "_{}".format('anchor_match_whole_word_metadata'), kwargs.get('anchor_match_whole_word_metadata', None))
setattr(self, "_{}".format('anchor_string'), kwargs.get('anchor_string', None))
setattr(self, "_{}".format('anchor_string_metadata'), kwargs.get('anchor_string_metadata', None))
setattr(self, "_{}".format('anchor_tab_processor_version'), kwargs.get('anchor_tab_processor_version', None))
setattr(self, "_{}".format('anchor_tab_processor_version_metadata'), kwargs.get('anchor_tab_processor_version_metadata', None))
setattr(self, "_{}".format('anchor_units'), kwargs.get('anchor_units', None))
setattr(self, "_{}".format('anchor_units_metadata'), kwargs.get('anchor_units_metadata', None))
setattr(self, "_{}".format('anchor_x_offset'), kwargs.get('anchor_x_offset', None))
setattr(self, "_{}".format('anchor_x_offset_metadata'), kwargs.get('anchor_x_offset_metadata', None))
setattr(self, "_{}".format('anchor_y_offset'), kwargs.get('anchor_y_offset', None))
setattr(self, "_{}".format('anchor_y_offset_metadata'), kwargs.get('anchor_y_offset_metadata', None))
setattr(self, "_{}".format('bold'), kwargs.get('bold', None))
setattr(self, "_{}".format('bold_metadata'), kwargs.get('bold_metadata', None))
setattr(self, "_{}".format('conceal_value_on_document'), kwargs.get('conceal_value_on_document', None))
setattr(self, "_{}".format('conceal_value_on_document_metadata'), kwargs.get('conceal_value_on_document_metadata', None))
setattr(self, "_{}".format('conditional_parent_label'), kwargs.get('conditional_parent_label', None))
setattr(self, "_{}".format('conditional_parent_label_metadata'), kwargs.get('conditional_parent_label_metadata', None))
setattr(self, "_{}".format('conditional_parent_value'), kwargs.get('conditional_parent_value', None))
setattr(self, "_{}".format('conditional_parent_value_metadata'), kwargs.get('conditional_parent_value_metadata', None))
setattr(self, "_{}".format('custom_tab_id'), kwargs.get('custom_tab_id', None))
setattr(self, "_{}".format('custom_tab_id_metadata'), kwargs.get('custom_tab_id_metadata', None))
setattr(self, "_{}".format('disable_auto_size'), kwargs.get('disable_auto_size', None))
setattr(self, "_{}".format('disable_auto_size_metadata'), kwargs.get('disable_auto_size_metadata', None))
setattr(self, "_{}".format('document_id'), kwargs.get('document_id', None))
setattr(self, "_{}".format('document_id_metadata'), kwargs.get('document_id_metadata', None))
setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None))
setattr(self, "_{}".format('font'), kwargs.get('font', None))
setattr(self, "_{}".format('font_color'), kwargs.get('font_color', None))
setattr(self, "_{}".format('font_color_metadata'), kwargs.get('font_color_metadata', None))
setattr(self, "_{}".format('font_metadata'), kwargs.get('font_metadata', None))
setattr(self, "_{}".format('font_size'), kwargs.get('font_size', None))
setattr(self, "_{}".format('font_size_metadata'), kwargs.get('font_size_metadata', None))
setattr(self, "_{}".format('form_order'), kwargs.get('form_order', None))
setattr(self, "_{}".format('form_order_metadata'), kwargs.get('form_order_metadata', None))
setattr(self, "_{}".format('form_page_label'), kwargs.get('form_page_label', None))
setattr(self, "_{}".format('form_page_label_metadata'), kwargs.get('form_page_label_metadata', None))
setattr(self, "_{}".format('form_page_number'), kwargs.get('form_page_number', None))
setattr(self, "_{}".format('form_page_number_metadata'), kwargs.get('form_page_number_metadata', None))
setattr(self, "_{}".format('height'), kwargs.get('height', None))
setattr(self, "_{}".format('height_metadata'), kwargs.get('height_metadata', None))
setattr(self, "_{}".format('italic'), kwargs.get('italic', None))
setattr(self, "_{}".format('italic_metadata'), kwargs.get('italic_metadata', None))
setattr(self, "_{}".format('locale_policy'), kwargs.get('locale_policy', None))
setattr(self, "_{}".format('locked'), kwargs.get('locked', None))
setattr(self, "_{}".format('locked_metadata'), kwargs.get('locked_metadata', None))
setattr(self, "_{}".format('max_length'), kwargs.get('max_length', None))
setattr(self, "_{}".format('max_length_metadata'), kwargs.get('max_length_metadata', None))
setattr(self, "_{}".format('merge_field'), kwargs.get('merge_field', None))
setattr(self, "_{}".format('merge_field_xml'), kwargs.get('merge_field_xml', None))
setattr(self, "_{}".format('name'), kwargs.get('name', None))
setattr(self, "_{}".format('name_metadata'), kwargs.get('name_metadata', None))
setattr(self, "_{}".format('original_value'), kwargs.get('original_value', None))
setattr(self, "_{}".format('original_value_metadata'), kwargs.get('original_value_metadata', None))
setattr(self, "_{}".format('page_number'), kwargs.get('page_number', None))
setattr(self, "_{}".format('page_number_metadata'), kwargs.get('page_number_metadata', None))
setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None))
setattr(self, "_{}".format('recipient_id_guid'), kwargs.get('recipient_id_guid', None))
setattr(self, "_{}".format('recipient_id_guid_metadata'), kwargs.get('recipient_id_guid_metadata', None))
setattr(self, "_{}".format('recipient_id_metadata'), kwargs.get('recipient_id_metadata', None))
setattr(self, "_{}".format('require_all'), kwargs.get('require_all', None))
setattr(self, "_{}".format('require_all_metadata'), kwargs.get('require_all_metadata', None))
setattr(self, "_{}".format('required'), kwargs.get('required', None))
setattr(self, "_{}".format('required_metadata'), kwargs.get('required_metadata', None))
setattr(self, "_{}".format('require_initial_on_shared_change'), kwargs.get('require_initial_on_shared_change', None))
setattr(self, "_{}".format('require_initial_on_shared_change_metadata'), kwargs.get('require_initial_on_shared_change_metadata', None))
setattr(self, "_{}".format('sender_required'), kwargs.get('sender_required', None))
setattr(self, "_{}".format('sender_required_metadata'), kwargs.get('sender_required_metadata', None))
setattr(self, "_{}".format('shared'), kwargs.get('shared', None))
setattr(self, "_{}".format('shared_metadata'), kwargs.get('shared_metadata', None))
setattr(self, "_{}".format('share_to_recipients'), kwargs.get('share_to_recipients', None))
setattr(self, "_{}".format('share_to_recipients_metadata'), kwargs.get('share_to_recipients_metadata', None))
setattr(self, "_{}".format('smart_contract_information'), kwargs.get('smart_contract_information', None))
setattr(self, "_{}".format('source'), kwargs.get('source', None))
setattr(self, "_{}".format('status'), kwargs.get('status', None))
setattr(self, "_{}".format('status_metadata'), kwargs.get('status_metadata', None))
setattr(self, "_{}".format('tab_group_labels'), kwargs.get('tab_group_labels', None))
setattr(self, "_{}".format('tab_group_labels_metadata'), kwargs.get('tab_group_labels_metadata', None))
setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None))
setattr(self, "_{}".format('tab_id_metadata'), kwargs.get('tab_id_metadata', None))
setattr(self, "_{}".format('tab_label'), kwargs.get('tab_label', None))
setattr(self, "_{}".format('tab_label_metadata'), kwargs.get('tab_label_metadata', None))
setattr(self, "_{}".format('tab_order'), kwargs.get('tab_order', None))
setattr(self, "_{}".format('tab_order_metadata'), kwargs.get('tab_order_metadata', None))
setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None))
setattr(self, "_{}".format('tab_type_metadata'), kwargs.get('tab_type_metadata', None))
setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None))
setattr(self, "_{}".format('template_locked_metadata'), kwargs.get('template_locked_metadata', None))
setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None))
setattr(self, "_{}".format('template_required_metadata'), kwargs.get('template_required_metadata', None))
setattr(self, "_{}".format('tooltip'), kwargs.get('tooltip', None))
setattr(self, "_{}".format('tool_tip_metadata'), kwargs.get('tool_tip_metadata', None))
setattr(self, "_{}".format('underline'), kwargs.get('underline', None))
setattr(self, "_{}".format('underline_metadata'), kwargs.get('underline_metadata', None))
setattr(self, "_{}".format('validation_message'), kwargs.get('validation_message', None))
setattr(self, "_{}".format('validation_message_metadata'), kwargs.get('validation_message_metadata', None))
setattr(self, "_{}".format('validation_pattern'), kwargs.get('validation_pattern', None))
setattr(self, "_{}".format('validation_pattern_metadata'), kwargs.get('validation_pattern_metadata', None))
setattr(self, "_{}".format('value'), kwargs.get('value', None))
setattr(self, "_{}".format('value_metadata'), kwargs.get('value_metadata', None))
setattr(self, "_{}".format('width'), kwargs.get('width', None))
setattr(self, "_{}".format('width_metadata'), kwargs.get('width_metadata', None))
setattr(self, "_{}".format('x_position'), kwargs.get('x_position', None))
setattr(self, "_{}".format('x_position_metadata'), kwargs.get('x_position_metadata', None))
setattr(self, "_{}".format('y_position'), kwargs.get('y_position', None))
setattr(self, "_{}".format('y_position_metadata'), kwargs.get('y_position_metadata', None))
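
    # Illustrative note (added commentary, not part of the generated model):
    # the constructor above accepts any key from ``swagger_types`` as a keyword
    # argument and stores it on the matching private attribute, so a tab can be
    # sketched roughly like this (the values below are hypothetical):
    #
    #     tab = Email(anchor_string='/email/', anchor_units='pixels',
    #                 anchor_x_offset='10', anchor_y_offset='0',
    #                 required='true', tab_label='Email address')
    #     assert tab.anchor_string == '/email/'
    #
    # ``attribute_map`` then supplies the camelCase JSON keys (for example,
    # anchor_string -> 'anchorString') when the model is serialized for the API.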
@property
def anchor_allow_white_space_in_characters(self):
"""Gets the anchor_allow_white_space_in_characters of this Email. # noqa: E501
# noqa: E501
:return: The anchor_allow_white_space_in_characters of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_allow_white_space_in_characters
@anchor_allow_white_space_in_characters.setter
def anchor_allow_white_space_in_characters(self, anchor_allow_white_space_in_characters):
"""Sets the anchor_allow_white_space_in_characters of this Email.
# noqa: E501
:param anchor_allow_white_space_in_characters: The anchor_allow_white_space_in_characters of this Email. # noqa: E501
:type: str
"""
self._anchor_allow_white_space_in_characters = anchor_allow_white_space_in_characters
@property
def anchor_allow_white_space_in_characters_metadata(self):
"""Gets the anchor_allow_white_space_in_characters_metadata of this Email. # noqa: E501
:return: The anchor_allow_white_space_in_characters_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_allow_white_space_in_characters_metadata
@anchor_allow_white_space_in_characters_metadata.setter
def anchor_allow_white_space_in_characters_metadata(self, anchor_allow_white_space_in_characters_metadata):
"""Sets the anchor_allow_white_space_in_characters_metadata of this Email.
:param anchor_allow_white_space_in_characters_metadata: The anchor_allow_white_space_in_characters_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_allow_white_space_in_characters_metadata = anchor_allow_white_space_in_characters_metadata
@property
def anchor_case_sensitive(self):
"""Gets the anchor_case_sensitive of this Email. # noqa: E501
When set to **true**, the anchor string does not consider case when matching strings in the document. The default value is **true**. # noqa: E501
:return: The anchor_case_sensitive of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_case_sensitive
@anchor_case_sensitive.setter
def anchor_case_sensitive(self, anchor_case_sensitive):
"""Sets the anchor_case_sensitive of this Email.
When set to **true**, the anchor string does not consider case when matching strings in the document. The default value is **true**. # noqa: E501
:param anchor_case_sensitive: The anchor_case_sensitive of this Email. # noqa: E501
:type: str
"""
self._anchor_case_sensitive = anchor_case_sensitive
@property
def anchor_case_sensitive_metadata(self):
"""Gets the anchor_case_sensitive_metadata of this Email. # noqa: E501
:return: The anchor_case_sensitive_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_case_sensitive_metadata
@anchor_case_sensitive_metadata.setter
def anchor_case_sensitive_metadata(self, anchor_case_sensitive_metadata):
"""Sets the anchor_case_sensitive_metadata of this Email.
:param anchor_case_sensitive_metadata: The anchor_case_sensitive_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_case_sensitive_metadata = anchor_case_sensitive_metadata
@property
def anchor_horizontal_alignment(self):
"""Gets the anchor_horizontal_alignment of this Email. # noqa: E501
Specifies the alignment of anchor tabs with anchor strings. Possible values are **left** or **right**. The default value is **left**. # noqa: E501
:return: The anchor_horizontal_alignment of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_horizontal_alignment
@anchor_horizontal_alignment.setter
def anchor_horizontal_alignment(self, anchor_horizontal_alignment):
"""Sets the anchor_horizontal_alignment of this Email.
Specifies the alignment of anchor tabs with anchor strings. Possible values are **left** or **right**. The default value is **left**. # noqa: E501
:param anchor_horizontal_alignment: The anchor_horizontal_alignment of this Email. # noqa: E501
:type: str
"""
self._anchor_horizontal_alignment = anchor_horizontal_alignment
@property
def anchor_horizontal_alignment_metadata(self):
"""Gets the anchor_horizontal_alignment_metadata of this Email. # noqa: E501
:return: The anchor_horizontal_alignment_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_horizontal_alignment_metadata
@anchor_horizontal_alignment_metadata.setter
def anchor_horizontal_alignment_metadata(self, anchor_horizontal_alignment_metadata):
"""Sets the anchor_horizontal_alignment_metadata of this Email.
:param anchor_horizontal_alignment_metadata: The anchor_horizontal_alignment_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_horizontal_alignment_metadata = anchor_horizontal_alignment_metadata
@property
def anchor_ignore_if_not_present(self):
"""Gets the anchor_ignore_if_not_present of this Email. # noqa: E501
When set to **true**, this tab is ignored if anchorString is not found in the document. # noqa: E501
:return: The anchor_ignore_if_not_present of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_ignore_if_not_present
@anchor_ignore_if_not_present.setter
def anchor_ignore_if_not_present(self, anchor_ignore_if_not_present):
"""Sets the anchor_ignore_if_not_present of this Email.
When set to **true**, this tab is ignored if anchorString is not found in the document. # noqa: E501
:param anchor_ignore_if_not_present: The anchor_ignore_if_not_present of this Email. # noqa: E501
:type: str
"""
self._anchor_ignore_if_not_present = anchor_ignore_if_not_present
@property
def anchor_ignore_if_not_present_metadata(self):
"""Gets the anchor_ignore_if_not_present_metadata of this Email. # noqa: E501
:return: The anchor_ignore_if_not_present_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_ignore_if_not_present_metadata
@anchor_ignore_if_not_present_metadata.setter
def anchor_ignore_if_not_present_metadata(self, anchor_ignore_if_not_present_metadata):
"""Sets the anchor_ignore_if_not_present_metadata of this Email.
:param anchor_ignore_if_not_present_metadata: The anchor_ignore_if_not_present_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_ignore_if_not_present_metadata = anchor_ignore_if_not_present_metadata
@property
def anchor_match_whole_word(self):
"""Gets the anchor_match_whole_word of this Email. # noqa: E501
When set to **true**, the anchor string in this tab matches whole words only (strings embedded in other strings are ignored.) The default value is **true**. # noqa: E501
:return: The anchor_match_whole_word of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_match_whole_word
@anchor_match_whole_word.setter
def anchor_match_whole_word(self, anchor_match_whole_word):
"""Sets the anchor_match_whole_word of this Email.
When set to **true**, the anchor string in this tab matches whole words only (strings embedded in other strings are ignored.) The default value is **true**. # noqa: E501
:param anchor_match_whole_word: The anchor_match_whole_word of this Email. # noqa: E501
:type: str
"""
self._anchor_match_whole_word = anchor_match_whole_word
@property
def anchor_match_whole_word_metadata(self):
"""Gets the anchor_match_whole_word_metadata of this Email. # noqa: E501
:return: The anchor_match_whole_word_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_match_whole_word_metadata
@anchor_match_whole_word_metadata.setter
def anchor_match_whole_word_metadata(self, anchor_match_whole_word_metadata):
"""Sets the anchor_match_whole_word_metadata of this Email.
:param anchor_match_whole_word_metadata: The anchor_match_whole_word_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_match_whole_word_metadata = anchor_match_whole_word_metadata
@property
def anchor_string(self):
"""Gets the anchor_string of this Email. # noqa: E501
Anchor text information for a radio button. # noqa: E501
:return: The anchor_string of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_string
@anchor_string.setter
def anchor_string(self, anchor_string):
"""Sets the anchor_string of this Email.
Anchor text information for a radio button. # noqa: E501
:param anchor_string: The anchor_string of this Email. # noqa: E501
:type: str
"""
self._anchor_string = anchor_string
@property
def anchor_string_metadata(self):
"""Gets the anchor_string_metadata of this Email. # noqa: E501
:return: The anchor_string_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_string_metadata
@anchor_string_metadata.setter
def anchor_string_metadata(self, anchor_string_metadata):
"""Sets the anchor_string_metadata of this Email.
:param anchor_string_metadata: The anchor_string_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_string_metadata = anchor_string_metadata
@property
def anchor_tab_processor_version(self):
"""Gets the anchor_tab_processor_version of this Email. # noqa: E501
# noqa: E501
:return: The anchor_tab_processor_version of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_tab_processor_version
@anchor_tab_processor_version.setter
def anchor_tab_processor_version(self, anchor_tab_processor_version):
"""Sets the anchor_tab_processor_version of this Email.
# noqa: E501
:param anchor_tab_processor_version: The anchor_tab_processor_version of this Email. # noqa: E501
:type: str
"""
self._anchor_tab_processor_version = anchor_tab_processor_version
@property
def anchor_tab_processor_version_metadata(self):
"""Gets the anchor_tab_processor_version_metadata of this Email. # noqa: E501
:return: The anchor_tab_processor_version_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_tab_processor_version_metadata
@anchor_tab_processor_version_metadata.setter
def anchor_tab_processor_version_metadata(self, anchor_tab_processor_version_metadata):
"""Sets the anchor_tab_processor_version_metadata of this Email.
:param anchor_tab_processor_version_metadata: The anchor_tab_processor_version_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_tab_processor_version_metadata = anchor_tab_processor_version_metadata
@property
def anchor_units(self):
"""Gets the anchor_units of this Email. # noqa: E501
Specifies units of the X and Y offset. Units could be pixels, millimeters, centimeters, or inches. # noqa: E501
:return: The anchor_units of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_units
@anchor_units.setter
def anchor_units(self, anchor_units):
"""Sets the anchor_units of this Email.
Specifies units of the X and Y offset. Units could be pixels, millimeters, centimeters, or inches. # noqa: E501
:param anchor_units: The anchor_units of this Email. # noqa: E501
:type: str
"""
self._anchor_units = anchor_units
@property
def anchor_units_metadata(self):
"""Gets the anchor_units_metadata of this Email. # noqa: E501
:return: The anchor_units_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_units_metadata
@anchor_units_metadata.setter
def anchor_units_metadata(self, anchor_units_metadata):
"""Sets the anchor_units_metadata of this Email.
:param anchor_units_metadata: The anchor_units_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_units_metadata = anchor_units_metadata
@property
def anchor_x_offset(self):
"""Gets the anchor_x_offset of this Email. # noqa: E501
Specifies the X axis location of the tab, in anchorUnits, relative to the anchorString. # noqa: E501
:return: The anchor_x_offset of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_x_offset
@anchor_x_offset.setter
def anchor_x_offset(self, anchor_x_offset):
"""Sets the anchor_x_offset of this Email.
Specifies the X axis location of the tab, in anchorUnits, relative to the anchorString. # noqa: E501
:param anchor_x_offset: The anchor_x_offset of this Email. # noqa: E501
:type: str
"""
self._anchor_x_offset = anchor_x_offset
@property
def anchor_x_offset_metadata(self):
"""Gets the anchor_x_offset_metadata of this Email. # noqa: E501
:return: The anchor_x_offset_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_x_offset_metadata
@anchor_x_offset_metadata.setter
def anchor_x_offset_metadata(self, anchor_x_offset_metadata):
"""Sets the anchor_x_offset_metadata of this Email.
:param anchor_x_offset_metadata: The anchor_x_offset_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_x_offset_metadata = anchor_x_offset_metadata
@property
def anchor_y_offset(self):
"""Gets the anchor_y_offset of this Email. # noqa: E501
Specifies the Y axis location of the tab, in anchorUnits, relative to the anchorString. # noqa: E501
:return: The anchor_y_offset of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_y_offset
@anchor_y_offset.setter
def anchor_y_offset(self, anchor_y_offset):
"""Sets the anchor_y_offset of this Email.
Specifies the Y axis location of the tab, in anchorUnits, relative to the anchorString. # noqa: E501
:param anchor_y_offset: The anchor_y_offset of this Email. # noqa: E501
:type: str
"""
self._anchor_y_offset = anchor_y_offset
@property
def anchor_y_offset_metadata(self):
"""Gets the anchor_y_offset_metadata of this Email. # noqa: E501
:return: The anchor_y_offset_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_y_offset_metadata
@anchor_y_offset_metadata.setter
def anchor_y_offset_metadata(self, anchor_y_offset_metadata):
"""Sets the anchor_y_offset_metadata of this Email.
:param anchor_y_offset_metadata: The anchor_y_offset_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_y_offset_metadata = anchor_y_offset_metadata
@property
def bold(self):
"""Gets the bold of this Email. # noqa: E501
When set to **true**, the information in the tab is bold. # noqa: E501
:return: The bold of this Email. # noqa: E501
:rtype: str
"""
return self._bold
@bold.setter
def bold(self, bold):
"""Sets the bold of this Email.
When set to **true**, the information in the tab is bold. # noqa: E501
:param bold: The bold of this Email. # noqa: E501
:type: str
"""
self._bold = bold
@property
def bold_metadata(self):
"""Gets the bold_metadata of this Email. # noqa: E501
:return: The bold_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._bold_metadata
@bold_metadata.setter
def bold_metadata(self, bold_metadata):
"""Sets the bold_metadata of this Email.
:param bold_metadata: The bold_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._bold_metadata = bold_metadata
@property
def conceal_value_on_document(self):
"""Gets the conceal_value_on_document of this Email. # noqa: E501
When set to **true**, the field appears normally while the recipient is adding or modifying the information in the field, but the data is not visible (the characters are hidden by asterisks) to any other signer or the sender. When an envelope is completed the information is available to the sender through the Form Data link in the DocuSign Console. This setting applies only to text boxes and does not affect list boxes, radio buttons, or check boxes. # noqa: E501
:return: The conceal_value_on_document of this Email. # noqa: E501
:rtype: str
"""
return self._conceal_value_on_document
@conceal_value_on_document.setter
def conceal_value_on_document(self, conceal_value_on_document):
"""Sets the conceal_value_on_document of this Email.
When set to **true**, the field appears normally while the recipient is adding or modifying the information in the field, but the data is not visible (the characters are hidden by asterisks) to any other signer or the sender. When an envelope is completed the information is available to the sender through the Form Data link in the DocuSign Console. This setting applies only to text boxes and does not affect list boxes, radio buttons, or check boxes. # noqa: E501
:param conceal_value_on_document: The conceal_value_on_document of this Email. # noqa: E501
:type: str
"""
self._conceal_value_on_document = conceal_value_on_document
@property
def conceal_value_on_document_metadata(self):
"""Gets the conceal_value_on_document_metadata of this Email. # noqa: E501
:return: The conceal_value_on_document_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._conceal_value_on_document_metadata
@conceal_value_on_document_metadata.setter
def conceal_value_on_document_metadata(self, conceal_value_on_document_metadata):
"""Sets the conceal_value_on_document_metadata of this Email.
:param conceal_value_on_document_metadata: The conceal_value_on_document_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._conceal_value_on_document_metadata = conceal_value_on_document_metadata
@property
def conditional_parent_label(self):
"""Gets the conditional_parent_label of this Email. # noqa: E501
For conditional fields this is the TabLabel of the parent tab that controls this tab's visibility. # noqa: E501
:return: The conditional_parent_label of this Email. # noqa: E501
:rtype: str
"""
return self._conditional_parent_label
@conditional_parent_label.setter
def conditional_parent_label(self, conditional_parent_label):
"""Sets the conditional_parent_label of this Email.
For conditional fields this is the TabLabel of the parent tab that controls this tab's visibility. # noqa: E501
:param conditional_parent_label: The conditional_parent_label of this Email. # noqa: E501
:type: str
"""
self._conditional_parent_label = conditional_parent_label
@property
def conditional_parent_label_metadata(self):
"""Gets the conditional_parent_label_metadata of this Email. # noqa: E501
:return: The conditional_parent_label_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._conditional_parent_label_metadata
@conditional_parent_label_metadata.setter
def conditional_parent_label_metadata(self, conditional_parent_label_metadata):
"""Sets the conditional_parent_label_metadata of this Email.
:param conditional_parent_label_metadata: The conditional_parent_label_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._conditional_parent_label_metadata = conditional_parent_label_metadata
@property
def conditional_parent_value(self):
"""Gets the conditional_parent_value of this Email. # noqa: E501
For conditional fields, this is the value of the parent tab that controls the tab's visibility. If the parent tab is a Checkbox, Radio button, Optional Signature, or Optional Initial use \"on\" as the value to show that the parent tab is active. # noqa: E501
:return: The conditional_parent_value of this Email. # noqa: E501
:rtype: str
"""
return self._conditional_parent_value
@conditional_parent_value.setter
def conditional_parent_value(self, conditional_parent_value):
"""Sets the conditional_parent_value of this Email.
For conditional fields, this is the value of the parent tab that controls the tab's visibility. If the parent tab is a Checkbox, Radio button, Optional Signature, or Optional Initial use \"on\" as the value to show that the parent tab is active. # noqa: E501
:param conditional_parent_value: The conditional_parent_value of this Email. # noqa: E501
:type: str
"""
self._conditional_parent_value = conditional_parent_value
@property
def conditional_parent_value_metadata(self):
"""Gets the conditional_parent_value_metadata of this Email. # noqa: E501
:return: The conditional_parent_value_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._conditional_parent_value_metadata
@conditional_parent_value_metadata.setter
def conditional_parent_value_metadata(self, conditional_parent_value_metadata):
"""Sets the conditional_parent_value_metadata of this Email.
:param conditional_parent_value_metadata: The conditional_parent_value_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._conditional_parent_value_metadata = conditional_parent_value_metadata
@property
def custom_tab_id(self):
"""Gets the custom_tab_id of this Email. # noqa: E501
The DocuSign generated custom tab ID for the custom tab to be applied. This can only be used when adding new tabs for a recipient. When used, the new tab inherits all the custom tab properties. # noqa: E501
:return: The custom_tab_id of this Email. # noqa: E501
:rtype: str
"""
return self._custom_tab_id
@custom_tab_id.setter
def custom_tab_id(self, custom_tab_id):
"""Sets the custom_tab_id of this Email.
The DocuSign generated custom tab ID for the custom tab to be applied. This can only be used when adding new tabs for a recipient. When used, the new tab inherits all the custom tab properties. # noqa: E501
:param custom_tab_id: The custom_tab_id of this Email. # noqa: E501
:type: str
"""
self._custom_tab_id = custom_tab_id
@property
def custom_tab_id_metadata(self):
"""Gets the custom_tab_id_metadata of this Email. # noqa: E501
:return: The custom_tab_id_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._custom_tab_id_metadata
@custom_tab_id_metadata.setter
def custom_tab_id_metadata(self, custom_tab_id_metadata):
"""Sets the custom_tab_id_metadata of this Email.
:param custom_tab_id_metadata: The custom_tab_id_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._custom_tab_id_metadata = custom_tab_id_metadata
@property
def disable_auto_size(self):
"""Gets the disable_auto_size of this Email. # noqa: E501
        When set to **true**, disables the auto sizing of single line text boxes in the signing screen when the signer enters data. If disabled, users will only be able to enter as much data as the text box can hold. By default this is false. This property only affects single line text boxes. # noqa: E501
:return: The disable_auto_size of this Email. # noqa: E501
:rtype: str
"""
return self._disable_auto_size
@disable_auto_size.setter
def disable_auto_size(self, disable_auto_size):
"""Sets the disable_auto_size of this Email.
        When set to **true**, disables the auto sizing of single line text boxes in the signing screen when the signer enters data. If disabled, users will only be able to enter as much data as the text box can hold. By default this is false. This property only affects single line text boxes. # noqa: E501
:param disable_auto_size: The disable_auto_size of this Email. # noqa: E501
:type: str
"""
self._disable_auto_size = disable_auto_size
@property
def disable_auto_size_metadata(self):
"""Gets the disable_auto_size_metadata of this Email. # noqa: E501
:return: The disable_auto_size_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._disable_auto_size_metadata
@disable_auto_size_metadata.setter
def disable_auto_size_metadata(self, disable_auto_size_metadata):
"""Sets the disable_auto_size_metadata of this Email.
:param disable_auto_size_metadata: The disable_auto_size_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._disable_auto_size_metadata = disable_auto_size_metadata
@property
def document_id(self):
"""Gets the document_id of this Email. # noqa: E501
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute. # noqa: E501
:return: The document_id of this Email. # noqa: E501
:rtype: str
"""
return self._document_id
@document_id.setter
def document_id(self, document_id):
"""Sets the document_id of this Email.
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute. # noqa: E501
:param document_id: The document_id of this Email. # noqa: E501
:type: str
"""
self._document_id = document_id
@property
def document_id_metadata(self):
"""Gets the document_id_metadata of this Email. # noqa: E501
:return: The document_id_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._document_id_metadata
@document_id_metadata.setter
def document_id_metadata(self, document_id_metadata):
"""Sets the document_id_metadata of this Email.
:param document_id_metadata: The document_id_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._document_id_metadata = document_id_metadata
@property
def error_details(self):
"""Gets the error_details of this Email. # noqa: E501
:return: The error_details of this Email. # noqa: E501
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""Sets the error_details of this Email.
:param error_details: The error_details of this Email. # noqa: E501
:type: ErrorDetails
"""
self._error_details = error_details
@property
def font(self):
"""Gets the font of this Email. # noqa: E501
The font to be used for the tab value. Supported Fonts: Arial, Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default. # noqa: E501
:return: The font of this Email. # noqa: E501
:rtype: str
"""
return self._font
@font.setter
def font(self, font):
"""Sets the font of this Email.
The font to be used for the tab value. Supported Fonts: Arial, Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default. # noqa: E501
:param font: The font of this Email. # noqa: E501
:type: str
"""
self._font = font
@property
def font_color(self):
"""Gets the font_color of this Email. # noqa: E501
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White. # noqa: E501
:return: The font_color of this Email. # noqa: E501
:rtype: str
"""
return self._font_color
@font_color.setter
def font_color(self, font_color):
"""Sets the font_color of this Email.
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White. # noqa: E501
:param font_color: The font_color of this Email. # noqa: E501
:type: str
"""
self._font_color = font_color
@property
def font_color_metadata(self):
"""Gets the font_color_metadata of this Email. # noqa: E501
:return: The font_color_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._font_color_metadata
@font_color_metadata.setter
def font_color_metadata(self, font_color_metadata):
"""Sets the font_color_metadata of this Email.
:param font_color_metadata: The font_color_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._font_color_metadata = font_color_metadata
@property
def font_metadata(self):
"""Gets the font_metadata of this Email. # noqa: E501
:return: The font_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._font_metadata
@font_metadata.setter
def font_metadata(self, font_metadata):
"""Sets the font_metadata of this Email.
:param font_metadata: The font_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._font_metadata = font_metadata
@property
def font_size(self):
"""Gets the font_size of this Email. # noqa: E501
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72. # noqa: E501
:return: The font_size of this Email. # noqa: E501
:rtype: str
"""
return self._font_size
@font_size.setter
def font_size(self, font_size):
"""Sets the font_size of this Email.
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72. # noqa: E501
:param font_size: The font_size of this Email. # noqa: E501
:type: str
"""
self._font_size = font_size
@property
def font_size_metadata(self):
"""Gets the font_size_metadata of this Email. # noqa: E501
:return: The font_size_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._font_size_metadata
@font_size_metadata.setter
def font_size_metadata(self, font_size_metadata):
"""Sets the font_size_metadata of this Email.
:param font_size_metadata: The font_size_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._font_size_metadata = font_size_metadata
@property
def form_order(self):
"""Gets the form_order of this Email. # noqa: E501
# noqa: E501
:return: The form_order of this Email. # noqa: E501
:rtype: str
"""
return self._form_order
@form_order.setter
def form_order(self, form_order):
"""Sets the form_order of this Email.
# noqa: E501
:param form_order: The form_order of this Email. # noqa: E501
:type: str
"""
self._form_order = form_order
@property
def form_order_metadata(self):
"""Gets the form_order_metadata of this Email. # noqa: E501
:return: The form_order_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._form_order_metadata
@form_order_metadata.setter
def form_order_metadata(self, form_order_metadata):
"""Sets the form_order_metadata of this Email.
:param form_order_metadata: The form_order_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._form_order_metadata = form_order_metadata
@property
def form_page_label(self):
"""Gets the form_page_label of this Email. # noqa: E501
# noqa: E501
:return: The form_page_label of this Email. # noqa: E501
:rtype: str
"""
return self._form_page_label
@form_page_label.setter
def form_page_label(self, form_page_label):
"""Sets the form_page_label of this Email.
# noqa: E501
:param form_page_label: The form_page_label of this Email. # noqa: E501
:type: str
"""
self._form_page_label = form_page_label
@property
def form_page_label_metadata(self):
"""Gets the form_page_label_metadata of this Email. # noqa: E501
:return: The form_page_label_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._form_page_label_metadata
@form_page_label_metadata.setter
def form_page_label_metadata(self, form_page_label_metadata):
"""Sets the form_page_label_metadata of this Email.
:param form_page_label_metadata: The form_page_label_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._form_page_label_metadata = form_page_label_metadata
@property
def form_page_number(self):
"""Gets the form_page_number of this Email. # noqa: E501
# noqa: E501
:return: The form_page_number of this Email. # noqa: E501
:rtype: str
"""
return self._form_page_number
@form_page_number.setter
def form_page_number(self, form_page_number):
"""Sets the form_page_number of this Email.
# noqa: E501
:param form_page_number: The form_page_number of this Email. # noqa: E501
:type: str
"""
self._form_page_number = form_page_number
@property
def form_page_number_metadata(self):
"""Gets the form_page_number_metadata of this Email. # noqa: E501
:return: The form_page_number_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._form_page_number_metadata
@form_page_number_metadata.setter
def form_page_number_metadata(self, form_page_number_metadata):
"""Sets the form_page_number_metadata of this Email.
:param form_page_number_metadata: The form_page_number_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._form_page_number_metadata = form_page_number_metadata
@property
def height(self):
"""Gets the height of this Email. # noqa: E501
Height of the tab in pixels. # noqa: E501
:return: The height of this Email. # noqa: E501
:rtype: str
"""
return self._height
@height.setter
def height(self, height):
"""Sets the height of this Email.
Height of the tab in pixels. # noqa: E501
:param height: The height of this Email. # noqa: E501
:type: str
"""
self._height = height
@property
def height_metadata(self):
"""Gets the height_metadata of this Email. # noqa: E501
:return: The height_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._height_metadata
@height_metadata.setter
def height_metadata(self, height_metadata):
"""Sets the height_metadata of this Email.
:param height_metadata: The height_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._height_metadata = height_metadata
@property
def italic(self):
"""Gets the italic of this Email. # noqa: E501
When set to **true**, the information in the tab is italic. # noqa: E501
:return: The italic of this Email. # noqa: E501
:rtype: str
"""
return self._italic
@italic.setter
def italic(self, italic):
"""Sets the italic of this Email.
When set to **true**, the information in the tab is italic. # noqa: E501
:param italic: The italic of this Email. # noqa: E501
:type: str
"""
self._italic = italic
@property
def italic_metadata(self):
"""Gets the italic_metadata of this Email. # noqa: E501
:return: The italic_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._italic_metadata
@italic_metadata.setter
def italic_metadata(self, italic_metadata):
"""Sets the italic_metadata of this Email.
:param italic_metadata: The italic_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._italic_metadata = italic_metadata
@property
def locale_policy(self):
"""Gets the locale_policy of this Email. # noqa: E501
:return: The locale_policy of this Email. # noqa: E501
:rtype: LocalePolicyTab
"""
return self._locale_policy
@locale_policy.setter
def locale_policy(self, locale_policy):
"""Sets the locale_policy of this Email.
:param locale_policy: The locale_policy of this Email. # noqa: E501
:type: LocalePolicyTab
"""
self._locale_policy = locale_policy
@property
def locked(self):
"""Gets the locked of this Email. # noqa: E501
When set to **true**, the signer cannot change the data of the custom tab. # noqa: E501
:return: The locked of this Email. # noqa: E501
:rtype: str
"""
return self._locked
@locked.setter
def locked(self, locked):
"""Sets the locked of this Email.
When set to **true**, the signer cannot change the data of the custom tab. # noqa: E501
:param locked: The locked of this Email. # noqa: E501
:type: str
"""
self._locked = locked
@property
def locked_metadata(self):
"""Gets the locked_metadata of this Email. # noqa: E501
:return: The locked_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._locked_metadata
@locked_metadata.setter
def locked_metadata(self, locked_metadata):
"""Sets the locked_metadata of this Email.
:param locked_metadata: The locked_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._locked_metadata = locked_metadata
@property
def max_length(self):
"""Gets the max_length of this Email. # noqa: E501
An optional value that describes the maximum length of the property when the property is a string. # noqa: E501
:return: The max_length of this Email. # noqa: E501
:rtype: str
"""
return self._max_length
@max_length.setter
def max_length(self, max_length):
"""Sets the max_length of this Email.
An optional value that describes the maximum length of the property when the property is a string. # noqa: E501
:param max_length: The max_length of this Email. # noqa: E501
:type: str
"""
self._max_length = max_length
@property
def max_length_metadata(self):
"""Gets the max_length_metadata of this Email. # noqa: E501
:return: The max_length_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._max_length_metadata
@max_length_metadata.setter
def max_length_metadata(self, max_length_metadata):
"""Sets the max_length_metadata of this Email.
:param max_length_metadata: The max_length_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._max_length_metadata = max_length_metadata
@property
def merge_field(self):
"""Gets the merge_field of this Email. # noqa: E501
:return: The merge_field of this Email. # noqa: E501
:rtype: MergeField
"""
return self._merge_field
@merge_field.setter
def merge_field(self, merge_field):
"""Sets the merge_field of this Email.
:param merge_field: The merge_field of this Email. # noqa: E501
:type: MergeField
"""
self._merge_field = merge_field
@property
def merge_field_xml(self):
"""Gets the merge_field_xml of this Email. # noqa: E501
# noqa: E501
:return: The merge_field_xml of this Email. # noqa: E501
:rtype: str
"""
return self._merge_field_xml
@merge_field_xml.setter
def merge_field_xml(self, merge_field_xml):
"""Sets the merge_field_xml of this Email.
# noqa: E501
:param merge_field_xml: The merge_field_xml of this Email. # noqa: E501
:type: str
"""
self._merge_field_xml = merge_field_xml
@property
def name(self):
"""Gets the name of this Email. # noqa: E501
# noqa: E501
:return: The name of this Email. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Email.
# noqa: E501
:param name: The name of this Email. # noqa: E501
:type: str
"""
self._name = name
@property
def name_metadata(self):
"""Gets the name_metadata of this Email. # noqa: E501
:return: The name_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._name_metadata
@name_metadata.setter
def name_metadata(self, name_metadata):
"""Sets the name_metadata of this Email.
:param name_metadata: The name_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._name_metadata = name_metadata
@property
def original_value(self):
"""Gets the original_value of this Email. # noqa: E501
The initial value of the tab when it was sent to the recipient. # noqa: E501
:return: The original_value of this Email. # noqa: E501
:rtype: str
"""
return self._original_value
@original_value.setter
def original_value(self, original_value):
"""Sets the original_value of this Email.
The initial value of the tab when it was sent to the recipient. # noqa: E501
:param original_value: The original_value of this Email. # noqa: E501
:type: str
"""
self._original_value = original_value
@property
def original_value_metadata(self):
"""Gets the original_value_metadata of this Email. # noqa: E501
:return: The original_value_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._original_value_metadata
@original_value_metadata.setter
def original_value_metadata(self, original_value_metadata):
"""Sets the original_value_metadata of this Email.
:param original_value_metadata: The original_value_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._original_value_metadata = original_value_metadata
@property
def page_number(self):
"""Gets the page_number of this Email. # noqa: E501
Specifies the page number on which the tab is located. # noqa: E501
:return: The page_number of this Email. # noqa: E501
:rtype: str
"""
return self._page_number
@page_number.setter
def page_number(self, page_number):
"""Sets the page_number of this Email.
Specifies the page number on which the tab is located. # noqa: E501
:param page_number: The page_number of this Email. # noqa: E501
:type: str
"""
self._page_number = page_number
@property
def page_number_metadata(self):
"""Gets the page_number_metadata of this Email. # noqa: E501
:return: The page_number_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._page_number_metadata
@page_number_metadata.setter
def page_number_metadata(self, page_number_metadata):
"""Sets the page_number_metadata of this Email.
:param page_number_metadata: The page_number_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._page_number_metadata = page_number_metadata
@property
def recipient_id(self):
"""Gets the recipient_id of this Email. # noqa: E501
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document. # noqa: E501
:return: The recipient_id of this Email. # noqa: E501
:rtype: str
"""
return self._recipient_id
@recipient_id.setter
def recipient_id(self, recipient_id):
"""Sets the recipient_id of this Email.
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document. # noqa: E501
:param recipient_id: The recipient_id of this Email. # noqa: E501
:type: str
"""
self._recipient_id = recipient_id
@property
def recipient_id_guid(self):
"""Gets the recipient_id_guid of this Email. # noqa: E501
# noqa: E501
:return: The recipient_id_guid of this Email. # noqa: E501
:rtype: str
"""
return self._recipient_id_guid
@recipient_id_guid.setter
def recipient_id_guid(self, recipient_id_guid):
"""Sets the recipient_id_guid of this Email.
# noqa: E501
:param recipient_id_guid: The recipient_id_guid of this Email. # noqa: E501
:type: str
"""
self._recipient_id_guid = recipient_id_guid
@property
def recipient_id_guid_metadata(self):
"""Gets the recipient_id_guid_metadata of this Email. # noqa: E501
:return: The recipient_id_guid_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._recipient_id_guid_metadata
@recipient_id_guid_metadata.setter
def recipient_id_guid_metadata(self, recipient_id_guid_metadata):
"""Sets the recipient_id_guid_metadata of this Email.
:param recipient_id_guid_metadata: The recipient_id_guid_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._recipient_id_guid_metadata = recipient_id_guid_metadata
@property
def recipient_id_metadata(self):
"""Gets the recipient_id_metadata of this Email. # noqa: E501
:return: The recipient_id_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._recipient_id_metadata
@recipient_id_metadata.setter
def recipient_id_metadata(self, recipient_id_metadata):
"""Sets the recipient_id_metadata of this Email.
:param recipient_id_metadata: The recipient_id_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._recipient_id_metadata = recipient_id_metadata
@property
def require_all(self):
"""Gets the require_all of this Email. # noqa: E501
When set to **true** and shared is true, information must be entered in this field to complete the envelope. # noqa: E501
:return: The require_all of this Email. # noqa: E501
:rtype: str
"""
return self._require_all
@require_all.setter
def require_all(self, require_all):
"""Sets the require_all of this Email.
When set to **true** and shared is true, information must be entered in this field to complete the envelope. # noqa: E501
:param require_all: The require_all of this Email. # noqa: E501
:type: str
"""
self._require_all = require_all
@property
def require_all_metadata(self):
"""Gets the require_all_metadata of this Email. # noqa: E501
:return: The require_all_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._require_all_metadata
@require_all_metadata.setter
def require_all_metadata(self, require_all_metadata):
"""Sets the require_all_metadata of this Email.
:param require_all_metadata: The require_all_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._require_all_metadata = require_all_metadata
@property
def required(self):
"""Gets the required of this Email. # noqa: E501
When set to **true**, the signer is required to fill out this tab # noqa: E501
:return: The required of this Email. # noqa: E501
:rtype: str
"""
return self._required
@required.setter
def required(self, required):
"""Sets the required of this Email.
When set to **true**, the signer is required to fill out this tab # noqa: E501
:param required: The required of this Email. # noqa: E501
:type: str
"""
self._required = required
@property
def required_metadata(self):
"""Gets the required_metadata of this Email. # noqa: E501
:return: The required_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._required_metadata
@required_metadata.setter
def required_metadata(self, required_metadata):
"""Sets the required_metadata of this Email.
:param required_metadata: The required_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._required_metadata = required_metadata
@property
def require_initial_on_shared_change(self):
"""Gets the require_initial_on_shared_change of this Email. # noqa: E501
Optional element for field markup. When set to **true**, the signer is required to initial when they modify a shared field. # noqa: E501
:return: The require_initial_on_shared_change of this Email. # noqa: E501
:rtype: str
"""
return self._require_initial_on_shared_change
@require_initial_on_shared_change.setter
def require_initial_on_shared_change(self, require_initial_on_shared_change):
"""Sets the require_initial_on_shared_change of this Email.
Optional element for field markup. When set to **true**, the signer is required to initial when they modify a shared field. # noqa: E501
:param require_initial_on_shared_change: The require_initial_on_shared_change of this Email. # noqa: E501
:type: str
"""
self._require_initial_on_shared_change = require_initial_on_shared_change
@property
def require_initial_on_shared_change_metadata(self):
"""Gets the require_initial_on_shared_change_metadata of this Email. # noqa: E501
:return: The require_initial_on_shared_change_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._require_initial_on_shared_change_metadata
@require_initial_on_shared_change_metadata.setter
def require_initial_on_shared_change_metadata(self, require_initial_on_shared_change_metadata):
"""Sets the require_initial_on_shared_change_metadata of this Email.
:param require_initial_on_shared_change_metadata: The require_initial_on_shared_change_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._require_initial_on_shared_change_metadata = require_initial_on_shared_change_metadata
@property
def sender_required(self):
"""Gets the sender_required of this Email. # noqa: E501
        When set to **true**, the sender must populate the tab before an envelope can be sent using the template. This value can only be changed by modifying (PUT) the template. Tabs with a `senderRequired` value of true cannot be deleted from an envelope.  # noqa: E501
:return: The sender_required of this Email. # noqa: E501
:rtype: str
"""
return self._sender_required
@sender_required.setter
def sender_required(self, sender_required):
"""Sets the sender_required of this Email.
        When set to **true**, the sender must populate the tab before an envelope can be sent using the template. This value can only be changed by modifying (PUT) the template. Tabs with a `senderRequired` value of true cannot be deleted from an envelope.  # noqa: E501
:param sender_required: The sender_required of this Email. # noqa: E501
:type: str
"""
self._sender_required = sender_required
@property
def sender_required_metadata(self):
"""Gets the sender_required_metadata of this Email. # noqa: E501
:return: The sender_required_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._sender_required_metadata
@sender_required_metadata.setter
def sender_required_metadata(self, sender_required_metadata):
"""Sets the sender_required_metadata of this Email.
:param sender_required_metadata: The sender_required_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._sender_required_metadata = sender_required_metadata
@property
def shared(self):
"""Gets the shared of this Email. # noqa: E501
When set to **true**, this custom tab is shared. # noqa: E501
:return: The shared of this Email. # noqa: E501
:rtype: str
"""
return self._shared
@shared.setter
def shared(self, shared):
"""Sets the shared of this Email.
When set to **true**, this custom tab is shared. # noqa: E501
:param shared: The shared of this Email. # noqa: E501
:type: str
"""
self._shared = shared
@property
def shared_metadata(self):
"""Gets the shared_metadata of this Email. # noqa: E501
:return: The shared_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._shared_metadata
@shared_metadata.setter
def shared_metadata(self, shared_metadata):
"""Sets the shared_metadata of this Email.
:param shared_metadata: The shared_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._shared_metadata = shared_metadata
@property
def share_to_recipients(self):
"""Gets the share_to_recipients of this Email. # noqa: E501
# noqa: E501
:return: The share_to_recipients of this Email. # noqa: E501
:rtype: str
"""
return self._share_to_recipients
@share_to_recipients.setter
def share_to_recipients(self, share_to_recipients):
"""Sets the share_to_recipients of this Email.
# noqa: E501
:param share_to_recipients: The share_to_recipients of this Email. # noqa: E501
:type: str
"""
self._share_to_recipients = share_to_recipients
@property
def share_to_recipients_metadata(self):
"""Gets the share_to_recipients_metadata of this Email. # noqa: E501
:return: The share_to_recipients_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._share_to_recipients_metadata
@share_to_recipients_metadata.setter
def share_to_recipients_metadata(self, share_to_recipients_metadata):
"""Sets the share_to_recipients_metadata of this Email.
:param share_to_recipients_metadata: The share_to_recipients_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._share_to_recipients_metadata = share_to_recipients_metadata
@property
def smart_contract_information(self):
"""Gets the smart_contract_information of this Email. # noqa: E501
:return: The smart_contract_information of this Email. # noqa: E501
:rtype: SmartContractInformation
"""
return self._smart_contract_information
@smart_contract_information.setter
def smart_contract_information(self, smart_contract_information):
"""Sets the smart_contract_information of this Email.
:param smart_contract_information: The smart_contract_information of this Email. # noqa: E501
:type: SmartContractInformation
"""
self._smart_contract_information = smart_contract_information
@property
def source(self):
"""Gets the source of this Email. # noqa: E501
# noqa: E501
:return: The source of this Email. # noqa: E501
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this Email.
# noqa: E501
:param source: The source of this Email. # noqa: E501
:type: str
"""
self._source = source
@property
def status(self):
"""Gets the status of this Email. # noqa: E501
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later. # noqa: E501
:return: The status of this Email. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Email.
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later. # noqa: E501
:param status: The status of this Email. # noqa: E501
:type: str
"""
self._status = status
@property
def status_metadata(self):
"""Gets the status_metadata of this Email. # noqa: E501
:return: The status_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._status_metadata
@status_metadata.setter
def status_metadata(self, status_metadata):
"""Sets the status_metadata of this Email.
:param status_metadata: The status_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._status_metadata = status_metadata
@property
def tab_group_labels(self):
"""Gets the tab_group_labels of this Email. # noqa: E501
# noqa: E501
:return: The tab_group_labels of this Email. # noqa: E501
:rtype: list[str]
"""
return self._tab_group_labels
@tab_group_labels.setter
def tab_group_labels(self, tab_group_labels):
"""Sets the tab_group_labels of this Email.
# noqa: E501
:param tab_group_labels: The tab_group_labels of this Email. # noqa: E501
:type: list[str]
"""
self._tab_group_labels = tab_group_labels
@property
def tab_group_labels_metadata(self):
"""Gets the tab_group_labels_metadata of this Email. # noqa: E501
:return: The tab_group_labels_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tab_group_labels_metadata
@tab_group_labels_metadata.setter
def tab_group_labels_metadata(self, tab_group_labels_metadata):
"""Sets the tab_group_labels_metadata of this Email.
:param tab_group_labels_metadata: The tab_group_labels_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tab_group_labels_metadata = tab_group_labels_metadata
@property
def tab_id(self):
"""Gets the tab_id of this Email. # noqa: E501
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call]. # noqa: E501
:return: The tab_id of this Email. # noqa: E501
:rtype: str
"""
return self._tab_id
@tab_id.setter
def tab_id(self, tab_id):
"""Sets the tab_id of this Email.
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call]. # noqa: E501
:param tab_id: The tab_id of this Email. # noqa: E501
:type: str
"""
self._tab_id = tab_id
@property
def tab_id_metadata(self):
"""Gets the tab_id_metadata of this Email. # noqa: E501
:return: The tab_id_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tab_id_metadata
@tab_id_metadata.setter
def tab_id_metadata(self, tab_id_metadata):
"""Sets the tab_id_metadata of this Email.
:param tab_id_metadata: The tab_id_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tab_id_metadata = tab_id_metadata
@property
def tab_label(self):
"""Gets the tab_label of this Email. # noqa: E501
The label string associated with the tab. # noqa: E501
:return: The tab_label of this Email. # noqa: E501
:rtype: str
"""
return self._tab_label
@tab_label.setter
def tab_label(self, tab_label):
"""Sets the tab_label of this Email.
The label string associated with the tab. # noqa: E501
:param tab_label: The tab_label of this Email. # noqa: E501
:type: str
"""
self._tab_label = tab_label
@property
def tab_label_metadata(self):
"""Gets the tab_label_metadata of this Email. # noqa: E501
:return: The tab_label_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tab_label_metadata
@tab_label_metadata.setter
def tab_label_metadata(self, tab_label_metadata):
"""Sets the tab_label_metadata of this Email.
:param tab_label_metadata: The tab_label_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tab_label_metadata = tab_label_metadata
@property
def tab_order(self):
"""Gets the tab_order of this Email. # noqa: E501
# noqa: E501
:return: The tab_order of this Email. # noqa: E501
:rtype: str
"""
return self._tab_order
@tab_order.setter
def tab_order(self, tab_order):
"""Sets the tab_order of this Email.
# noqa: E501
:param tab_order: The tab_order of this Email. # noqa: E501
:type: str
"""
self._tab_order = tab_order
@property
def tab_order_metadata(self):
"""Gets the tab_order_metadata of this Email. # noqa: E501
:return: The tab_order_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tab_order_metadata
@tab_order_metadata.setter
def tab_order_metadata(self, tab_order_metadata):
"""Sets the tab_order_metadata of this Email.
:param tab_order_metadata: The tab_order_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tab_order_metadata = tab_order_metadata
@property
def tab_type(self):
"""Gets the tab_type of this Email. # noqa: E501
# noqa: E501
:return: The tab_type of this Email. # noqa: E501
:rtype: str
"""
return self._tab_type
@tab_type.setter
def tab_type(self, tab_type):
"""Sets the tab_type of this Email.
# noqa: E501
:param tab_type: The tab_type of this Email. # noqa: E501
:type: str
"""
self._tab_type = tab_type
@property
def tab_type_metadata(self):
"""Gets the tab_type_metadata of this Email. # noqa: E501
:return: The tab_type_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tab_type_metadata
@tab_type_metadata.setter
def tab_type_metadata(self, tab_type_metadata):
"""Sets the tab_type_metadata of this Email.
:param tab_type_metadata: The tab_type_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tab_type_metadata = tab_type_metadata
@property
def template_locked(self):
"""Gets the template_locked of this Email. # noqa: E501
When set to **true**, the sender cannot change any attributes of the recipient. Used only when working with template recipients. # noqa: E501
:return: The template_locked of this Email. # noqa: E501
:rtype: str
"""
return self._template_locked
@template_locked.setter
def template_locked(self, template_locked):
"""Sets the template_locked of this Email.
When set to **true**, the sender cannot change any attributes of the recipient. Used only when working with template recipients. # noqa: E501
:param template_locked: The template_locked of this Email. # noqa: E501
:type: str
"""
self._template_locked = template_locked
@property
def template_locked_metadata(self):
"""Gets the template_locked_metadata of this Email. # noqa: E501
:return: The template_locked_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._template_locked_metadata
@template_locked_metadata.setter
def template_locked_metadata(self, template_locked_metadata):
"""Sets the template_locked_metadata of this Email.
:param template_locked_metadata: The template_locked_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._template_locked_metadata = template_locked_metadata
@property
def template_required(self):
"""Gets the template_required of this Email. # noqa: E501
When set to **true**, the sender may not remove the recipient. Used only when working with template recipients. # noqa: E501
:return: The template_required of this Email. # noqa: E501
:rtype: str
"""
return self._template_required
@template_required.setter
def template_required(self, template_required):
"""Sets the template_required of this Email.
When set to **true**, the sender may not remove the recipient. Used only when working with template recipients. # noqa: E501
:param template_required: The template_required of this Email. # noqa: E501
:type: str
"""
self._template_required = template_required
@property
def template_required_metadata(self):
"""Gets the template_required_metadata of this Email. # noqa: E501
:return: The template_required_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._template_required_metadata
@template_required_metadata.setter
def template_required_metadata(self, template_required_metadata):
"""Sets the template_required_metadata of this Email.
:param template_required_metadata: The template_required_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._template_required_metadata = template_required_metadata
@property
def tooltip(self):
"""Gets the tooltip of this Email. # noqa: E501
# noqa: E501
:return: The tooltip of this Email. # noqa: E501
:rtype: str
"""
return self._tooltip
@tooltip.setter
def tooltip(self, tooltip):
"""Sets the tooltip of this Email.
# noqa: E501
:param tooltip: The tooltip of this Email. # noqa: E501
:type: str
"""
self._tooltip = tooltip
@property
def tool_tip_metadata(self):
"""Gets the tool_tip_metadata of this Email. # noqa: E501
:return: The tool_tip_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tool_tip_metadata
@tool_tip_metadata.setter
def tool_tip_metadata(self, tool_tip_metadata):
"""Sets the tool_tip_metadata of this Email.
:param tool_tip_metadata: The tool_tip_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tool_tip_metadata = tool_tip_metadata
@property
def underline(self):
"""Gets the underline of this Email. # noqa: E501
When set to **true**, the information in the tab is underlined. # noqa: E501
:return: The underline of this Email. # noqa: E501
:rtype: str
"""
return self._underline
@underline.setter
def underline(self, underline):
"""Sets the underline of this Email.
When set to **true**, the information in the tab is underlined. # noqa: E501
:param underline: The underline of this Email. # noqa: E501
:type: str
"""
self._underline = underline
@property
def underline_metadata(self):
"""Gets the underline_metadata of this Email. # noqa: E501
:return: The underline_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._underline_metadata
@underline_metadata.setter
def underline_metadata(self, underline_metadata):
"""Sets the underline_metadata of this Email.
:param underline_metadata: The underline_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._underline_metadata = underline_metadata
@property
def validation_message(self):
"""Gets the validation_message of this Email. # noqa: E501
        The message displayed if the custom tab fails input validation (either custom or embedded).  # noqa: E501
:return: The validation_message of this Email. # noqa: E501
:rtype: str
"""
return self._validation_message
@validation_message.setter
def validation_message(self, validation_message):
"""Sets the validation_message of this Email.
        The message displayed if the custom tab fails input validation (either custom or embedded).  # noqa: E501
:param validation_message: The validation_message of this Email. # noqa: E501
:type: str
"""
self._validation_message = validation_message
@property
def validation_message_metadata(self):
"""Gets the validation_message_metadata of this Email. # noqa: E501
:return: The validation_message_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._validation_message_metadata
@validation_message_metadata.setter
def validation_message_metadata(self, validation_message_metadata):
"""Sets the validation_message_metadata of this Email.
:param validation_message_metadata: The validation_message_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._validation_message_metadata = validation_message_metadata
@property
def validation_pattern(self):
"""Gets the validation_pattern of this Email. # noqa: E501
A regular expression used to validate input for the tab. # noqa: E501
:return: The validation_pattern of this Email. # noqa: E501
:rtype: str
"""
return self._validation_pattern
@validation_pattern.setter
def validation_pattern(self, validation_pattern):
"""Sets the validation_pattern of this Email.
A regular expression used to validate input for the tab. # noqa: E501
:param validation_pattern: The validation_pattern of this Email. # noqa: E501
:type: str
"""
self._validation_pattern = validation_pattern
@property
def validation_pattern_metadata(self):
"""Gets the validation_pattern_metadata of this Email. # noqa: E501
:return: The validation_pattern_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._validation_pattern_metadata
@validation_pattern_metadata.setter
def validation_pattern_metadata(self, validation_pattern_metadata):
"""Sets the validation_pattern_metadata of this Email.
:param validation_pattern_metadata: The validation_pattern_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._validation_pattern_metadata = validation_pattern_metadata
@property
def value(self):
"""Gets the value of this Email. # noqa: E501
Specifies the value of the tab. # noqa: E501
:return: The value of this Email. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this Email.
Specifies the value of the tab. # noqa: E501
:param value: The value of this Email. # noqa: E501
:type: str
"""
self._value = value
@property
def value_metadata(self):
"""Gets the value_metadata of this Email. # noqa: E501
:return: The value_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._value_metadata
@value_metadata.setter
def value_metadata(self, value_metadata):
"""Sets the value_metadata of this Email.
:param value_metadata: The value_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._value_metadata = value_metadata
@property
def width(self):
"""Gets the width of this Email. # noqa: E501
Width of the tab in pixels. # noqa: E501
:return: The width of this Email. # noqa: E501
:rtype: str
"""
return self._width
@width.setter
def width(self, width):
"""Sets the width of this Email.
Width of the tab in pixels. # noqa: E501
:param width: The width of this Email. # noqa: E501
:type: str
"""
self._width = width
@property
def width_metadata(self):
"""Gets the width_metadata of this Email. # noqa: E501
:return: The width_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._width_metadata
@width_metadata.setter
def width_metadata(self, width_metadata):
"""Sets the width_metadata of this Email.
:param width_metadata: The width_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._width_metadata = width_metadata
@property
def x_position(self):
"""Gets the x_position of this Email. # noqa: E501
This indicates the horizontal offset of the object on the page. DocuSign uses 72 DPI when determining position. # noqa: E501
:return: The x_position of this Email. # noqa: E501
:rtype: str
"""
return self._x_position
@x_position.setter
def x_position(self, x_position):
"""Sets the x_position of this Email.
This indicates the horizontal offset of the object on the page. DocuSign uses 72 DPI when determining position. # noqa: E501
:param x_position: The x_position of this Email. # noqa: E501
:type: str
"""
self._x_position = x_position
@property
def x_position_metadata(self):
"""Gets the x_position_metadata of this Email. # noqa: E501
:return: The x_position_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._x_position_metadata
@x_position_metadata.setter
def x_position_metadata(self, x_position_metadata):
"""Sets the x_position_metadata of this Email.
:param x_position_metadata: The x_position_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._x_position_metadata = x_position_metadata
@property
def y_position(self):
"""Gets the y_position of this Email. # noqa: E501
This indicates the vertical offset of the object on the page. DocuSign uses 72 DPI when determining position. # noqa: E501
:return: The y_position of this Email. # noqa: E501
:rtype: str
"""
return self._y_position
@y_position.setter
def y_position(self, y_position):
"""Sets the y_position of this Email.
This indicates the vertical offset of the object on the page. DocuSign uses 72 DPI when determining position. # noqa: E501
:param y_position: The y_position of this Email. # noqa: E501
:type: str
"""
self._y_position = y_position
@property
def y_position_metadata(self):
"""Gets the y_position_metadata of this Email. # noqa: E501
:return: The y_position_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._y_position_metadata
@y_position_metadata.setter
def y_position_metadata(self, y_position_metadata):
"""Sets the y_position_metadata of this Email.
:param y_position_metadata: The y_position_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._y_position_metadata = y_position_metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Email, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Email):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Email):
return True
return self.to_dict() != other.to_dict()
|
py
|
1a5d8635d95390f3e09d33d9ae41c77951cdd777
|
from fastapi import APIRouter, Body
from fastapi.encoders import jsonable_encoder
from ..crud.student import (
add_student,
delete_student,
retrieve_student,
retrieve_students,
update_student,
)
from ..models.student import (
ErrorResponseModel,
ResponseModel,
StudentSchema,
UpdateStudentModel,
)
router = APIRouter()
@router.post("/", response_description="Student data added into the database")
async def add_student_data(student: StudentSchema = Body(...)):
student = jsonable_encoder(student)
new_student = await add_student(student)
return ResponseModel(new_student, "Student added successfully.")
@router.get("/", response_description="Students retrieved")
async def get_students():
students = await retrieve_students()
if students:
return ResponseModel(students, "Students data retrieved successfully")
return ResponseModel(students, "Empty list returned")
@router.get("/{id}", response_description="Student data retrieved")
async def get_student_data(id):
student = await retrieve_student(id)
if student:
return ResponseModel(student, "Student data retrieved successfully")
return ErrorResponseModel("An error occurred.", 404, "Student doesn't exist.")
@router.put("/{id}")
async def update_student_data(id: str, req: UpdateStudentModel = Body(...)):
req = {k: v for k, v in req.dict().items() if v is not None}
updated_student = await update_student(id, req)
if updated_student:
return ResponseModel(
"Student with ID: {} name update is successful".format(id),
"Student name updated successfully",
)
return ErrorResponseModel(
"An error occurred",
404,
"There was an error updating the student data.",
)
@router.delete("/{id}", response_description="Student data deleted from the database")
async def delete_student_data(id: str):
deleted_student = await delete_student(id)
if deleted_student:
return ResponseModel(
"Student with ID: {} removed".format(id), "Student deleted successfully"
)
return ErrorResponseModel(
"An error occurred", 404, "Student with id {0} doesn't exist".format(id)
)
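# Hedged usage sketch (not part of the original module): one common way this router
# is mounted in a FastAPI application. The "/student" prefix and the "Student" tag
# are assumptions chosen purely for illustration.
def _example_app():  # hypothetical helper, illustrative only
    from fastapi import FastAPI

    app = FastAPI()
    app.include_router(router, tags=["Student"], prefix="/student")
    return app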
|
py
|
1a5d86e062edb6f8847f476942750fd28d12cc78
|
import cv2 as cv
import numpy as np
import os
from typing import Final
#rock = 0, scissor = 1, paper = 2
ROCK: Final = 0
SCISSOR: Final = 1
PAPER: Final = 2
def ModeSelect():
return 0
def DistanceCheck():
return True
def GetRcp():
while True :
if DistanceCheck() == True : break
return 0
def InitOdds():
    # Reset the running score. The previous version rebound its parameters,
    # which has no effect on the caller, so the reset values are returned instead.
    win = 0
    lose = 0
    total = win + lose
    return win, lose, total
def RcpFunc(mode, hand):
if mode == 0: #WIN
if hand == ROCK : return PAPER
elif hand == SCISSOR : return ROCK
elif hand == PAPER : return SCISSOR
elif mode == 1: #LOSE
if hand == ROCK : return SCISSOR
elif hand == SCISSOR : return PAPER
elif hand == PAPER : return ROCK
elif mode == 2: #RAND
        return np.random.randint(3)  # 0..2 covers ROCK, SCISSOR and PAPER
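# Illustrative examples (not in the original file), showing the intended mapping:
#   RcpFunc(0, ROCK) -> PAPER    (mode 0: machine plays to win)
#   RcpFunc(1, ROCK) -> SCISSOR  (mode 1: machine plays to lose)
#   RcpFunc(2, hand) -> random hand in {ROCK, SCISSOR, PAPER}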
def LedCtrl(result):
if result == ROCK:
pass
elif result == SCISSOR:
pass
elif result == PAPER:
pass
win, lose, total = 0, 0, 0
while True:
    win, lose, total = InitOdds()
mode = ModeSelect()
if mode == 0:
hand = GetRcp()
result_hand = RcpFunc(mode, hand)
LedCtrl(result_hand)
elif mode == 1:
hand = GetRcp()
result_hand = RcpFunc(mode, hand)
LedCtrl(result_hand)
elif mode == 2:
hand = GetRcp()
result_hand = RcpFunc(mode, hand)
LedCtrl(result_hand)
|
py
|
1a5d8754632359b08fffca3414a0bddecf1c4912
|
import os
import sys
import json
def buildPath(fileName):
"""
Standardizes filepath to conform with an executable version built using pyinstaller
:param string fileName: name of the file
    :return: state-dependent filepath
:rtype: string
"""
if getattr(sys, 'frozen', False):
return os.path.join(os.path.dirname(sys.executable), str(fileName))
return fileName
def writeJson(filename, data):
with open(buildPath(filename), 'w') as outfile:
json.dump(data, outfile, default=lambda o: o.__dict__, sort_keys=True)
return 0
def readJson(filename, attribute=None):
try:
with open(buildPath(filename)) as json_data:
data = json.load(json_data)
if attribute:
return data[attribute]
return data
except IOError as ioerror:
        print(ioerror)
return 1
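# Hedged usage sketch (not part of the original module): round-trips a small dict
# through writeJson/readJson. The file name "settings.json" and its contents are
# assumptions chosen purely for illustration.
def _example_round_trip():  # hypothetical helper, illustrative only
    writeJson("settings.json", {"theme": "dark", "volume": 7})
    return readJson("settings.json", attribute="theme")  # -> "dark"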
|
py
|
1a5d8795363a442ab6976b8dc704462a4104ffc3
|
from behave import Given, When, Then
import time
@Given(u'the user is on the home page')
def step_impl(context):
context.driver.get("file:///C:/Users/Ozgur/Desktop/repo/project1web/index.html")
@When(u'the user enters the username styphon')
def step_impl(context):
context.index_page.select_username_input().send_keys("styphon")
@When(u'the user enters the password 1234')
def step_impl(context):
context.index_page.select_password_input().send_keys("1234")
@When(u'the user clicks sign in')
def step_impl(context):
context.index_page.select_sign_in_button().click()
@Then(u'the employee should be redirected to the employee page')
def step_impl(context):
time.sleep(1)
assert context.driver.title == "Employee Page"
@When(u'the user enters the username styphon31')
def step_impl(context):
context.index_page.select_username_input().send_keys("styphon31")
@When(u'the user enters the password 4321')
def step_impl(context):
context.index_page.select_password_input().send_keys("4321")
@Then(u'the manager should be redirected to the manager page')
def step_impl(context):
time.sleep(1)
assert context.driver.title == "Manager Page"
@When(u'the user enters the password 5432')
def step_impl(context):
context.index_page.select_password_input().send_keys("5432")
@Then(u'the user should get invalid credentials error')
def step_impl(context):
time.sleep(1)
assert context.driver.switch_to.alert.text == "Invalid credentials"
context.driver.switch_to.alert.accept()
@When(u'the user enters the username asda')
def step_impl(context):
context.index_page.select_username_input().send_keys("asda")
@When(u'the user enters the username asdasda')
def step_impl(context):
context.index_page.select_username_input().send_keys("asdasda")
@When(u'the user enters the password aasdas')
def step_impl(context):
context.index_page.select_password_input().send_keys("aasdas")
|
py
|
1a5d88b77ec9cda04674dbddc1438f7c06b0a7db
|
# -*- coding: utf-8 -*-
#
# shortit documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'shortit'
copyright = u"2016, aaron"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'shorldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'shorl.tex',
u'shortit Documentation',
u"aaron", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'shorl', u'shortit Documentation',
[u"aaron"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'shorl', u'shortit Documentation',
u"aaron", 'shortit',
'A simple URL shortner', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
py
|
1a5d88cc02d250396637ab6ed9d48e3b06673bc2
|
import torch
import torch.nn as nn
import dgl
import dgl.function as fn
from dgl.nn.pytorch import GraphConv
class GCNZinc(nn.Module):
def __init__(self,
g,
num_feats,
num_layers,
num_hidden,
num_atom_type,
num_bond_type):
super(GCNZinc, self).__init__()
self.g = g
self.num_atom_type = num_atom_type
self.num_bond_type = num_bond_type
self.gcn_layers = nn.ModuleList()
self.BNs = nn.ModuleList()
# atom_type embedding
self.embed = nn.Embedding(num_atom_type, num_feats)
self.gcn_layers.append(GraphConv(num_feats, num_hidden))
self.BNs.append(nn.BatchNorm1d(num_hidden))
for i in range(num_layers):
self.gcn_layers.append(GraphConv(num_hidden, num_hidden))
self.BNs.append(nn.BatchNorm1d(num_hidden))
self.regressor1 = nn.Linear(num_hidden, num_hidden//2)
self.regressor2 = nn.Linear(num_hidden//2, 1)
def forward(self, x, e, snorm_n, snorm_e):
h = self.embed(x)
for layer, bn in zip(self.gcn_layers, self.BNs):
h = layer(self.g, h)
h = h * snorm_n
h = bn(h)
h = torch.tanh(h)
self.g.ndata['h'] = h
h = dgl.mean_nodes(self.g, 'h')
h = torch.relu(h)
h = self.regressor1(h)
h = torch.relu(h)
logits = self.regressor2(h)
return logits
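# Hedged usage sketch (not part of the original module): instantiating the model on a
# tiny 3-node cycle graph. Assumes DGL >= 0.5 (for dgl.graph); the feature sizes and
# atom/bond-type counts are arbitrary, and the bond-type inputs are unused by forward().
def _example_forward():  # hypothetical helper, illustrative only
    g = dgl.graph(([0, 1, 2], [1, 2, 0]))            # 3 nodes, each with in-degree 1
    model = GCNZinc(g, num_feats=16, num_layers=2, num_hidden=32,
                    num_atom_type=28, num_bond_type=4)
    x = torch.randint(0, 28, (3,))                   # one atom-type id per node
    snorm_n = torch.ones(3, 1)                       # per-node graph-size normalisation
    return model(x, e=None, snorm_n=snorm_n, snorm_e=None)  # shape: (1, 1)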
|
py
|
1a5d88f17c4c35ec9113d3ea89e6fed88adc118b
|
"""Poll related commands."""
from telegram.ext import run_async
from pollbot.i18n import i18n
from pollbot.helper.session import message_wrapper
from pollbot.display.creation import get_init_text
from pollbot.display.misc import get_poll_list
from pollbot.telegram.keyboard import (
get_cancel_creation_keyboard,
get_init_keyboard,
)
from pollbot.models import Poll
@run_async
@message_wrapper(private=True)
def create_poll(bot, update, session, user):
"""Create a new poll."""
# The previous unfinished poll will be removed
user.started = True
if user.current_poll is not None and not user.current_poll.created:
update.message.chat.send_message(
i18n.t("creation.already_creating", locale=user.locale),
reply_markup=get_cancel_creation_keyboard(user.current_poll),
)
return
poll = Poll.create(user, session)
text = get_init_text(poll)
keyboard = get_init_keyboard(poll)
update.message.chat.send_message(
text,
parse_mode="markdown",
reply_markup=keyboard,
disable_web_page_preview=True,
)
@run_async
@message_wrapper(private=True)
def list_polls(bot, update, session, user):
"""Get a list of all active polls."""
text, keyboard = get_poll_list(session, user)
update.message.chat.send_message(text, reply_markup=keyboard)
@run_async
@message_wrapper(private=True)
def list_closed_polls(bot, update, session, user):
"""Get a list of all closed polls."""
text, keyboard = get_poll_list(session, user, closed=True)
update.message.chat.send_message(text, reply_markup=keyboard)
|
py
|
1a5d8a8cdc837e72d81d8b03a69ada775fcb8213
|
"""In which we test that various pieces of py2-only syntax are supported."""
from __future__ import with_statement
from rpython.conftest import option
from rpython.annotator import model as annmodel
from rpython.annotator.annrpython import RPythonAnnotator as _RPythonAnnotator
from rpython.translator.test import snippet
from .test_annrpython import (
TestAnnotateTestCase as _TestAnnotateTestCase, graphof
)
class TestAnnotateTestCase:
def teardown_method(self, meth):
assert annmodel.s_Bool == annmodel.SomeBool()
class RPythonAnnotator(_RPythonAnnotator):
def build_types(self, *args):
s = _RPythonAnnotator.build_types(self, *args)
self.validate()
if option.view:
self.translator.view()
return s
def test_harmonic(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.harmonic, [int])
assert s.knowntype == float
# check that the list produced by range() is not mutated or resized
graph = graphof(a, snippet.harmonic)
all_vars = set().union(*[block.getvariables() for block in graph.iterblocks()])
print all_vars
for var in all_vars:
s_value = var.annotation
if isinstance(s_value, annmodel.SomeList):
assert not s_value.listdef.listitem.resized
assert not s_value.listdef.listitem.mutated
assert s_value.listdef.listitem.range_step
def test_prebuilt_long_that_is_not_too_long(self):
small_constant = 12L
def f():
return small_constant
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == 12
assert s.nonneg
assert not s.unsigned
#
small_constant = -23L
def f():
return small_constant
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == -23
assert not s.nonneg
assert not s.unsigned
def test_isinstance_double_const(self):
class X(object):
def _freeze_(self):
return True
x = X()
def f(i):
if i:
x1 = x
else:
x1 = None
print "hello" # this is to force the merge of blocks
return isinstance(x1, X)
a = self.RPythonAnnotator()
s = a.build_types(f, [annmodel.SomeInteger()])
assert isinstance(s, annmodel.SomeBool)
|
py
|
1a5d8b00e0832cb1f480ba8d62540877b28f0ca5
|
from django.contrib import admin
from django.db import models
from .models import Event, Robot, MatchScout, CoachScout, PitScout
from django.forms import CheckboxSelectMultiple
class EventAdmin(admin.ModelAdmin):
    formfield_overrides = {
        # Keys must be model field classes, not model attributes; this applies the
        # checkbox widget to every ManyToManyField on this admin (here, Event.robots).
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
admin.site.register(Event, EventAdmin)
admin.site.register(Robot)
admin.site.register(MatchScout)
admin.site.register(CoachScout)
admin.site.register(PitScout)
|
py
|
1a5d8d6204d0dbbad9fea832c4a346c59887d373
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for Bio.File module."""
import os.path
import shutil
import tempfile
import unittest
from io import StringIO
from Bio import bgzf
from Bio import File
class RandomAccess(unittest.TestCase):
"""Random access tests."""
def test_plain(self):
"""Test plain text file."""
with File._open_for_random_access("Quality/example.fastq") as handle:
self.assertIn("r", handle.mode)
self.assertIn("b", handle.mode)
def test_bgzf(self):
"""Test BGZF compressed file."""
with File._open_for_random_access("Quality/example.fastq.bgz") as handle:
self.assertIsInstance(handle, bgzf.BgzfReader)
def test_gzip(self):
"""Test gzip compressed file."""
self.assertRaises(
ValueError, File._open_for_random_access, "Quality/example.fastq.gz"
)
class AsHandleTestCase(unittest.TestCase):
"""Tests for as_handle function."""
def setUp(self):
"""Initialise temporary directory."""
# Create a directory to work in
self.temp_dir = tempfile.mkdtemp(prefix="biopython-test")
def tearDown(self):
"""Remove temporary directory."""
shutil.rmtree(self.temp_dir)
def _path(self, *args):
return os.path.join(self.temp_dir, *args)
def test_handle(self):
"""Test as_handle with a file-like object argument."""
p = self._path("test_file.fasta")
with open(p, "wb") as fp:
with File.as_handle(fp) as handle:
self.assertEqual(
fp,
handle,
"as_handle should "
"return argument when given a "
"file-like object",
)
self.assertFalse(handle.closed)
self.assertFalse(
handle.closed,
"Exiting as_handle given a file-like object "
"should not close the file",
)
def test_string_path(self):
"""Test as_handle with a string path argument."""
p = self._path("test_file.fasta")
mode = "wb"
with File.as_handle(p, mode=mode) as handle:
self.assertEqual(p, handle.name)
self.assertEqual(mode, handle.mode)
self.assertFalse(handle.closed)
self.assertTrue(handle.closed)
def test_path_object(self):
"""Test as_handle with a pathlib.Path object."""
from pathlib import Path
p = Path(self._path("test_file.fasta"))
mode = "wb"
with File.as_handle(p, mode=mode) as handle:
self.assertEqual(str(p.absolute()), handle.name)
self.assertEqual(mode, handle.mode)
self.assertFalse(handle.closed)
self.assertTrue(handle.closed)
def test_custom_path_like_object(self):
"""Test as_handle with a custom path-like object."""
class CustomPathLike:
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
p = CustomPathLike(self._path("test_file.fasta"))
mode = "wb"
with File.as_handle(p, mode=mode) as handle:
self.assertEqual(p.path, handle.name)
self.assertEqual(mode, handle.mode)
self.assertFalse(handle.closed)
self.assertTrue(handle.closed)
def test_stringio(self):
"""Testing passing StringIO handles."""
s = StringIO()
with File.as_handle(s) as handle:
self.assertIs(s, handle)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
py
|
1a5d8d77eeb099020c242c5a5960d45ce09f2c0c
|
"""Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
from telethon import events
import asyncio
from uniborg.util import admin_cmd
@borg.on(admin_cmd("emoji (.*)"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 16)
input_str = event.pattern_match.group(1)
if input_str == "shrug":
await event.edit("¯\_(ツ)_/¯")
elif input_str == "apple":
await event.edit("\uF8FF")
elif input_str == ":/":
await event.edit(input_str)
animation_chars = [
":\\",
":/"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 2])
elif input_str == "-_-":
await event.edit(input_str)
animation_chars = [
"-__-",
"-_-"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 2])
|
py
|
1a5d8ef8e11cceddf1b4b4c7a4ed2697725c25d5
|
from datetime import datetime
from config import db, ma
from marshmallow import fields
# NOTE: Marshmallow is the module that translates SQLAlchemy objects into
# Python objects suitable for creating JSON strings
class Person(db.Model):
'''
Class used to represent a Person
Parent: db.Model
Attributes
----------
__tablename__ : str
A string that states the name of the table
person_id : int
The unique id of a person in the person table
lname : str
The last name of the person
fname : str
The first name of the person
timestamp : datetime
The UTC timestamp of when a person was added/updated to the table
notes : list
List of notes created by a Person
'''
__tablename__ = 'person'
person_id = db.Column(db.Integer, primary_key=True)
lname = db.Column(db.String(32))
fname = db.Column(db.String(32))
timestamp = db.Column(
db.DateTime,
default=datetime.utcnow,
onupdate=datetime.utcnow
)
# Set up the relationship between Person and Note
notes = db.relationship(
# 'Note' defines what the SQLAlchemy class Person is related to
        # We use a string as a forward reference, which lets us refer to a class
        # (Note) that isn't defined until later in the code
'Note',
# backref='person' creates a back reference to person
# Each instance of Note will contain a person attribute which
# references the parent object (Person)
# Having a reference to the parent in the child can be useful if your
# code iterates over notes and has to include info about the parent
# (this happens frequently in display rendering code)
backref='person',
# cascade determines how to treat Note instances when changes are made
# to the Person instance
# e.g. When a Person is deleted, this parameter tells SQLAlchemy to
# delete all Note instances associated with it
cascade='all, delete, delete-orphan',
# single_parent=True is required if delete-orphan is part of the
# cascade parameter
        # This tells SQLAlchemy not to allow orphaned Note instances to exist
# because each Note has a single parent
single_parent=True,
        # order_by tells SQLAlchemy how to sort the Note instances associated
# with a Person. By default the notes attribute list will contain Note
# objects in an unknown order. desc() sorts notes in descending order
# (ascending is the default)
order_by='desc(Note.timestamp)'
)
class Note(db.Model):
'''
Class used to represent a Note
Parent: db.Model
Attributes
----------
__tablename__ : str
A string that states the name of the table
note_id : int
The unique id of a note in the note table
person_id : int
A number corresponding to the owner of the note
content : str
The actual text of the note
timestamp : datetime
The UTC timestamp of when a note was added/updated to the table
'''
__tablename__ = 'note'
note_id = db.Column(db.Integer, primary_key=True)
# Relate the Note class to the Person class using person.person_id
# This and Person.notes are how SQLAlchemy knows what to do when
# interacting with Person and Note objects
person_id = db.Column(db.Integer, db.ForeignKey('person.person_id'))
    # nullable=False means content may not be NULL, so a note cannot be created without text
content = db.Column(db.String, nullable=False)
timestamp = db.Column(
db.DateTime,
default=datetime.utcnow,
onupdate=datetime.utcnow
)
class PersonSchema(ma.ModelSchema):
'''
Defines how the attributes of Person will be converted into JSON-friendly
formats
Parent: ma.ModelSchema
Attributes
----------
notes : list
List of notes related to a Person, default is empty list
'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
class Meta:
'''
Required by ModelSchema. Used to find the SQLAlchemy model Person and
the db.session in order to extract Person attributes and their types in
order to serialize/deserialize them.
Attributes
----------
model : Person
The SQLAlchemy model to use to serialize/deserialize data
sqla_session: db.session
The database session to use to introspect and determine attribute
data types
'''
model = Person
sqla_session = db.session
# RECALL: many = True indicates a one-to-many relationship, so Marshmallow
# will serialize all related notes
notes = fields.Nested('PersonNoteSchema', default=[], many=True)
# The PersonNoteSchema class defines what a Note object looks like as
# Marshmallow serializes the notes list
class PersonNoteSchema(ma.ModelSchema):
'''
This class exists to get around a recursion issue
Parent: ma.ModelSchema
Attributes
----------
note_id : int
The unique id of a note in the note table
person_id : int
A number corresponding to the owner of the note
content : str
The actual text of the note
timestamp : str
The string representation of a Person/Note timestamp
'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
note_id = fields.Int()
person_id = fields.Int()
content = fields.Str()
timestamp = fields.Str()
class NoteSchema(ma.ModelSchema):
'''
Defines how the attributes of Note will be converted into JSON-friendly
formats
Parent: ma.ModelSchema
Attributes
----------
person : Person
Person related to a Note, default is None
'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
class Meta:
'''
Required by ModelSchema. Used to find the SQLAlchemy model Note and
the db.session in order to extract Note attributes and their types in
order to serialize/deserialize them.
Attributes
----------
model : Note
The SQLAlchemy model to use to serialize/deserialize data
sqla_session: db.session
The database session to use to introspect and determine attribute
data types
'''
model = Note
sqla_session = db.session
# This attribute comes from the db.relationship definition parameter
# backref='person'. It is nested but because it doesn't have a many=True
# parameter, there is only a single person connected
person = fields.Nested('NotePersonSchema', default=None)
class NotePersonSchema(ma.ModelSchema):
'''
This class exists to get around a recursion issue
Parent: ma.ModelSchema
Attributes
----------
person_id : int
The unique id of a person in the person table
lname : str
Last name corresponding to the owner of the note
fname : str
First name corresponding to the owner of the note
timestamp : str
The string representation of a Person/Note timestamp
'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
person_id = fields.Int()
lname = fields.Str()
fname = fields.Str()
timestamp = fields.Str()
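# Illustrative usage sketch (hypothetical names, not executed by this module):
# assuming an application context, PersonSchema serializes a Person together
# with its nested notes.
def _demo_person_dump():
    person = Person(fname="Ada", lname="Lovelace")
    person.notes.append(Note(content="First note"))
    db.session.add(person)
    db.session.commit()
    # On marshmallow 2.x dump() returns a MarshalResult; use `.data` there.
    return PersonSchema().dump(person)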
|
py
|
1a5d8f1eb9e20a056a3a347a8a93387681f76e00
|
import argparse
import pandas as pd
import numpy as np
import param
import os
def preprocess_sam(r1_sam, r2_sam):
"""
preprocess sam files
"""
#if not os.path.isfile(r1_sam) or not os.path.isfile(r2_sam):
# print("file doesn't exist")
# exit(0)
dir_name = os.path.dirname(r1_sam)
r1_basename = os.path.basename(r1_sam)
r2_basename = os.path.basename(r2_sam)
sorted_r1 = os.path.join(dir_name, r1_basename.replace(".sam", "_sorted.sam"))
sort_r1 = param.SAMTOOLS + "sort -n -o " + sorted_r1 + " " + r1_sam
sorted_r2 = os.path.join(dir_name, r2_basename.replace(".sam","_sorted.sam"))
sort_r2 = param.SAMTOOLS + "sort -n -o " + sorted_r2 + " " + r2_sam
# remove headers
r1 = os.path.join(dir_name, r1_basename.replace(".sam", "_noh.sam"))
r2 = os.path.join(dir_name, r2_basename.replace(".sam", "_noh.sam"))
os.system(sort_r1)
os.system(sort_r2)
#os.system("rm "+r1_sam)
#os.system("rm "+r2_sam)
os.system("grep -v \"^@\" "+sorted_r1+" > "+r1)
os.system("grep -v \"^@\" "+sorted_r2+" > "+r2)
    r1_csv = r1.replace(".sam", ".csv")
    r2_csv = r2.replace(".sam", ".csv")
os.system("cut -f 1-5 "+r1+" > "+ r1_csv)
os.system("cut -f 1-5 "+r2+" > "+ r2_csv)
os.system("rm "+r1)
os.system("rm "+r2)
return r1_csv, r2_csv
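# Optional hardening sketch (not used above): the samtools sort step can be
# expressed with subprocess to avoid shell string concatenation. This assumes
# the samtools argument is a plain executable path rather than a command prefix.
def _sort_by_name(samtools, in_sam, out_sam):
    import subprocess
    subprocess.run([samtools, "sort", "-n", "-o", out_sam, in_sam], check=True)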
def read_count_hap(r1_csv, r2_csv, DB_genes):
empty_matrix = pd.DataFrame(0, index = DB_genes, columns = DB_genes)
f1 = open(r1_csv, "rb")
f2 = open(r2_csv, "rb")
i = True
lines = 0
pairs = {}
fail = 0
count = 0
while 1:
r1_line = f1.readline() # uptag
r2_line = f2.readline() # dntag
if r1_line == "" or r2_line == "":
i = False
print("End of file")
break
r1_line = r1_line.strip().split("\t")
r2_line = r2_line.strip().split("\t")
if r1_line[0] != r2_line[0]:
i = False
print("# READ ID DOES NOT MATCH #")
break
if int(r1_line[4]) < param.cut_off or int(r2_line[4]) < param.cut_off: # check quality
fail += 1
continue
if r1_line[2] == "*" or r2_line[2] =="*":
fail +=1
continue
r1_name = r1_line[2].split(";")
r2_name = r2_line[2].split(";")
if r1_name[-1] != r2_name[-1]:
count+=1
pairs[(r2_name[1], r1_name[1])] = pairs.get((r2_name[1], r1_name[1]), 0) + 1
matrix = (pd.Series(pairs)
.unstack(fill_value=0)
.T
.reindex(index=empty_matrix.index, columns=empty_matrix.columns, fill_value=0))
f1.close()
f2.close()
diag = pd.Series(np.diag(matrix), index=[matrix.index, matrix.columns])
print(diag)
return diag
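# Minimal illustration (toy data, not part of the pipeline): the pairs-dict ->
# matrix -> diagonal pattern used in read_count_hap, shown with two made-up
# genes so the unstack/reindex/diag steps are easier to follow.
def _demo_pairs_to_diag():
    genes = ["geneA", "geneB"]
    empty = pd.DataFrame(0, index=genes, columns=genes)
    pairs = {("geneA", "geneA"): 5, ("geneA", "geneB"): 2}
    matrix = (pd.Series(pairs)
              .unstack(fill_value=0)
              .T
              .reindex(index=empty.index, columns=empty.columns, fill_value=0))
    # The diagonal holds counts where up-tag and dn-tag map to the same gene.
    return pd.Series(np.diag(matrix), index=[matrix.index, matrix.columns])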
def read_DB(hDB):
"""
get a list of db gene from hDB summary
"""
summary = pd.read_table(hDB, sep="\t")
DB_genes = summary.Locus.tolist()
return DB_genes
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='read count from sam files')
parser.add_argument("--r1sam", help="sam file for read one")
parser.add_argument("--r2sam", help="sam file for read two")
parser.add_argument("--mode", help="human or yeast")
parser.add_argument("--r1csv", help="csv file for read one")
parser.add_argument("--r2csv", help="csv file for read two")
parser.add_argument("--prefix", help= "output prefix")
args = parser.parse_args()
prefix = args.prefix
r1_sam = args.r1sam
r2_sam = args.r2sam
if r1_sam:
r1_csv, r2_csv = preprocess_sam(r1_sam, r2_sam)
DB_genes = read_DB(param.hDB_summary)
diag = read_count_hap(r1_csv, r2_csv, DB_genes)
else:
r1_csv = args.r1csv
r2_csv = args.r2csv
DB_genes = read_DB(param.hDB_summary)
diag = read_count_hap(r1_csv, r2_csv, DB_genes)
diag.to_csv(prefix+"_matrix.csv")
|
py
|
1a5d8ff4ce46e796ee75c4ea88a68d18bfcfb541
|
from collections.abc import (
    Mapping,
)
import json
import os
import warnings
from cytoolz import (
dissoc,
)
from eth_account._utils.keyfile import (
create_keyfile_json,
decode_keyfile_json,
)
from eth_keys import (
KeyAPI,
keys,
)
from eth_keys.exceptions import (
ValidationError,
)
from eth_utils.curried import (
combomethod,
hexstr_if_str,
is_dict,
keccak,
text_if_str,
to_bytes,
to_int,
)
from hexbytes import (
HexBytes,
)
from eth_account._utils.structured_data.signing import (
hash_of_signed_transaction,
sign_message_hash,
sign_transaction_dict,
to_standard_signature_bytes,
to_standard_v,
)
from client.transactions import (
Transaction,
vrs_from,
)
from eth_account.datastructures import (
AttributeDict,
)
from eth_account.messages import (
SignableMessage,
_hash_eip191_message,
)
from eth_account.signers.local import (
LocalAccount,
)
class Account(object):
"""
The primary entry point for working with Ethereum private keys.
It does **not** require a connection to an Ethereum node.
"""
_keys = keys
_default_kdf = os.getenv('ETH_ACCOUNT_KDF', 'scrypt')
@combomethod
def create(self, extra_entropy=''):
r"""
Creates a new private key, and returns it as a :class:`~eth_account.local.LocalAccount`.
:param extra_entropy: Add extra randomness to whatever randomness your OS can provide
:type extra_entropy: str or bytes or int
:returns: an object with private key and convenience methods
.. code-block:: python
>>> from eth_account import Account
>>> acct = Account.create('KEYSMASH FJAFJKLDSKF7JKFDJ 1530')
>>> acct.address
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
>>> acct.key
b"\xb2\}\xb3\x1f\xee\xd9\x12''\xbf\t9\xdcv\x9a\x96VK-\xe4\xc4rm\x03[6\xec\xf1\xe5\xb3d"
# These methods are also available: sign_message(), sign_transaction(), encrypt()
# They correspond to the same-named methods in Account.*
# but without the private key argument
"""
extra_key_bytes = text_if_str(to_bytes, extra_entropy)
key_bytes = keccak(os.urandom(32) + extra_key_bytes)
return self.from_key(key_bytes)
@staticmethod
def decrypt(keyfile_json, password):
"""
Decrypts a private key that was encrypted using an Ethereum client or
:meth:`~Account.encrypt`.
:param keyfile_json: The encrypted key
:type keyfile_json: dict or str
:param str password: The password that was used to encrypt the key
:returns: the raw private key
:rtype: ~hexbytes.main.HexBytes
.. code-block:: python
>>> encrypted = {
'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
'crypto': {'cipher': 'aes-128-ctr',
'cipherparams': {'iv': '78f214584844e0b241b433d7c3bb8d5f'},
'ciphertext': 'd6dbb56e4f54ba6db2e8dc14df17cb7352fdce03681dd3f90ce4b6c1d5af2c4f',
'kdf': 'pbkdf2',
'kdfparams': {'c': 1000000,
'dklen': 32,
'prf': 'hmac-sha256',
'salt': '45cf943b4de2c05c2c440ef96af914a2'},
'mac': 'f5e1af09df5ded25c96fcf075ada313fb6f79735a914adc8cb02e8ddee7813c3'},
'id': 'b812f3f9-78cc-462a-9e89-74418aa27cb0',
'version': 3}
>>> import getpass
>>> Account.decrypt(encrypted, getpass.getpass())
HexBytes('0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364')
"""
if isinstance(keyfile_json, str):
keyfile = json.loads(keyfile_json)
elif is_dict(keyfile_json):
keyfile = keyfile_json
else:
raise TypeError("The keyfile should be supplied as a JSON string, or a dictionary.")
password_bytes = text_if_str(to_bytes, password)
return HexBytes(decode_keyfile_json(keyfile, password_bytes))
@classmethod
def encrypt(cls, private_key, password, kdf=None, iterations=None):
"""
Creates a dictionary with an encrypted version of your private key.
To import this keyfile into Ethereum clients like geth and parity:
encode this dictionary with :func:`json.dumps` and save it to disk where your
client keeps key files.
:param private_key: The raw private key
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:param str password: The password which you will need to unlock the account in your client
:param str kdf: The key derivation function to use when encrypting your private key
:param int iterations: The work factor for the key derivation function
:returns: The data to use in your encrypted file
:rtype: dict
If kdf is not set, the default key derivation function falls back to the
environment variable :envvar:`ETH_ACCOUNT_KDF`. If that is not set, then
'scrypt' will be used as the default.
.. code-block:: python
>>> import getpass
>>> encrypted = Account.encrypt(
0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364,
getpass.getpass()
)
{
'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
'crypto': {
'cipher': 'aes-128-ctr',
'cipherparams': {
'iv': '0b7845a5c3597d3d378bde9b7c7319b7'
},
'ciphertext': 'a494f1feb3c854e99c1ff01e6aaa17d43c0752009073503b908457dc8de5d2a5', # noqa: E501
'kdf': 'scrypt',
'kdfparams': {
'dklen': 32,
'n': 262144,
'p': 8,
'r': 1,
'salt': '13c4a48123affaa29189e9097726c698'
},
'mac': 'f4cfb027eb0af9bd7a320b4374a3fa7bef02cfbafe0ec5d1fd7ad129401de0b1'
},
'id': 'a60e0578-0e5b-4a75-b991-d55ec6451a6f',
'version': 3
}
>>> with open('my-keyfile', 'w') as f:
f.write(json.dumps(encrypted))
"""
# print(private_key)
if isinstance(private_key, keys.PrivateKey):
key_bytes = private_key.to_bytes()
# print("private_key:to_bytes",len(private_key))
else:
key_bytes = HexBytes(private_key)
# print("private_key:HexBytes", len(private_key))
if kdf is None:
kdf = cls._default_kdf
password_bytes = text_if_str(to_bytes, password)
assert len(key_bytes) == 32
return create_keyfile_json(key_bytes, password_bytes, kdf=kdf, iterations=iterations)
@combomethod
def privateKeyToAccount(self, private_key):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.from_key`.
This method will be removed in v0.5
"""
warnings.warn(
"privateKeyToAccount is deprecated in favor of from_key",
category=DeprecationWarning,
)
return self.from_key(private_key)
@combomethod
def from_key(self, private_key):
r"""
Returns a convenient object for working with the given private key.
:param private_key: The raw private key
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:return: object with methods for signing and encrypting
:rtype: LocalAccount
.. code-block:: python
>>> acct = Account.from_key(
0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364)
>>> acct.address
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
>>> acct.key
b"\xb2\}\xb3\x1f\xee\xd9\x12''xbf\t9\xdcv\x9a\x96VK-\xe4\xc4rm\x03[6\xec\xf1\xe5\xb3d"
# These methods are also available: sign_message(), sign_transaction(), encrypt()
# They correspond to the same-named methods in Account.*
# but without the private key argument
"""
key = self._parsePrivateKey(private_key)
return LocalAccount(key, self)
@combomethod
def recover_message(self, signable_message: SignableMessage, vrs=None, signature=None):
r"""
Get the address of the account that signed the given message.
You must specify exactly one of: vrs or signature
:param signable_message: the message that was signed
:param vrs: the three pieces generated by an elliptic curve signature
:type vrs: tuple(v, r, s), each element is hex str, bytes or int
:param signature: signature bytes concatenated as r+s+v
:type signature: hex str or bytes or int
:returns: address of signer, hex-encoded & checksummed
:rtype: str
.. code-block:: python
>>> from eth_account.messages import encode_defunct
>>> message = encode_defunct(text="I♥SF")
>>> vrs = (
28,
'0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3',
'0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce')
>>> Account.recover_message(message, vrs=vrs)
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
# All of these recover calls are equivalent:
# variations on vrs
>>> vrs = (
'0x1c',
'0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3',
'0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce')
>>> Account.recover_message(message, vrs=vrs)
>>> vrs = (
b'\x1c',
b'\\xe6\\xca\\x9b\\xbaX\\xc8\\x86\\x11\\xfa\\xd6jl\\xe8\\xf9\\x96\\x90\\x81\\x95Y8\\x07\\xc4\\xb3\\x8b\\xd5(\\xd2\\xcf\\xf0\\x9dN\\xb3', # noqa: E501
b'>[\\xfb\\xbfM>9\\xb1\\xa2\\xfd\\x81jv\\x80\\xc1\\x9e\\xbe\\xba\\xf3\\xa1A\\xb29\\x93J\\xd4<\\xb3?\\xce\\xc8\\xce') # noqa: E501
>>> Account.recover_message(message, vrs=vrs)
>>> # Caution about this approach: likely problems if there are leading 0s
>>> vrs = (
0x1c,
0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3,
0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce)
>>> Account.recover_message(message, vrs=vrs)
# variations on signature
>>> signature = '0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c' # noqa: E501
>>> Account.recover_message(message, signature=signature)
>>> signature = b'\\xe6\\xca\\x9b\\xbaX\\xc8\\x86\\x11\\xfa\\xd6jl\\xe8\\xf9\\x96\\x90\\x81\\x95Y8\\x07\\xc4\\xb3\\x8b\\xd5(\\xd2\\xcf\\xf0\\x9dN\\xb3>[\\xfb\\xbfM>9\\xb1\\xa2\\xfd\\x81jv\\x80\\xc1\\x9e\\xbe\\xba\\xf3\\xa1A\\xb29\\x93J\\xd4<\\xb3?\\xce\\xc8\\xce\\x1c' # noqa: E501
>>> Account.recover_message(message, signature=signature)
>>> # Caution about this approach: likely problems if there are leading 0s
>>> signature = 0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c # noqa: E501
>>> Account.recover_message(message, signature=signature)
"""
message_hash = _hash_eip191_message(signable_message)
return self._recover_hash(message_hash, vrs, signature)
@combomethod
def recoverHash(self, message_hash, vrs=None, signature=None):
"""
Get the address of the account that signed the message with the given hash.
You must specify exactly one of: vrs or signature
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.recover_message`.
This method might be removed as early as v0.5
:param message_hash: the hash of the message that you want to verify
:type message_hash: hex str or bytes or int
:param vrs: the three pieces generated by an elliptic curve signature
:type vrs: tuple(v, r, s), each element is hex str, bytes or int
:param signature: signature bytes concatenated as r+s+v
:type signature: hex str or bytes or int
:returns: address of signer, hex-encoded & checksummed
:rtype: str
"""
warnings.warn(
"recoverHash is deprecated in favor of recover_message",
category=DeprecationWarning,
)
return self._recover_hash(message_hash, vrs, signature)
@combomethod
def _recover_hash(self, message_hash, vrs=None, signature=None):
hash_bytes = HexBytes(message_hash)
if len(hash_bytes) != 32:
raise ValueError("The message hash must be exactly 32-bytes")
if vrs is not None:
v, r, s = map(hexstr_if_str(to_int), vrs)
v_standard = to_standard_v(v)
signature_obj = self._keys.Signature(vrs=(v_standard, r, s))
elif signature is not None:
signature_bytes = HexBytes(signature)
signature_bytes_standard = to_standard_signature_bytes(signature_bytes)
signature_obj = self._keys.Signature(signature_bytes=signature_bytes_standard)
else:
raise TypeError("You must supply the vrs tuple or the signature bytes")
pubkey = signature_obj.recover_public_key_from_msg_hash(hash_bytes)
return pubkey.to_checksum_address()
@combomethod
def recoverTransaction(self, serialized_transaction):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.recover_transaction`.
This method will be removed in v0.5
"""
warnings.warn(
"recoverTransaction is deprecated in favor of recover_transaction",
category=DeprecationWarning,
)
return self.recover_transaction(serialized_transaction)
@combomethod
def recover_transaction(self, serialized_transaction):
"""
Get the address of the account that signed this transaction.
:param serialized_transaction: the complete signed transaction
:type serialized_transaction: hex str, bytes or int
:returns: address of signer, hex-encoded & checksummed
:rtype: str
.. code-block:: python
>>> raw_transaction = '0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428', # noqa: E501
>>> Account.recover_transaction(raw_transaction)
'0x2c7536E3605D9C16a7a3D7b1898e529396a65c23'
"""
txn_bytes = HexBytes(serialized_transaction)
txn = Transaction.from_bytes(txn_bytes)
msg_hash = hash_of_signed_transaction(txn)
return self._recover_hash(msg_hash, vrs=vrs_from(txn))
def setKeyBackend(self, backend):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.set_key_backend`.
This method will be removed in v0.5
"""
warnings.warn(
"setKeyBackend is deprecated in favor of set_key_backend",
category=DeprecationWarning,
)
self.set_key_backend(backend)
def set_key_backend(self, backend):
"""
Change the backend used by the underlying eth-keys library.
*(The default is fine for most users)*
:param backend: any backend that works in
`eth_keys.KeyApi(backend) <https://github.com/ethereum/eth-keys/#keyapibackendnone>`_
"""
self._keys = KeyAPI(backend)
@combomethod
def sign_message(self, signable_message: SignableMessage, private_key):
r"""
Sign the provided message.
This API supports any messaging format that will encode to EIP-191_ messages.
If you would like historical compatibility with
:meth:`w3.eth.sign() <web3.eth.Eth.sign>`
you can use :meth:`~eth_account.messages.encode_defunct`.
Other options are the "validator", or "structured data" standards. (Both of these
are in *DRAFT* status currently, so be aware that the implementation is not
guaranteed to be stable). You can import all supported message encoders in
``eth_account.messages``.
:param signable_message: the encoded message for signing
:param private_key: the key to sign the message with
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: Various details about the signature - most importantly the fields: v, r, and s
:rtype: ~eth_account.datastructures.AttributeDict
.. code-block:: python
>>> msg = "I♥SF"
>>> from eth_account.messages import encode_defunct
>>> msghash = encode_defunct(text=msg)
SignableMessage(version=b'E', header=b'thereum Signed Message:\n6', body=b'I\xe2\x99\xa5SF')
>>> # If you're curious about the internal fields of SignableMessage, take a look at EIP-191, linked above
>>> key = "0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364"
>>> Account.sign_message(msghash, key)
{'messageHash': HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750'), # noqa: E501
'r': 104389933075820307925104709181714897380569894203213074526835978196648170704563,
's': 28205917190874851400050446352651915501321657673772411533993420917949420456142,
'signature': HexBytes('0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c'), # noqa: E501
'v': 28}
.. _EIP-191: https://eips.ethereum.org/EIPS/eip-191
"""
message_hash = _hash_eip191_message(signable_message)
return self._sign_hash(message_hash, private_key)
@combomethod
def signHash(self, message_hash, private_key):
"""
.. WARNING:: *Never* sign a hash that you didn't generate,
it can be an arbitrary transaction. For example, it might
send all of your account's ether to an attacker.
Instead, prefer :meth:`~eth_account.account.Account.sign_message`,
which cannot accidentally sign a transaction.
Sign the provided hash.
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.sign_message`.
This method will be removed in v0.5
:param message_hash: the 32-byte message hash to be signed
:type message_hash: hex str, bytes or int
:param private_key: the key to sign the message with
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: Various details about the signature - most
importantly the fields: v, r, and s
:rtype: ~eth_account.datastructures.AttributeDict
"""
warnings.warn(
"signHash is deprecated in favor of sign_message",
category=DeprecationWarning,
)
return self._sign_hash(message_hash, private_key)
@combomethod
def _sign_hash(self, message_hash, private_key):
msg_hash_bytes = HexBytes(message_hash)
if len(msg_hash_bytes) != 32:
raise ValueError("The message hash must be exactly 32-bytes")
key = self._parsePrivateKey(private_key)
(v, r, s, eth_signature_bytes) = sign_message_hash(key, msg_hash_bytes)
return AttributeDict({
'messageHash': msg_hash_bytes,
'r': r,
's': s,
'v': v,
'signature': HexBytes(eth_signature_bytes),
})
@combomethod
def sign_transaction(self, transaction_dict, private_key):
"""
Sign a transaction using a local private key. Produces signature details
and the hex-encoded transaction suitable for broadcast using
:meth:`w3.eth.sendRawTransaction() <web3.eth.Eth.sendRawTransaction>`.
Create the transaction dict for a contract method with
`my_contract.functions.my_function().buildTransaction()
<http://web3py.readthedocs.io/en/latest/contracts.html#methods>`_
:param dict transaction_dict: the transaction with keys:
nonce, chainId, to, data, value, gas, and gasPrice.
:param private_key: the private key to sign the data with
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: Various details about the signature - most
importantly the fields: v, r, and s
:rtype: AttributeDict
.. code-block:: python
>>> transaction = {
# Note that the address must be in checksum format or native bytes:
'to': '0xF0109fC8DF283027b6285cc889F5aA624EaC1F55',
'value': 1000000000,
'gas': 2000000,
'gasPrice': 234567897654321,
'nonce': 0,
'chainId': 1
}
>>> key = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
>>> signed = Account.sign_transaction(transaction, key)
{'hash': HexBytes('0x6893a6ee8df79b0f5d64a180cd1ef35d030f3e296a5361cf04d02ce720d32ec5'),
'r': 4487286261793418179817841024889747115779324305375823110249149479905075174044,
'rawTransaction': HexBytes('0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428'), # noqa: E501
's': 30785525769477805655994251009256770582792548537338581640010273753578382951464,
'v': 37}
>>> w3.eth.sendRawTransaction(signed.rawTransaction)
"""
if not isinstance(transaction_dict, Mapping):
raise TypeError("transaction_dict must be dict-like, got %r" % transaction_dict)
account = self.from_key(private_key)
# allow from field, *only* if it matches the private key
if 'from' in transaction_dict:
if transaction_dict['from'] == account.address:
sanitized_transaction = dissoc(transaction_dict, 'from')
else:
raise TypeError("from field must match key's %s, but it was %s" % (
account.address,
transaction_dict['from'],
))
else:
sanitized_transaction = transaction_dict
# sign transaction
(
v,
r,
s,
rlp_encoded,
) = sign_transaction_dict(account._key_obj, sanitized_transaction)
transaction_hash = keccak(rlp_encoded)
return AttributeDict({
'rawTransaction': HexBytes(rlp_encoded),
'hash': HexBytes(transaction_hash),
'r': r,
's': s,
'v': v,
})
@combomethod
def _parsePrivateKey(self, key):
"""
Generate a :class:`eth_keys.datatypes.PrivateKey` from the provided key. If the
key is already of type :class:`eth_keys.datatypes.PrivateKey`, return the key.
:param key: the private key from which a :class:`eth_keys.datatypes.PrivateKey`
will be generated
:type key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: the provided key represented as a :class:`eth_keys.datatypes.PrivateKey`
"""
if isinstance(key, self._keys.PrivateKey):
return key
try:
return self._keys.PrivateKey(HexBytes(key))
except ValidationError as original_exception:
raise ValueError(
"The private key must be exactly 32 bytes long, instead of "
"%d bytes." % len(key)
) from original_exception
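# Usage sketch (illustrative only, not executed on import): a full
# create / sign / recover round trip using the class above. encode_defunct
# comes from eth_account.messages, which this module already depends on.
def _demo_sign_and_recover():
    from eth_account.messages import encode_defunct
    acct = Account.create("some extra entropy")
    message = encode_defunct(text="hello")
    signed = Account.sign_message(message, acct.key)
    recovered = Account.recover_message(message, signature=signed.signature)
    assert recovered == acct.address
    return recovered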
|
py
|
1a5d9030cb20e82d9d32bd3eac6859c5cbd0be1d
|
import flee.flee as flee
import datamanager.handle_refugee_data as handle_refugee_data
import numpy as np
import outputanalysis.analysis as a
"""
Generation 1 code. Incorporates only distance, travel always takes one day.
"""
if __name__ == "__main__":
print("Testing basic data handling and simulation kernel.")
flee.SimulationSettings.MinMoveSpeed=10.0
flee.SimulationSettings.MaxMoveSpeed=10.0
end_time = 100
e = flee.Ecosystem()
l1 = e.addLocation("A", movechance=1.0)
l2 = e.addLocation("B", movechance=0.0)
e.linkUp("A","B","5.0")
for t in range(0,end_time):
# Insert refugee agents
e.addAgent(location=l1)
# Propagate the model by one time step.
e.evolve()
if t==2:
assert e.close_location("B")
print(t, l1.numAgents, l2.numAgents)
e.printComplete()
assert t==99
assert l2.numAgents==3 # Location is closed after 3 steps, refugees underway will still arrive but others are blocked.
assert l1.numAgents==97
print("Test successful!")
|
py
|
1a5d91b8fc0e55c00680db85a5bd1571e7fe2e54
|
#!/usr/bin/env python3
import requests
import socket
import random
import string
import concurrent.futures
from paramiko.client import SSHClient
import paramiko
# VARIABLES
ports = [80,443,445,8080,3389,22,21]
#ports = list(range(1,65536))
domain = "letmeoutofyour.net"
verbose = False
printOpen = True
printClosed = True
threadcount = 100
random.shuffle(ports)
# Verbosity - set to False above if you don't want output
def vprint(status):
if verbose == True:
print(status)
# Print open ports
def print_open(status):
if printOpen == True:
print("[+] " + status)
# Print closed ports
def print_closed(status):
if printClosed == True:
print("[-] " + status)
def check_web(base, domain, port):
vprint("Testing: " + base + domain + ":" + str(port))
try:
r = requests.get(base + domain + ":" + str(port), timeout=1)
result = r.text.strip()
if result == "w00tw00t":
print_open("Success! " + base + domain + ":" + str(port))
except requests.exceptions.ConnectionError:
print_closed("Failed! " + base + domain + ":" + str(port))
def check_ssh(domain, port):
client = SSHClient()
vprint("Trying SSH to " + domain + " Port: " + str(port))
try:
client.connect(domain, port, timeout=1)
except paramiko.ssh_exception.SSHException:
pass
except socket.timeout:
print_closed("Failed! SSH to " + domain + " Port: " + str(port))
return
key = client.get_transport().get_remote_server_key()
if key.get_base64() == "AAAAC3NzaC1lZDI1NTE5AAAAIIrfkWLMzwGKRliVsJOjm5OJRJo6AZt7NsqAH8bk9tYc":
print_open("Success! SSH to " + domain + " Port: " + str(port))
with concurrent.futures.ThreadPoolExecutor(threadcount) as executor:
for port in ports:
# Test HTTP
base = "http://"
executor.submit(check_web, base, domain, port)
# Test HTTPS
base = "https://"
executor.submit(check_web, base, domain, port)
# Test SSH
executor.submit(check_ssh, domain, port)
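# Single-port sketch (illustrative): the same checks can be run synchronously
# for one port when debugging, without the thread pool.
def check_single_port(port):
    check_web("http://", domain, port)
    check_web("https://", domain, port)
    check_ssh(domain, port)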
|
py
|
1a5d92c93bc8d89bc4e38e2b32e2e3e8e9637620
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os, json, time, sys
import requests as req
from jinja2 import Environment, FileSystemLoader
loader = FileSystemLoader(["./nuvolaris/templates", "./nuvolaris/files"])
env = Environment(loader=loader)
import nuvolaris.config as cfg
class CouchDB:
def __init__(self):
self.db_protocol = "http"
self.db_prefix = "nuvolaris_"
self.db_host = cfg.get("couchdb.host")
self.db_username = cfg.get("couchdb.admin.user")
self.db_password = cfg.get("couchdb.admin.password")
self.db_port = "5984"
self.db_auth = req.auth.HTTPBasicAuth(self.db_username,self.db_password)
self.db_url = f"{self.db_protocol}://{self.db_host}:{self.db_port}"
self.db_base = f"{self.db_url}/{self.db_prefix}"
def wait_db_ready(self, max_seconds):
start = time.time()
        while time.time() - start < max_seconds:
try:
r = req.get(f"{self.db_url}/_utils", timeout=1)
if r.status_code == 200:
return True
print(r.status_code)
except:
print(".", end='', file=sys.stderr)
pass
return False
# check if database exists, return boolean
def check_db(self, database):
url = f"{self.db_base}{database}"
r = req.head(url, auth=self.db_auth)
return r.status_code == 200
# delete database, return true if ok
def delete_db(self, database):
url = f"{self.db_base}{database}"
r = req.delete(url, auth=self.db_auth)
return r.status_code == 200
# create db, return true if ok
def create_db(self, database):
url = f"{self.db_base}{database}"
r = req.put(url, auth=self.db_auth)
return r.status_code == 201
# database="subjects"
def recreate_db(self, database, recreate=False):
msg = "recreate_db:"
exists = self.check_db(database)
if recreate and exists:
msg += " deleted"
self.delete_db(database)
if recreate or not exists:
msg += " created"
self.create_db(database)
return msg
def get_doc(self, database, id, user=None, password="", no_auth=False):
url = f"{self.db_base}{database}/{id}"
if no_auth:
db_auth=None
elif user:
db_auth=req.auth.HTTPBasicAuth(user, password)
else:
db_auth = self.db_auth
r = req.get(url, auth=db_auth)
if r.status_code == 200:
return json.loads(r.text)
return None
def update_doc(self, database, doc):
if '_id' in doc:
url = f"{self.db_base}{database}/{doc['_id']}"
cur = self.get_doc(database, doc['_id'])
if cur and '_rev' in cur:
doc['_rev'] = cur['_rev']
r = req.put(url, auth=self.db_auth, json=doc)
else:
r = req.put(url, auth=self.db_auth, json=doc)
return r.status_code in [200,201]
return False
def delete_doc(self, database, id):
cur = self.get_doc(database, id)
if cur and '_rev' in cur:
url = f"{self.db_base}{database}/{cur['_id']}?rev={cur['_rev']}"
r = req.delete(url, auth=self.db_auth)
return r.status_code == 200
return False
def update_templated_doc(self, database, template, data):
tpl = env.get_template(template)
doc = json.loads(tpl.render(data))
return self.update_doc(database, doc)
def configure_single_node(self):
url = f"{self.db_url}/_cluster_setup"
data = {"action": "enable_single_node", "singlenode": True, "bind_address": "0.0.0.0", "port": 5984}
r = req.post(url, auth=self.db_auth, json=data)
return r.status_code == 201
def configure_no_reduce_limit(self):
url = f"{self.db_url}/_node/_local/_config/query_server_config/reduce_limit"
data=b'"false"'
r = req.put(url, auth=self.db_auth, data=data)
return r.status_code == 200
def add_user(self, username: str, password: str):
userpass = {"name": username, "password": password, "roles": [], "type": "user"}
url = f"{self.db_url}/_users/org.couchdb.user:{username}"
res = req.put(url, auth=self.db_auth, json=userpass)
return res.status_code in [200, 201, 421]
    def add_role(self, database: str, members: list[str] = [], admins: list[str] = []):
roles = {"admins": { "names": admins, "roles": [] }, "members": { "names": members, "roles": [] } }
url = f"{self.db_base}{database}/_security"
res = req.put(url, auth=self.db_auth, json=roles)
return res.status_code in [200, 201, 421]
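# Usage sketch (illustrative, hypothetical database/user names): a typical
# bootstrap flow for the wrapper above, assuming nuvolaris.config has been
# populated with couchdb.host and couchdb.admin.* before CouchDB() is built.
def _demo_bootstrap():
    db = CouchDB()
    if not db.wait_db_ready(60):
        raise RuntimeError("CouchDB did not become ready in time")
    db.configure_single_node()
    print(db.recreate_db("subjects", recreate=False))
    db.add_user("demo_user", "demo_password")
    db.add_role("subjects", members=["demo_user"])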
|
py
|
1a5d938f3f94aa0c8ae5bda60cfecd96d4bfd48d
|
from __future__ import division
#
# Author: Daniel Dittenhafer
#
# Created: May 15, 2019
#
# Description: Coursera Algorithms Greedy Algos, MST and Dynamic Programming
#
__author__ = 'Daniel Dittenhafer'
import collections
import copy
import heapq
import itertools
import os
import sys
from timeit import default_timer as timer
from src.utillib.graph import graph
"""
You should NOT assume that edge costs are positive, nor should you assume that
they are distinct.
Your task is to run Prim's minimum spanning tree algorithm on this graph. You
should report the overall cost of a minimum spanning tree --- an integer, which
may or may not be negative.
IMPLEMENTATION NOTES: This graph is small enough that the straightforward O(mn)
time implementation of Prim's algorithm should work fine. OPTIONAL: For those
of you seeking an additional challenge, try implementing a heap-based version.
The simpler approach, which should already give you a healthy speed-up, is to
maintain relevant edges in a heap (with keys = edge costs). The superior
approach stores the unprocessed vertices in the heap, as described in lecture.
Note this requires a heap that supports deletions, and you'll probably need to
maintain some kind of mapping between vertices and their positions in the heap.
"""
class min_spanning_tree:
def __init__(self):
pass
def run(self, g):
X = {}
T = []
avail = []
# Add the initial vertex
X[1] = 1
for e in g.edges:
avail.append(e)
done = False
while not done:
me = None
for e in avail:
if ( (X.has_key(e[0].id) and not X.has_key(e[1].id)) or
(X.has_key(e[1].id) and not X.has_key(e[0].id)) ):
if me is None:
me = e
elif me.weight > e.weight:
me = e
#elif (X.has_key(e[0].id) and X.has_key(e[1].id)):
# avail.remove(e)
T.append(me)
avail.remove(me)
if me[0].id in X:
X[me[1].id] = 1
else:
X[me[0].id] = 1
# Are we done yet?
done = len(X) == len(g.vertices)
mst_cost = 0
for e in T:
mst_cost += e.weight
return mst_cost, T
def load_stanford_algs_test_cases(tests, test_cases_folder):
for filename in os.listdir(test_cases_folder):
if filename[:5] != 'input':
continue
outputfile = filename.replace("input_", "output_")
with open(test_cases_folder + "\\" + outputfile) as fp:
expected_out = fp.read().split("\n")
output = []
for p in expected_out:
if len(p) > 0:
op = int(p)
if op != 0:
output.append(op)
tests.append((test_cases_folder + "\\" + filename, output))
def main():
tests_correct = 0
tests = [
# path to graph file, finishing times dict, leaders dict
#("D:\\Code\\Python\\py-sandbox\\data\\graph-small2-dijkstra.txt", [1,2,3,4,5,6,7], {}, [0,5,3,4,5,6,6])
]
load_test_cases = False
tests_correct = 0
if load_test_cases:
load_stanford_algs_test_cases(tests, "D:\\Code\\other\\stanford-algs\\testcases\\course3\\assignment1SchedulingAndMST\\question3")
# The real problem
tests.append(("D:\\Code\\Python\\py-sandbox\\data\\mst_edges.txt", [-3612829]))
# iterate over the test cases
for t in tests:
# load the graph data (while timing it)
start = timer()
g = graph()
g.load_data2(t[0], verbose=True, delim=" ")
end = timer()
print "loaded {0} in {1} secs".format(t[0], end - start)
m = min_spanning_tree()
start = timer()
res, tree = m.run(g)
end = timer()
print "mst of {0} in {1} secs = {2}/sec".format(res, end - start, len(g.vertices) / (end - start))
print res
#print tree
expected = t[1]
ok = len(expected) == 0 or res == expected[0]
if not ok:
print "ERROR! Expected {0}".format(expected[0])
else:
print "OK"
tests_correct += 1
print "{0} of {1} tests passed = {2}%".format(tests_correct, len(tests) * 2, (tests_correct / (len(tests) * 2)) * 100)
if __name__ == "__main__":
main()
|
py
|
1a5d93b89fbb6b7fcab00c2ab3ed3975e93f8cbc
|
info = {
"name": "sw",
"date_order": "DMY",
"january": [
"jan",
"januari"
],
"february": [
"feb",
"februari"
],
"march": [
"mac",
"machi"
],
"april": [
"apr",
"aprili"
],
"may": [
"mei"
],
"june": [
"jun",
"juni"
],
"july": [
"jul",
"julai"
],
"august": [
"ago",
"agosti"
],
"september": [
"sep",
"septemba"
],
"october": [
"okt",
"oktoba"
],
"november": [
"nov",
"novemba"
],
"december": [
"des",
"desemba"
],
"monday": [
"jumatatu"
],
"tuesday": [
"jumanne"
],
"wednesday": [
"jumatano"
],
"thursday": [
"alhamisi"
],
"friday": [
"ijumaa"
],
"saturday": [
"jumamosi"
],
"sunday": [
"jumapili"
],
"am": [
"am",
"asubuhi"
],
"pm": [
"mchana",
"pm"
],
"year": [
"mwaka"
],
"month": [
"mwezi"
],
"week": [
"wiki"
],
"day": [
"siku"
],
"hour": [
"saa"
],
"minute": [
"dak",
"dakika"
],
"second": [
"sek",
"sekunde"
],
"relative-type": {
"0 day ago": [
"leo"
],
"0 hour ago": [
"saa hii"
],
"0 minute ago": [
"dakika hii"
],
"0 month ago": [
"mwezi huu"
],
"0 second ago": [
"sasa hivi"
],
"0 week ago": [
"wiki hii"
],
"0 year ago": [
"mwaka huu"
],
"1 day ago": [
"jana"
],
"1 month ago": [
"mwezi uliopita"
],
"1 week ago": [
"wiki iliyopita"
],
"1 year ago": [
"mwaka uliopita"
],
"in 1 day": [
"kesho"
],
"in 1 month": [
"mwezi ujao"
],
"in 1 week": [
"wiki ijayo"
],
"in 1 year": [
"mwaka ujao"
]
},
"relative-type-regex": {
"\\1 day ago": [
"siku (\\d+) iliyopita",
"siku (\\d+) zilizopita"
],
"\\1 hour ago": [
"saa (\\d+) iliyopita",
"saa (\\d+) zilizopita"
],
"\\1 minute ago": [
"dakika (\\d+) iliyopita",
"dakika (\\d+) zilizopita"
],
"\\1 month ago": [
"miezi (\\d+) iliyopita",
"mwezi (\\d+) uliopita"
],
"\\1 second ago": [
"sekunde (\\d+) iliyopita",
"sekunde (\\d+) zilizopita"
],
"\\1 week ago": [
"wiki (\\d+) iliyopita",
"wiki (\\d+) zilizopita"
],
"\\1 year ago": [
"miaka (\\d+) iliyopita",
"mwaka (\\d+) uliopita"
],
"in \\1 day": [
"baada ya siku (\\d+)"
],
"in \\1 hour": [
"baada ya saa (\\d+)"
],
"in \\1 minute": [
"baada ya dakika (\\d+)"
],
"in \\1 month": [
"baada ya miezi (\\d+)",
"baada ya mwezi (\\d+)"
],
"in \\1 second": [
"baada ya sekunde (\\d+)"
],
"in \\1 week": [
"baada ya wiki (\\d+)"
],
"in \\1 year": [
"baada ya miaka (\\d+)",
"baada ya mwaka (\\d+)"
]
},
"locale_specific": {
"sw-CD": {
"name": "sw-CD",
"week": [
"juma"
]
},
"sw-KE": {
"name": "sw-KE"
},
"sw-UG": {
"name": "sw-UG"
}
},
"skip": [
" ",
".",
",",
";",
"-",
"/",
"'",
"|",
"@",
"[",
"]",
","
]
}
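# Minimal illustration (standalone; not necessarily how the consuming library
# applies these tables): the "relative-type-regex" patterns normalise Swahili
# phrases into the canonical keys, e.g. "siku 3 zilizopita" -> "3 day ago".
def _demo_relative_regex(text):
    import re
    for replacement, patterns in info["relative-type-regex"].items():
        for pattern in patterns:
            if re.fullmatch(pattern, text):
                return re.sub(pattern, replacement, text)
    return text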
|
py
|
1a5d93fbb74812f346005d32772236b50697aeab
|
# Copyright (c) 2019 Guo Yejun
#
# This file is part of FFmpeg.
#
# FFmpeg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# FFmpeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ==============================================================================
import tensorflow as tf
import numpy as np
import sys, struct
import convert_header as header
__all__ = ['convert_from_tensorflow']
class Operand(object):
IOTYPE_INPUT = 1
IOTYPE_OUTPUT = 2
IOTYPE_INTERMEDIATE = IOTYPE_INPUT | IOTYPE_OUTPUT
DTYPE_FLOAT = 1
DTYPE_UINT8 = 4
index = 0
def __init__(self, name, dtype, dims):
self.name = name
self.dtype = dtype
self.dims = dims
self.iotype = 0
self.used_count = 0
self.index = Operand.index
Operand.index = Operand.index + 1
self.iotype2str = {Operand.IOTYPE_INPUT: 'in', Operand.IOTYPE_OUTPUT: 'out', Operand.IOTYPE_INTERMEDIATE: 'inout'}
self.dtype2str = {Operand.DTYPE_FLOAT: 'DT_FLOAT', Operand.DTYPE_UINT8: 'DT_UINT8'}
def add_iotype(self, iotype):
self.iotype = self.iotype | iotype
if iotype == Operand.IOTYPE_INPUT:
self.used_count = self.used_count + 1
def __str__(self):
return "{}: (name: {}, iotype: {}, dtype: {}, dims: {}, used_count: {})".format(self.index,
self.name, self.iotype2str[self.iotype], self.dtype2str[self.dtype],
self.dims, self.used_count)
def __lt__(self, other):
return self.index < other.index
class TFConverter:
def __init__(self, graph_def, nodes, outfile, dump4tb):
self.graph_def = graph_def
self.nodes = nodes
self.outfile = outfile
self.dump4tb = dump4tb
self.layer_number = 0
self.output_names = []
self.name_node_dict = {}
self.edges = {}
self.conv_activations = {'Relu':0, 'Tanh':1, 'Sigmoid':2, 'None':3, 'LeakyRelu':4}
self.conv_paddings = {'VALID':0, 'SAME':1}
self.pool_paddings = {'VALID':0, 'SAME':1}
self.converted_nodes = set()
self.conv2d_scope_names = set()
self.conv2d_scopename_inputname_dict = {}
self.dense_scope_names = set()
self.dense_scopename_inputname_dict = {}
self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4,
'MathBinary':5, 'MathUnary':6, 'AvgPool':7, 'MatMul':8}
self.mathbin2code = {'Sub':0, 'Add':1, 'Mul':2, 'RealDiv':3, 'Minimum':4, 'FloorMod':5}
self.mathun2code = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3, 'Asin':4,
'Acos':5, 'Atan':6, 'Sinh':7, 'Cosh':8, 'Tanh':9, 'Asinh':10,
'Acosh':11, 'Atanh':12, 'Ceil':13, 'Floor':14, 'Round':15}
self.mirrorpad_mode = {'CONSTANT':0, 'REFLECT':1, 'SYMMETRIC':2}
self.name_operand_dict = {}
def add_operand(self, name, type):
node = self.name_node_dict[name]
if name not in self.name_operand_dict:
dtype = node.attr['dtype'].type
if dtype == 0:
dtype = node.attr['T'].type
dims = [-1,-1,-1,-1]
if 'shape' in node.attr:
dims[0] = node.attr['shape'].shape.dim[0].size
dims[1] = node.attr['shape'].shape.dim[1].size
dims[2] = node.attr['shape'].shape.dim[2].size
dims[3] = node.attr['shape'].shape.dim[3].size
operand = Operand(name, dtype, dims)
            self.name_operand_dict[name] = operand
self.name_operand_dict[name].add_iotype(type)
return self.name_operand_dict[name].index
def dump_for_tensorboard(self):
graph = tf.get_default_graph()
tf.import_graph_def(self.graph_def, name="")
tf.summary.FileWriter('/tmp/graph', graph)
print('graph saved, run "tensorboard --logdir=/tmp/graph" to see it')
def get_conv2d_params(self, conv2d_scope_name):
knode = self.name_node_dict[conv2d_scope_name + '/kernel']
bnode = self.name_node_dict[conv2d_scope_name + '/bias']
if conv2d_scope_name + '/dilation_rate' in self.name_node_dict:
dnode = self.name_node_dict[conv2d_scope_name + '/dilation_rate']
else:
dnode = None
        # the BiasAdd name may have been changed into the output name,
        # if activation is None and BiasAdd.next is the last op, which is Identity
if conv2d_scope_name + '/BiasAdd' in self.edges:
anode = self.edges[conv2d_scope_name + '/BiasAdd'][0]
if anode.op not in self.conv_activations:
anode = None
else:
anode = None
return knode, bnode, dnode, anode
def get_dense_params(self, dense_scope_name):
knode = self.name_node_dict[dense_scope_name + '/kernel']
bnode = self.name_node_dict.get(dense_scope_name + '/bias')
        # the BiasAdd name may have been changed into the output name,
        # if activation is None and BiasAdd.next is the last op, which is Identity
anode = None
if bnode:
if dense_scope_name + '/BiasAdd' in self.edges:
anode = self.edges[dense_scope_name + '/BiasAdd'][0]
if anode.op not in self.conv_activations:
anode = None
else:
anode = None
return knode, bnode, anode
def dump_complex_conv2d_to_file(self, node, f):
assert(node.op == 'Conv2D')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
scope_name = TFConverter.get_scope_name(node.name)
#knode for kernel, bnode for bias, dnode for dilation, anode for activation
knode, bnode, dnode, anode = self.get_conv2d_params(scope_name)
if dnode is not None:
dilation = struct.unpack('i', dnode.attr['value'].tensor.tensor_content[0:4])[0]
else:
dilation = 1
if anode is not None:
activation = anode.op
else:
activation = 'None'
padding = node.attr['padding'].s.decode("utf-8")
        # conv2d with dilation > 1 generates tens of nodes that are not easy to parse, so use this tricky workaround.
if dilation > 1 and scope_name + '/stack' in self.name_node_dict:
if self.name_node_dict[scope_name + '/stack'].op == "Const":
padding = 'SAME'
padding = self.conv_paddings[padding]
ktensor = knode.attr['value'].tensor
filter_height = ktensor.tensor_shape.dim[0].size
filter_width = ktensor.tensor_shape.dim[1].size
in_channels = ktensor.tensor_shape.dim[2].size
out_channels = ktensor.tensor_shape.dim[3].size
kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
kernel = kernel.reshape(filter_height, filter_width, in_channels, out_channels)
kernel = np.transpose(kernel, [3, 0, 1, 2])
has_bias = 1
np.array([self.op2code[node.op], dilation, padding, self.conv_activations[activation], in_channels, out_channels, filter_height, has_bias], dtype=np.uint32).tofile(f)
kernel.tofile(f)
btensor = bnode.attr['value'].tensor
if btensor.tensor_shape.dim[0].size == 1:
bias = struct.pack("f", btensor.float_val[0])
else:
bias = btensor.tensor_content
f.write(bias)
input_name = self.conv2d_scopename_inputname_dict[scope_name]
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
if anode is not None:
output_operand_index = self.add_operand(anode.name, Operand.IOTYPE_OUTPUT)
else:
output_operand_index = self.add_operand(self.edges[bnode.name][0].name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_dense_to_file(self, node, f):
assert(node.op == 'MatMul')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
scope_name = TFConverter.get_scope_name(node.name)
#knode for kernel, bnode for bias, anode for activation
knode, bnode, anode = self.get_dense_params(scope_name.split('/')[0])
if bnode is not None:
has_bias = 1
btensor = bnode.attr['value'].tensor
if btensor.tensor_shape.dim[0].size == 1:
bias = struct.pack("f", btensor.float_val[0])
else:
bias = btensor.tensor_content
else:
has_bias = 0
if anode is not None:
activation = anode.op
else:
activation = 'None'
ktensor = knode.attr['value'].tensor
in_channels = ktensor.tensor_shape.dim[0].size
out_channels = ktensor.tensor_shape.dim[1].size
if in_channels * out_channels == 1:
kernel = np.float32(ktensor.float_val[0])
else:
kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
kernel = kernel.reshape(in_channels, out_channels)
kernel = np.transpose(kernel, [1, 0])
np.array([self.op2code[node.op], self.conv_activations[activation], in_channels, out_channels, has_bias], dtype=np.uint32).tofile(f)
kernel.tofile(f)
if has_bias:
f.write(bias)
input_name = self.dense_scopename_inputname_dict[scope_name.split('/')[0]]
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
if anode is not None:
output_operand_index = self.add_operand(anode.name, Operand.IOTYPE_OUTPUT)
else:
if bnode is not None:
output_operand_index = self.add_operand(self.edges[bnode.name][0].name, Operand.IOTYPE_OUTPUT)
else:
output_operand_index = self.add_operand(self.edges[scope_name+'/concat_1'][0].name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_simple_conv2d_to_file(self, node, f):
assert(node.op == 'Conv2D')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
node0 = self.name_node_dict[node.input[0]]
node1 = self.name_node_dict[node.input[1]]
if node0.op == 'Const':
knode = node0
input_name = node.input[1]
else:
knode = node1
input_name = node.input[0]
ktensor = knode.attr['value'].tensor
filter_height = ktensor.tensor_shape.dim[0].size
filter_width = ktensor.tensor_shape.dim[1].size
in_channels = ktensor.tensor_shape.dim[2].size
out_channels = ktensor.tensor_shape.dim[3].size
if filter_height * filter_width * in_channels * out_channels == 1:
kernel = np.float32(ktensor.float_val[0])
else:
kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
kernel = kernel.reshape(filter_height, filter_width, in_channels, out_channels)
kernel = np.transpose(kernel, [3, 0, 1, 2])
has_bias = 0
dilation = 1
padding = node.attr['padding'].s.decode("utf-8")
np.array([self.op2code[node.op], dilation, self.conv_paddings[padding], self.conv_activations['None'],
in_channels, out_channels, filter_height, has_bias], dtype=np.uint32).tofile(f)
kernel.tofile(f)
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_depth2space_to_file(self, node, f):
assert(node.op == 'DepthToSpace')
self.layer_number = self.layer_number + 1
block_size = node.attr['block_size'].i
np.array([self.op2code[node.op], block_size], dtype=np.uint32).tofile(f)
self.converted_nodes.add(node.name)
input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_mirrorpad_to_file(self, node, f):
assert(node.op == 'MirrorPad')
self.layer_number = self.layer_number + 1
mode = node.attr['mode'].s
mode = self.mirrorpad_mode[mode.decode("utf-8")]
np.array([self.op2code[node.op], mode], dtype=np.uint32).tofile(f)
pnode = self.name_node_dict[node.input[1]]
self.converted_nodes.add(pnode.name)
paddings = pnode.attr['value'].tensor.tensor_content
f.write(paddings)
self.converted_nodes.add(node.name)
input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_maximum_to_file(self, node, f):
assert(node.op == 'Maximum')
self.layer_number = self.layer_number + 1
ynode = self.name_node_dict[node.input[1]]
y = ynode.attr['value'].tensor.float_val[0]
np.array([self.op2code[node.op]], dtype=np.uint32).tofile(f)
np.array([y], dtype=np.float32).tofile(f)
self.converted_nodes.add(node.name)
input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_mathbinary_to_file(self, node, f):
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
i0_node = self.name_node_dict[node.input[0]]
i1_node = self.name_node_dict[node.input[1]]
np.array([self.op2code['MathBinary'], self.mathbin2code[node.op]], dtype=np.uint32).tofile(f)
if i0_node.op == 'Const':
scalar = i0_node.attr['value'].tensor.float_val[0]
np.array([1], dtype=np.uint32).tofile(f) # broadcast: 1
np.array([scalar], dtype=np.float32).tofile(f)
np.array([0], dtype=np.uint32).tofile(f) # broadcast: 0
input_operand_index = self.add_operand(i1_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
elif i1_node.op == 'Const':
scalar = i1_node.attr['value'].tensor.float_val[0]
np.array([0], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
np.array([1], dtype=np.uint32).tofile(f)
np.array([scalar], dtype=np.float32).tofile(f)
else:
np.array([0], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
np.array([0], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i1_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([output_operand_index], dtype=np.uint32).tofile(f)
def dump_mathunary_to_file(self, node, f):
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
i0_node = self.name_node_dict[node.input[0]]
np.array([self.op2code['MathUnary'], self.mathun2code[node.op]], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([output_operand_index],dtype=np.uint32).tofile(f)
def dump_avg_pool_to_file(self, node, f):
assert(node.op == 'AvgPool')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
node0 = self.name_node_dict[node.input[0]]
strides = node.attr['strides']
        # TensorFlow does not support pooling strides in the batch dimension and
        # the current native NN does not support pooling strides in the channel dimension, so assert() here.
assert(strides.list.i[1]==strides.list.i[2])
assert(strides.list.i[0]==1)
assert(strides.list.i[3]==1)
strides = strides.list.i[1]
filter_node = node.attr['ksize']
input_name = node.input[0]
        # TensorFlow does not support pooling ksize in the batch or channel dimension.
assert(filter_node.list.i[0]==1)
assert(filter_node.list.i[3]==1)
filter_height = filter_node.list.i[1]
filter_width = filter_node.list.i[2]
padding = node.attr['padding'].s.decode("utf-8")
np.array([self.op2code[node.op], strides, self.pool_paddings[padding], filter_height],
dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index],dtype=np.uint32).tofile(f)
def dump_layers_to_file(self, f):
for node in self.nodes:
if node.name in self.converted_nodes:
continue
# conv2d with dilation generates very complex nodes, so handle it in special
if self.in_conv2d_scope(node.name):
if node.op == 'Conv2D':
self.dump_complex_conv2d_to_file(node, f)
continue
if self.in_dense_scope(node.name):
if node.op == 'MatMul':
self.dump_dense_to_file(node, f)
continue
if node.op == 'Conv2D':
self.dump_simple_conv2d_to_file(node, f)
continue
if node.name in self.output_names:
input_name = self.id_different_scope_dict[node.name]
if TFConverter.get_scope_name(input_name)!=TFConverter.get_scope_name(node.name):
continue
if node.op == 'AvgPool':
self.dump_avg_pool_to_file(node, f)
elif node.op == 'DepthToSpace':
self.dump_depth2space_to_file(node, f)
elif node.op == 'MirrorPad':
self.dump_mirrorpad_to_file(node, f)
elif node.op == 'Maximum':
self.dump_maximum_to_file(node, f)
elif node.op in self.mathbin2code:
self.dump_mathbinary_to_file(node, f)
elif node.op in self.mathun2code:
self.dump_mathunary_to_file(node, f)
def dump_operands_to_file(self, f):
operands = sorted(self.name_operand_dict.values())
for operand in operands:
#print('{}'.format(operand))
np.array([operand.index, len(operand.name)], dtype=np.uint32).tofile(f)
f.write(operand.name.encode('utf-8'))
np.array([operand.iotype, operand.dtype], dtype=np.uint32).tofile(f)
np.array(operand.dims, dtype=np.uint32).tofile(f)
def dump_to_file(self):
with open(self.outfile, 'wb') as f:
f.write(header.str.encode('utf-8'))
np.array([header.major, header.minor], dtype=np.uint32).tofile(f)
self.dump_layers_to_file(f)
self.dump_operands_to_file(f)
np.array([self.layer_number, len(self.name_operand_dict)], dtype=np.uint32).tofile(f)
def generate_name_node_dict(self):
for node in self.nodes:
self.name_node_dict[node.name] = node
def generate_output_names(self):
used_names = []
for node in self.nodes:
for input in node.input:
used_names.append(input)
for node in self.nodes:
if node.name not in used_names:
self.output_names.append(node.name)
def remove_identity(self):
self.id_different_scope_dict = {}
id_nodes = []
id_dict = {}
for node in self.nodes:
if node.op == 'Identity':
name = node.name
input = node.input[0]
id_nodes.append(node)
# do not change the output name
if name in self.output_names:
self.name_node_dict[input].name = name
self.name_node_dict[name] = self.name_node_dict[input]
del self.name_node_dict[input]
self.id_different_scope_dict[name] = input
else:
id_dict[name] = input
for idnode in id_nodes:
self.nodes.remove(idnode)
for node in self.nodes:
for i in range(len(node.input)):
input = node.input[i]
if input in id_dict:
node.input[i] = id_dict[input]
def generate_edges(self):
for node in self.nodes:
for input in node.input:
if input in self.edges:
self.edges[input].append(node)
else:
self.edges[input] = [node]
@staticmethod
def get_scope_name(name):
index = name.rfind('/')
if index == -1:
return ""
return name[0:index]
def in_conv2d_scope(self, name):
inner_scope = TFConverter.get_scope_name(name)
if inner_scope == "":
            return False
for scope in self.conv2d_scope_names:
index = inner_scope.find(scope)
if index == 0:
return True
return False
def in_dense_scope(self, name):
inner_scope = TFConverter.get_scope_name(name)
if inner_scope == "":
            return False
for scope in self.dense_scope_names:
index = inner_scope.find(scope)
if index == 0:
return True
return False
def generate_sub_block_op_scope_info(self):
        # usually, conv2d/dense forms a sub-block in the graph; get its scope name here
for node in self.nodes:
if node.op == 'Conv2D':
scope = TFConverter.get_scope_name(node.name)
# for the case tf.nn.conv2d is called directly
if scope == '':
continue
# for the case tf.nn.conv2d is called within a scope
if scope + '/kernel' not in self.name_node_dict:
continue
self.conv2d_scope_names.add(scope)
elif node.op == 'MatMul':
scope = TFConverter.get_scope_name(node.name)
# for the case tf.nn.dense is called directly
if scope == '':
continue
# for the case tf.nn.dense is called within a scope
if scope + '/kernel' not in self.name_node_dict and scope.split('/Tensordot')[0] + '/kernel' not in self.name_node_dict:
continue
self.dense_scope_names.add(scope.split('/Tensordot')[0])
# get the input name to the conv2d/dense sub block
for node in self.nodes:
scope = TFConverter.get_scope_name(node.name)
if scope in self.conv2d_scope_names:
if node.op == 'Conv2D' or node.op == 'Shape':
for inp in node.input:
if TFConverter.get_scope_name(inp) != scope:
self.conv2d_scopename_inputname_dict[scope] = inp
elif scope in self.dense_scope_names:
if node.op == 'MatMul' or node.op == 'Shape':
for inp in node.input:
if TFConverter.get_scope_name(inp) != scope:
self.dense_scopename_inputname_dict[scope] = inp
elif scope.split('/Tensordot')[0] in self.dense_scope_names:
if node.op == 'Transpose':
for inp in node.input:
if TFConverter.get_scope_name(inp).find(scope)<0 and TFConverter.get_scope_name(inp).find(scope.split('/')[0])<0:
self.dense_scopename_inputname_dict[scope.split('/Tensordot')[0]] = inp
def run(self):
self.generate_name_node_dict()
self.generate_output_names()
self.remove_identity()
self.generate_edges()
self.generate_sub_block_op_scope_info()
if self.dump4tb:
self.dump_for_tensorboard()
self.dump_to_file()
def convert_from_tensorflow(infile, outfile, dump4tb):
with open(infile, 'rb') as f:
# read the file in .proto format
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
nodes = graph_def.node
converter = TFConverter(graph_def, nodes, outfile, dump4tb)
converter.run()
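# A minimal usage sketch (not part of the original tool; 'model.pb' and 'model.model'
# are placeholder file names, and the input is assumed to be a frozen TensorFlow graph):
#
#     convert_from_tensorflow('model.pb', 'model.model', dump4tb=False)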
|
py
|
1a5d94467de0c9ae78ad16986b8dc6c87482ffd7
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from pyrimidine import *
from pyrimidine.local_search import *
from pyrimidine.utils import randint2
from pyrimidine.benchmarks.optimization import *
_evaluate = ShortestPath.random(30)
class _Chromosome(PermutationChromosome):
default_size = 30
def decode(self):
return np.hstack((self, [self[0]]))
def to_points(self):
x = self.decode()
return points[x, 0], points[x, 1]
_Individual = MonoIndividual[_Chromosome].set_fitness(lambda obj: 1 / _evaluate(obj.decode()))
class SAIndividual(SimulatedAnnealing, _Individual):
def get_neighbour(self):
cpy = self.clone(fitness=None)
cpy.chromosome.mutate()
return cpy
sa = SAIndividual.random(size=30)
from matplotlib import pyplot as plt
from celluloid import Camera
fig = plt.figure()
ax = fig.add_subplot(111)
points = _evaluate.points
def animate(i):
sa.evolve(n_iter=5, verbose=False)
ax.plot(*sa.chromosome.to_points(), 'k-o')
ax.plot(*sa.phantom.chromosome.to_points(), 'b--o')
ax.legend((f'Best Solution({sa.fitness:.4})', f'Generation {i*5}'))
camera = Camera(fig)
ax.plot(*sa.chromosome.to_points(), 'k-o')
ax.legend(('Generation 0',))
for i in range(1, 300):
animate(i)
camera.snap()
animation = camera.animate()
animation.save('animation-sa.mp4')
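# Note: saving to .mp4 requires a matplotlib-compatible video writer such as ffmpeg to be
# installed. As an alternative, the same animation could be written as a GIF with the
# built-in pillow writer:
#
#     animation.save('animation-sa.gif', writer='pillow')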
|
py
|
1a5d95a93198c93e95047e2dfb99fb9133895e14
|
import logging
import pytest
import subprocess
import sys
import numpy as np
import rasterio
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
@pytest.mark.gdalbin
def test_write_ubyte(tmpdir):
name = str(tmpdir.mkdir("sub").join("test_write_ubyte.png"))
a = np.ones((100, 100), dtype=rasterio.ubyte) * 127
with rasterio.open(
name, 'w',
driver='PNG', width=100, height=100, count=1,
dtype=a.dtype) as s:
s.write(a, indexes=1)
info = subprocess.check_output(["gdalinfo", "-stats", name]).decode('utf-8')
assert "Minimum=127.000, Maximum=127.000, Mean=127.000, StdDev=0.000" in info
|
py
|
1a5d95d3a33ade3129b8706a24af78862fa5b188
|
from typing import List, Any, Optional
from pydantic import BaseModel, validator
from tracardi.domain.entity import Entity
class Configuration(BaseModel):
source: Entity
type: str = 'select'
query: str = "SELECT 1"
data: List[Any] = []
    timeout: Optional[int] = None
@validator("query")
def must_not_be_empty(cls, value):
if len(value) < 2:
raise ValueError("Sql must not be empty.")
return value
@validator("type")
def must_have_certain_value(cls, value):
allowed_values = ['select', 'insert', 'delete', 'call', 'create']
if value not in allowed_values:
raise ValueError(f"Allowed values {allowed_values}")
return value
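# A minimal usage sketch (the resource id below is a placeholder, not taken from the original):
#
#     config = Configuration(source=Entity(id="my-sql-resource"), type="select", query="SELECT 1")
#
# A query shorter than two characters or a type outside the allowed list raises a ValueError.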
|
py
|
1a5d96bfd3c1bd2a115e25b7095287ba810c5632
|
from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = ({{ cookiecutter.minimum_supported_python_version [0] }}, {{ cookiecutter.minimum_supported_python_version [2] }})
if sys.version_info < min_version:
error = """
{{ cookiecutter.package_dist_name }} does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*(sys.version_info[:2] + min_version))
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
setup(
name='{{ cookiecutter.package_dist_name }}',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="{{ cookiecutter.project_short_description }}",
long_description=readme,
author="{{ cookiecutter.full_name }}",
author_email='{{ cookiecutter.email }}',
url='https://{{ cookiecutter.vcs_domain }}/{{ cookiecutter.vcs_username }}/{{ cookiecutter.repo_name }}',
python_requires='>={}'.format('.'.join(str(n) for n in min_version)),
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
# 'command = some.module:some_function',
],
},
include_package_data=True,
package_data={
'{{ cookiecutter.package_dir_name }}': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
|
py
|
1a5d96ee9ae2a78f5785803110f3a923fe522038
|
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: Jan Felix Wiebe, Mohit Jindal
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
import calendar
import hashlib
import math
from collections import defaultdict
from datetime import date, datetime, time, timedelta
from functools import reduce
from urllib.parse import quote, urlencode
import dateutil
import isoweek
import pytz
from django.conf import settings
from django.core.cache import caches
from django.db.models import Exists, Max, Min, OuterRef, Prefetch, Q
from django.db.models.functions import Coalesce, Greatest
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.utils.formats import date_format, get_format
from django.utils.timezone import get_current_timezone, now
from django.views import View
from django.views.decorators.cache import cache_page
from django.views.generic import ListView, TemplateView
from pytz import UTC
from pretix.base.i18n import language
from pretix.base.models import (
Event, EventMetaValue, Organizer, Quota, SubEvent, SubEventMetaValue,
)
from pretix.base.services.quotas import QuotaAvailability
from pretix.helpers.compat import date_fromisocalendar
from pretix.helpers.daterange import daterange
from pretix.helpers.formats.en.formats import (
SHORT_MONTH_DAY_FORMAT, WEEK_FORMAT,
)
from pretix.multidomain.urlreverse import eventreverse
from pretix.presale.ical import get_public_ical
from pretix.presale.views import OrganizerViewMixin
def filter_qs_by_attr(qs, request):
"""
    We allow filtering the event list using attributes defined in the event meta data
    models, in the format ?attr[meta_name]=meta_value.
"""
attrs = {}
for i, item in enumerate(request.GET.items()):
k, v = item
if k.startswith("attr[") and k.endswith("]"):
attrs[k[5:-1]] = v
skey = 'filter_qs_by_attr_{}_{}'.format(request.organizer.pk, request.event.pk if hasattr(request, 'event') else '')
if request.GET.get('attr_persist'):
request.session[skey] = attrs
elif skey in request.session:
attrs = request.session[skey]
props = {
p.name: p for p in request.organizer.meta_properties.filter(
name__in=attrs.keys()
)
}
for i, item in enumerate(attrs.items()):
attr, v = item
emv_with_value = EventMetaValue.objects.filter(
event=OuterRef('event' if qs.model == SubEvent else 'pk'),
property__name=attr,
value=v
)
emv_with_any_value = EventMetaValue.objects.filter(
event=OuterRef('event' if qs.model == SubEvent else 'pk'),
property__name=attr,
)
if qs.model == SubEvent:
semv_with_value = SubEventMetaValue.objects.filter(
subevent=OuterRef('pk'),
property__name=attr,
value=v
)
semv_with_any_value = SubEventMetaValue.objects.filter(
subevent=OuterRef('pk'),
property__name=attr,
)
prop = props.get(attr)
if not prop:
continue
annotations = {'attr_{}'.format(i): Exists(emv_with_value)}
if qs.model == SubEvent:
annotations['attr_{}_sub'.format(i)] = Exists(semv_with_value)
annotations['attr_{}_sub_any'.format(i)] = Exists(semv_with_any_value)
filters = Q(**{'attr_{}_sub'.format(i): True})
filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}'.format(i): True}))
if prop.default == v:
annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)
filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}_any'.format(i): False}))
else:
filters = Q(**{'attr_{}'.format(i): True})
if prop.default == v:
annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)
filters |= Q(**{'attr_{}_any'.format(i): False})
qs = qs.annotate(**annotations).filter(filters)
return qs
class EventListMixin:
def _get_event_queryset(self):
query = Q(is_public=True) & Q(live=True)
qs = self.request.organizer.events.using(settings.DATABASE_REPLICA).filter(query)
qs = qs.filter(sales_channels__contains=self.request.sales_channel.identifier)
qs = qs.annotate(
min_from=Min('subevents__date_from'),
min_to=Min('subevents__date_to'),
max_from=Max('subevents__date_from'),
max_to=Max('subevents__date_to'),
max_fromto=Greatest(Max('subevents__date_to'), Max('subevents__date_from')),
)
if "old" in self.request.GET:
qs = qs.filter(
Q(Q(has_subevents=False) & Q(
Q(date_to__lt=now()) | Q(Q(date_to__isnull=True) & Q(date_from__lt=now()))
)) | Q(Q(has_subevents=True) & Q(
Q(min_to__lt=now()) | Q(min_from__lt=now()))
)
).annotate(
order_to=Coalesce('max_fromto', 'max_to', 'max_from', 'date_to', 'date_from'),
).order_by('-order_to')
else:
qs = qs.filter(
Q(Q(has_subevents=False) & Q(
Q(date_to__gte=now()) | Q(Q(date_to__isnull=True) & Q(date_from__gte=now()))
)) | Q(Q(has_subevents=True) & Q(
Q(max_to__gte=now()) | Q(max_from__gte=now()))
)
).annotate(
order_from=Coalesce('min_from', 'date_from'),
).order_by('order_from')
qs = Event.annotated(filter_qs_by_attr(qs, self.request))
return qs
def _set_month_to_next_subevent(self):
tz = pytz.timezone(self.request.event.settings.timezone)
next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
active=True,
is_public=True,
).select_related('event').order_by('date_from').first()
if next_sev:
datetime_from = next_sev.date_from
self.year = datetime_from.astimezone(tz).year
self.month = datetime_from.astimezone(tz).month
else:
self.year = now().year
self.month = now().month
def _set_month_to_next_event(self):
next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
organizer=self.request.organizer,
live=True,
is_public=True,
has_subevents=False
), self.request).order_by('date_from').first()
next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
active=True,
is_public=True,
), self.request).select_related('event').order_by('date_from').first()
datetime_from = None
if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):
datetime_from = next_sev.date_from
next_ev = next_sev.event
elif next_ev:
datetime_from = next_ev.date_from
if datetime_from:
tz = pytz.timezone(next_ev.settings.timezone)
self.year = datetime_from.astimezone(tz).year
self.month = datetime_from.astimezone(tz).month
else:
self.year = now().year
self.month = now().month
def _set_month_year(self):
if 'date' in self.request.GET:
try:
date = dateutil.parser.isoparse(self.request.GET.get('date')).date()
except ValueError:
date = now().date()
self.year = date.year
self.month = date.month
else:
if hasattr(self.request, 'event'):
self._set_month_to_next_subevent()
else:
self._set_month_to_next_event()
def _set_week_to_next_subevent(self):
tz = pytz.timezone(self.request.event.settings.timezone)
next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
active=True,
is_public=True,
).select_related('event').order_by('date_from').first()
if next_sev:
datetime_from = next_sev.date_from
self.year = datetime_from.astimezone(tz).isocalendar()[0]
self.week = datetime_from.astimezone(tz).isocalendar()[1]
else:
self.year = now().isocalendar()[0]
self.week = now().isocalendar()[1]
def _set_week_to_next_event(self):
next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
organizer=self.request.organizer,
live=True,
is_public=True,
has_subevents=False
), self.request).order_by('date_from').first()
next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
active=True,
is_public=True,
), self.request).select_related('event').order_by('date_from').first()
datetime_from = None
if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):
datetime_from = next_sev.date_from
next_ev = next_sev.event
elif next_ev:
datetime_from = next_ev.date_from
if datetime_from:
tz = pytz.timezone(next_ev.settings.timezone)
self.year = datetime_from.astimezone(tz).isocalendar()[0]
self.week = datetime_from.astimezone(tz).isocalendar()[1]
else:
self.year = now().isocalendar()[0]
self.week = now().isocalendar()[1]
def _set_week_year(self):
if 'date' in self.request.GET:
try:
iso = dateutil.parser.isoparse(self.request.GET.get('date')).isocalendar()
except ValueError:
iso = now().isocalendar()
self.year = iso[0]
self.week = iso[1]
else:
if hasattr(self.request, 'event'):
self._set_week_to_next_subevent()
else:
self._set_week_to_next_event()
class OrganizerIndex(OrganizerViewMixin, EventListMixin, ListView):
model = Event
context_object_name = 'events'
template_name = 'pretixpresale/organizers/index.html'
paginate_by = 30
def dispatch(self, request, *args, **kwargs):
# In stock pretix, nothing on this page is session-dependent except for the language and the customer login part,
# so we can cache pretty aggressively if the user is anonymous. Note that we deliberately implement the caching
        # on the view layer, *after* all middlewares have been run, so we have access to the computed locale, as well
# as the login status etc.
cache_allowed = (
settings.CACHE_LARGE_VALUES_ALLOWED and
not getattr(request, 'customer', None) and
not request.user.is_authenticated
)
if not cache_allowed:
return super().dispatch(request, *args, **kwargs)
cache_key_parts = [
request.method,
request.host,
str(request.organizer.pk),
request.get_full_path(),
request.LANGUAGE_CODE,
self.request.sales_channel.identifier,
]
for c, v in request.COOKIES.items():
# If the cookie is not one we know, it might be set by a plugin and we need to include it in the
# cache key to be safe. A known example includes plugins that e.g. store cookie banner state.
if c not in (settings.SESSION_COOKIE_NAME, settings.LANGUAGE_COOKIE_NAME, settings.CSRF_COOKIE_NAME) and not c.startswith('__'):
cache_key_parts.append(f'{c}={v}')
for c, v in request.session.items():
# If the session key is not one we know, it might be set by a plugin and we need to include it in the
# cache key to be safe. A known example would be the pretix-campaigns plugin setting the campaign ID.
if (
not c.startswith('_auth') and
not c.startswith('pretix_auth_') and
not c.startswith('customer_auth_') and
not c.startswith('current_cart_') and
not c.startswith('cart_') and
not c.startswith('payment_') and
c not in ('carts', 'payment', 'pinned_user_agent')
):
cache_key_parts.append(f'{c}={repr(v)}')
cache_key = f'pretix.presale.views.organizer.OrganizerIndex:{hashlib.md5(":".join(cache_key_parts).encode()).hexdigest()}'
cache_timeout = 15
cache = caches[settings.CACHE_LARGE_VALUES_ALIAS]
response = cache.get(cache_key)
if response is not None:
return response
        response = super().dispatch(request, *args, **kwargs)
if response.status_code >= 400:
return response
if hasattr(response, 'render') and callable(response.render):
def _store_to_cache(r):
cache.set(cache_key, r, cache_timeout)
response.add_post_render_callback(_store_to_cache)
else:
cache.set(cache_key, response, cache_timeout)
return response
def get(self, request, *args, **kwargs):
style = request.GET.get("style", request.organizer.settings.event_list_type)
if style == "calendar":
cv = CalendarView()
cv.request = request
return cv.get(request, *args, **kwargs)
elif style == "day":
cv = DayCalendarView()
cv.request = request
return cv.get(request, *args, **kwargs)
elif style == "week":
cv = WeekCalendarView()
cv.request = request
return cv.get(request, *args, **kwargs)
else:
return super().get(request, *args, **kwargs)
def get_queryset(self):
return self._get_event_queryset()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
for event in ctx['events']:
event.tzname = pytz.timezone(event.cache.get_or_set('timezone', lambda: event.settings.timezone))
if event.has_subevents:
event.daterange = daterange(
event.min_from.astimezone(event.tzname),
(event.max_fromto or event.max_to or event.max_from).astimezone(event.tzname)
)
return ctx
def has_before_after(eventqs, subeventqs, before, after):
eqs = eventqs.filter(is_public=True, live=True, has_subevents=False)
sqs = subeventqs.filter(active=True, is_public=True)
return (
eqs.filter(Q(date_from__lte=before)).exists() or sqs.filter(Q(date_from__lte=before)).exists(),
eqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists() or sqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists()
)
def add_events_for_days(request, baseqs, before, after, ebd, timezones):
qs = baseqs.filter(is_public=True, live=True, has_subevents=False).filter(
Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |
Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |
Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))
).order_by(
'date_from'
).prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
if hasattr(request, 'organizer'):
qs = filter_qs_by_attr(qs, request)
for event in qs:
timezones.add(event.settings.timezones)
tz = pytz.timezone(event.settings.timezone)
datetime_from = event.date_from.astimezone(tz)
date_from = datetime_from.date()
if event.settings.show_date_to and event.date_to:
datetime_to = event.date_to.astimezone(tz)
date_to = event.date_to.astimezone(tz).date()
d = max(date_from, before.date())
while d <= date_to and d <= after.date():
first = d == date_from
ebd[d].append({
'event': event,
'continued': not first,
'time': datetime_from.time().replace(tzinfo=None) if first and event.settings.show_times else None,
'time_end': (
datetime_to.time().replace(tzinfo=None)
if (date_to == date_from or (
date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()
)) and event.settings.show_times
else None
),
'time_end_today': (
datetime_to.time().replace(tzinfo=None)
if date_to == d and event.settings.show_times
else None
),
'url': eventreverse(event, 'presale:event.index'),
'timezone': event.settings.timezone,
})
d += timedelta(days=1)
else:
ebd[date_from].append({
'event': event,
'continued': False,
'time': datetime_from.time().replace(tzinfo=None) if event.settings.show_times else None,
'url': eventreverse(event, 'presale:event.index'),
'timezone': event.settings.timezone,
})
def add_subevents_for_days(qs, before, after, ebd, timezones, event=None, cart_namespace=None, voucher=None):
qs = qs.filter(active=True, is_public=True).filter(
Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |
Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |
Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))
).order_by(
'date_from'
)
quotas_to_compute = []
for se in qs:
if se.presale_is_running:
quotas_to_compute += se.active_quotas
qcache = {}
if quotas_to_compute:
qa = QuotaAvailability()
qa.queue(*quotas_to_compute)
qa.compute(allow_cache=True)
qcache.update(qa.results)
for se in qs:
if qcache:
se._quota_cache = qcache
kwargs = {'subevent': se.pk}
if cart_namespace:
kwargs['cart_namespace'] = cart_namespace
s = event.settings if event else se.event.settings
if s.event_list_available_only:
hide = se.presale_has_ended or (
(not voucher or not voucher.allow_ignore_quota) and
se.best_availability_state is not None and
se.best_availability_state < Quota.AVAILABILITY_RESERVED
)
if hide:
continue
timezones.add(s.timezones)
tz = pytz.timezone(s.timezone)
datetime_from = se.date_from.astimezone(tz)
date_from = datetime_from.date()
if s.show_date_to and se.date_to:
datetime_to = se.date_to.astimezone(tz)
date_to = se.date_to.astimezone(tz).date()
d = max(date_from, before.date())
while d <= date_to and d <= after.date():
first = d == date_from
ebd[d].append({
'continued': not first,
'timezone': s.timezone,
'time': datetime_from.time().replace(tzinfo=None) if first and s.show_times else None,
'time_end': (
datetime_to.time().replace(tzinfo=None)
if (date_to == date_from or (
date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()
)) and s.show_times
else None
),
'time_end_today': (
datetime_to.time().replace(tzinfo=None)
if date_to == d and s.show_times
else None
),
'event': se,
'url': (
eventreverse(se.event, 'presale:event.redeem',
kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'
if voucher
else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)
)
})
d += timedelta(days=1)
else:
ebd[date_from].append({
'event': se,
'continued': False,
'time': datetime_from.time().replace(tzinfo=None) if s.show_times else None,
'url': (
eventreverse(se.event, 'presale:event.redeem',
kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'
if voucher
else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)
),
'timezone': s.timezone,
})
def sort_ev(e):
return e['time'] or time(0, 0, 0), str(e['event'].name)
def days_for_template(ebd, week):
day_format = get_format('WEEK_DAY_FORMAT')
if day_format == 'WEEK_DAY_FORMAT':
day_format = 'SHORT_DATE_FORMAT'
return [
{
'day_formatted': date_format(day, day_format),
'date': day,
'today': day == now().astimezone(get_current_timezone()).date(),
'events': sorted(ebd.get(day), key=sort_ev) if day in ebd else []
}
for day in week.days()
]
def weeks_for_template(ebd, year, month):
calendar.setfirstweekday(0) # TODO: Configurable
return [
[
{
'day': day,
'date': date(year, month, day),
'events': (
sorted(ebd.get(date(year, month, day)), key=sort_ev)
if date(year, month, day) in ebd else None
)
}
if day > 0
else None
for day in week
]
for week in calendar.monthcalendar(year, month)
]
class CalendarView(OrganizerViewMixin, EventListMixin, TemplateView):
template_name = 'pretixpresale/organizers/calendar.html'
def get(self, request, *args, **kwargs):
# redirect old month-year-URLs to new date-URLs
keys = ("month", "year")
if all(k in request.GET for k in keys):
get_params = {k: v for k, v in request.GET.items() if k not in keys}
get_params["date"] = "%s-%s" % (request.GET.get("year"), request.GET.get("month"))
return redirect(self.request.path + "?" + urlencode(get_params))
self._set_month_year()
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
try:
_, ndays = calendar.monthrange(self.year, self.month)
except calendar.IllegalMonthError:
raise Http404()
before = datetime(self.year, self.month, 1, 0, 0, 0, tzinfo=UTC) - timedelta(days=1)
after = datetime(self.year, self.month, ndays, 0, 0, 0, tzinfo=UTC) + timedelta(days=1)
ctx['date'] = date(self.year, self.month, 1)
ctx['before'] = before
ctx['after'] = after
ebd = self._events_by_day(before, after)
ctx['has_before'], ctx['has_after'] = has_before_after(
self.request.organizer.events.filter(
sales_channels__contains=self.request.sales_channel.identifier
),
SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
),
before,
after,
)
ctx['multiple_timezones'] = self._multiple_timezones
ctx['weeks'] = weeks_for_template(ebd, self.year, self.month)
ctx['months'] = [date(self.year, i + 1, 1) for i in range(12)]
ctx['years'] = range(now().year - 2, now().year + 3)
return ctx
def _events_by_day(self, before, after):
ebd = defaultdict(list)
timezones = set()
add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(
settings.DATABASE_REPLICA
).filter(
sales_channels__contains=self.request.sales_channel.identifier
), before, after, ebd, timezones)
add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
).prefetch_related(
Prefetch(
'event',
queryset=Event.objects.prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
)
)), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)
self._multiple_timezones = len(timezones) > 1
return ebd
class WeekCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):
template_name = 'pretixpresale/organizers/calendar_week.html'
def get(self, request, *args, **kwargs):
# redirect old week-year-URLs to new date-URLs
keys = ("week", "year")
if all(k in request.GET for k in keys):
get_params = {k: v for k, v in request.GET.items() if k not in keys}
get_params["date"] = "%s-W%s" % (request.GET.get("year"), request.GET.get("week"))
return redirect(self.request.path + "?" + urlencode(get_params))
self._set_week_year()
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
week = isoweek.Week(self.year, self.week)
before = datetime(
week.monday().year, week.monday().month, week.monday().day, 0, 0, 0, tzinfo=UTC
) - timedelta(days=1)
after = datetime(
week.sunday().year, week.sunday().month, week.sunday().day, 0, 0, 0, tzinfo=UTC
) + timedelta(days=1)
ctx['date'] = week.monday()
ctx['before'] = before
ctx['after'] = after
ebd = self._events_by_day(before, after)
ctx['has_before'], ctx['has_after'] = has_before_after(
self.request.organizer.events.filter(
sales_channels__contains=self.request.sales_channel.identifier
),
SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
),
before,
after,
)
ctx['days'] = days_for_template(ebd, week)
years = (self.year - 1, self.year, self.year + 1)
weeks = []
for year in years:
weeks += [
(date_fromisocalendar(year, i + 1, 1), date_fromisocalendar(year, i + 1, 7))
for i in range(53 if date(year, 12, 31).isocalendar()[1] == 53 else 52)
]
ctx['weeks'] = [[w for w in weeks if w[0].year == year] for year in years]
ctx['week_format'] = get_format('WEEK_FORMAT')
if ctx['week_format'] == 'WEEK_FORMAT':
ctx['week_format'] = WEEK_FORMAT
ctx['short_month_day_format'] = get_format('SHORT_MONTH_DAY_FORMAT')
if ctx['short_month_day_format'] == 'SHORT_MONTH_DAY_FORMAT':
ctx['short_month_day_format'] = SHORT_MONTH_DAY_FORMAT
ctx['multiple_timezones'] = self._multiple_timezones
return ctx
def _events_by_day(self, before, after):
ebd = defaultdict(list)
timezones = set()
add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(
settings.DATABASE_REPLICA
).filter(
sales_channels__contains=self.request.sales_channel.identifier
), before, after, ebd, timezones)
add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
).prefetch_related(
Prefetch(
'event',
queryset=Event.objects.prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
)
)), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)
self._multiple_timezones = len(timezones) > 1
return ebd
class DayCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):
template_name = 'pretixpresale/organizers/calendar_day.html'
def _set_date_to_next_event(self):
next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
organizer=self.request.organizer,
live=True,
is_public=True,
date_from__gte=now(),
), self.request).order_by('date_from').first()
next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
active=True,
is_public=True,
), self.request).select_related('event').order_by('date_from').first()
datetime_from = None
if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):
datetime_from = next_sev.date_from
next_ev = next_sev.event
elif next_ev:
datetime_from = next_ev.date_from
if datetime_from:
self.tz = pytz.timezone(next_ev.settings.timezone)
self.date = datetime_from.astimezone(self.tz).date()
else:
self.tz = self.request.organizer.timezone
self.date = now().astimezone(self.tz).date()
def _set_date(self):
if 'date' in self.request.GET:
self.tz = self.request.organizer.timezone
try:
self.date = dateutil.parser.parse(self.request.GET.get('date')).date()
except ValueError:
self.date = now().astimezone(self.tz).date()
else:
self._set_date_to_next_event()
def get(self, request, *args, **kwargs):
self._set_date()
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
before = datetime(
self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC
) - timedelta(days=1)
after = datetime(
self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC
) + timedelta(days=1)
ctx['date'] = self.date
ctx['cal_tz'] = self.tz
ctx['before'] = before
ctx['after'] = after
ctx['has_before'], ctx['has_after'] = has_before_after(
self.request.organizer.events.filter(
sales_channels__contains=self.request.sales_channel.identifier
),
SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
),
before,
after,
)
ebd = self._events_by_day(before, after)
if not ebd[self.date]:
return ctx
events = ebd[self.date]
shortest_duration = self._get_shortest_duration(events).total_seconds() // 60
# pick the next biggest tick_duration based on shortest_duration, max. 180 minutes
tick_duration = next((d for d in [5, 10, 15, 30, 60, 120, 180] if d >= shortest_duration), 180)
raster_size = min(self._get_raster_size(events), tick_duration)
events, start, end = self._rasterize_events(events, tick_duration=tick_duration, raster_size=raster_size)
calendar_duration = self._get_time_duration(start, end)
ctx["calendar_duration"] = self._format_duration(calendar_duration)
ctx['time_ticks'] = self._get_time_ticks(start, end, tick_duration)
ctx['start'] = datetime.combine(self.date, start)
ctx['raster_size'] = raster_size
# ctx['end'] = end
# size of each grid-column is based on shortest event duration and raster_size
# raster_size is based on start/end times, so it could happen we have a small raster but long running events
# raster_size will always be smaller or equals tick_duration
ctx['raster_to_shortest_ratio'] = round((8 * raster_size) / shortest_duration)
ctx['events'] = events
events_by_series = self._grid_for_template(events)
ctx['collections'] = events_by_series
ctx['no_headlines'] = not any([series for series, events in events_by_series])
ctx['multiple_timezones'] = self._multiple_timezones
return ctx
def _get_raster_size(self, events):
# get best raster-size for min. # of columns in grid
# due to grid-col-calculations in CSS raster_size cannot be bigger than 60 (minutes)
# all start- and end-times (minute-part) except full hour
times = [
e["time"].minute for e in events if e["time"] and e["time"].minute
] + [
e["time_end_today"].minute for e in events if "time_end_today" in e and e["time_end_today"] and e["time_end_today"].minute
]
if not times:
# no time other than full hour, so raster can be 1 hour/60 minutes
return 60
gcd = reduce(math.gcd, set(times))
return next((d for d in [5, 10, 15, 30, 60] if d >= gcd), 60)
def _get_time_duration(self, start, end):
midnight = time(0, 0)
return datetime.combine(
self.date if end != midnight else self.date + timedelta(days=1),
end
) - datetime.combine(
self.date,
start
)
def _format_duration(self, duration):
return ":".join([
"%02d" % i for i in (
(duration.days * 24) + (duration.seconds // 3600),
(duration.seconds // 60) % 60
)
])
def _floor_time(self, t, raster_size=5):
# raster_size based on minutes, might be factored into a helper class with a timedelta as raster
minutes = t.hour * 60 + t.minute
if minutes % raster_size:
minutes = (minutes // raster_size) * raster_size
return t.replace(hour=minutes // 60, minute=minutes % 60)
return t
def _ceil_time(self, t, raster_size=5):
# raster_size based on minutes, might be factored into a helper class with a timedelta as raster
minutes = t.hour * 60 + t.minute
if not minutes % raster_size:
return t
minutes = math.ceil(minutes / raster_size) * raster_size
minute = minutes % 60
hour = minutes // 60
if hour > 23:
hour = hour % 24
return t.replace(minute=minute, hour=hour)
def _rasterize_events(self, events, tick_duration, raster_size=5):
rastered_events = []
start, end = self._get_time_range(events)
start = self._floor_time(start, raster_size=tick_duration)
end = self._ceil_time(end, raster_size=tick_duration)
midnight = time(0, 0)
for e in events:
t = e["time"] or time(0, 0)
e["offset_shift_start"] = 0
if e["continued"]:
e["time_rastered"] = midnight
elif t.minute % raster_size:
e["time_rastered"] = t.replace(minute=(t.minute // raster_size) * raster_size)
e["offset_shift_start"] = t.minute % raster_size
else:
e["time_rastered"] = t
e["offset_shift_end"] = 0
if "time_end_today" in e and e["time_end_today"]:
if e["time_end_today"].minute % raster_size:
minute = math.ceil(e["time_end_today"].minute / raster_size) * raster_size
hour = e["time_end_today"].hour
if minute > 59:
minute = minute % 60
hour = (hour + 1) % 24
e["time_end_today_rastered"] = e["time_end_today"].replace(minute=minute, hour=hour)
e["offset_shift_end"] = raster_size - e["time_end_today"].minute % raster_size
else:
e["time_end_today_rastered"] = e["time_end_today"]
else:
e["time_end_today"] = e["time_end_today_rastered"] = time(0, 0)
e["duration_rastered"] = self._format_duration(datetime.combine(
self.date if e["time_end_today_rastered"] != midnight else self.date + timedelta(days=1),
e["time_end_today_rastered"]
) - datetime.combine(
self.date,
e['time_rastered']
))
e["offset_rastered"] = datetime.combine(self.date, time(0, 0)) + self._get_time_duration(start, e["time_rastered"])
rastered_events.append(e)
return rastered_events, start, end
def _get_shortest_duration(self, events):
midnight = time(0, 0)
durations = [
datetime.combine(
self.date if e.get('time_end_today') and e['time_end_today'] != midnight else self.date + timedelta(days=1),
e['time_end_today'] if e.get('time_end_today') else time(0, 0)
)
-
datetime.combine(
self.date,
time(0, 0) if e['continued'] else (e['time'] or time(0, 0))
)
for e in events
]
return min([d for d in durations])
def _get_time_range(self, events):
if any(e['continued'] for e in events) or any(e['time'] is None for e in events):
starting_at = time(0, 0)
else:
starting_at = min(e['time'] for e in events)
if any(e.get('time_end_today') is None for e in events):
ending_at = time(0, 0)
else:
ending_at = max(e['time_end_today'] for e in events)
return starting_at, ending_at
def _get_time_ticks(self, start, end, tick_duration):
ticks = []
tick_duration = timedelta(minutes=tick_duration)
# convert time to datetime for timedelta calc
start = datetime.combine(self.date, start)
end = datetime.combine(self.date, end)
if end <= start:
end = end + timedelta(days=1)
tick_start = start
offset = datetime.utcfromtimestamp(0)
duration = datetime.utcfromtimestamp(tick_duration.total_seconds())
while tick_start < end:
tick = {
"start": tick_start,
"duration": duration,
"offset": offset,
}
ticks.append(tick)
tick_start += tick_duration
offset += tick_duration
return ticks
def _grid_for_template(self, events):
midnight = time(0, 0)
rows_by_collection = defaultdict(list)
# We sort the events into "collections": all subevents from the same
# event series together and all non-series events into a "None"
# collection. Then, we look if there's already an event in the
# collection that overlaps, in which case we need to split the
# collection into multiple rows.
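        # For example (hypothetical times): two dates of the same series at 10:00-12:00 and
        # 11:00-13:00 overlap and therefore end up in two rows of that collection, while a
        # third date at 13:00-14:00 fits back into the first row.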
for counter, e in enumerate(events):
collection = e['event'].event if isinstance(e['event'], SubEvent) else None
placed_in_row = False
for row in rows_by_collection[collection]:
if any(
(e['time_rastered'] < o['time_end_today_rastered'] or o['time_end_today_rastered'] == midnight) and
(o['time_rastered'] < e['time_end_today_rastered'] or e['time_end_today_rastered'] == midnight)
for o in row
):
continue
row.append(e)
placed_in_row = True
break
if not placed_in_row:
rows_by_collection[collection].append([e])
# flatten rows to one stream of events with attribute row
# for better keyboard-tab-order in html
for collection in rows_by_collection:
for i, row in enumerate(rows_by_collection[collection]):
concurrency = i + 1
for e in row:
e["concurrency"] = concurrency
rows_by_collection[collection] = {
"concurrency": len(rows_by_collection[collection]),
"events": sorted([e for row in rows_by_collection[collection] for e in row], key=lambda d: d['time'] or time(0, 0)),
}
def sort_key(c):
collection, row = c
if collection is None:
return ''
else:
return str(collection.name)
return sorted(rows_by_collection.items(), key=sort_key)
def _events_by_day(self, before, after):
ebd = defaultdict(list)
timezones = set()
add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(
settings.DATABASE_REPLICA
).filter(
sales_channels__contains=self.request.sales_channel.identifier
), before, after, ebd, timezones)
add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
).prefetch_related(
Prefetch(
'event',
queryset=Event.objects.prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
)
)), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)
self._multiple_timezones = len(timezones) > 1
return ebd
@method_decorator(cache_page(300), name='dispatch')
class OrganizerIcalDownload(OrganizerViewMixin, View):
def get(self, request, *args, **kwargs):
cutoff = now() - timedelta(days=31)
events = list(
filter_qs_by_attr(
self.request.organizer.events.filter(
Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),
is_public=True,
live=True,
has_subevents=False,
sales_channels__contains=self.request.sales_channel.identifier,
),
request
).order_by(
'date_from'
).prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
)
events += list(
filter_qs_by_attr(
SubEvent.objects.filter(
Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
is_public=True,
active=True,
event__sales_channels__contains=self.request.sales_channel.identifier
),
request
).prefetch_related(
Prefetch(
'event',
queryset=Event.objects.prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
)
).order_by(
'date_from'
)
)
if 'locale' in request.GET and request.GET.get('locale') in dict(settings.LANGUAGES):
with language(request.GET.get('locale'), self.request.organizer.settings.region):
cal = get_public_ical(events)
else:
cal = get_public_ical(events)
resp = HttpResponse(cal.serialize(), content_type='text/calendar')
resp['Content-Disposition'] = 'attachment; filename="{}.ics"'.format(
request.organizer.slug
)
if request.organizer.settings.meta_noindex:
resp['X-Robots-Tag'] = 'noindex'
return resp
|
py
|
1a5d96fa7bccd6c02be9901b9876662ef155c86d
|
import numpy as np
from vector import Vec4
def u_2(r):
"""
Integration variable u_2 --- solution for r = 2u^1 - 1u^2
"""
return 1. - np.sqrt(1.-r)
def u_3(r):
"""
Integration variable u_3 --- solution for r = 3u^2 - 2u^3
"""
x = pow(1.-2.*r+2.*np.sqrt(r*(r-1.)+0.j),1./3.)
y = (2.-(1.-1.j*np.sqrt(3.))/x-(1.+1.j*np.sqrt(3.))*x)/4.
return y.real
def u_4(r):
"""
Integration variable u_4 --- solution for r = 4u^3 - 3u^4
"""
y = pow(r+np.sqrt(r*r*(1-r)+0.j),1./3.)
x = 3./2.*(r/y+y)
y = np.sqrt(1.+x)
z = (1.+y-np.sqrt(2.-x+2./y))/3.
return z.real
def f(x, a, r):
"""
The equation ax^(a-1) - (a-1)x^a - r = 0
To be used as argument in solver
"""
return a*x**(a-1) - (a-1)*x**a - r
def fp(x, a):
"""
First derivative of f
"""
return a*(a-1)*(x**(a-2) - x**(a-1))
def fpp(x, a):
"""
    Second derivative of f
"""
return a*(a-1)*((a-2)*x**(a-3) - (a-1)*x**(a-2))
def get_u(a, r):
"""
Solve f for u
    a = n + 1 - i in Simon's notation
The lowest order case is n=3 and i = 2, i.e. a = 2
"""
    if a < 2: raise Exception("a = {} not implemented".format(a))
from scipy import optimize
if a == 2: return u_2(r)
elif a == 3: return u_3(r)
elif a == 4: return u_4(r)
else:
return optimize.newton(lambda x : f(x, a, r), r, fprime=lambda x: fp(x,a), fprime2=lambda x: fpp(x,a))
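# Quick sanity check (illustrative only, not part of the original module): for a = 2 and
# r = 0.19 the closed form gives u_2(0.19) = 1 - sqrt(0.81) = 0.1, and indeed
# f(0.1, 2, 0.19) == 2*0.1 - 0.1**2 - 0.19 == 0.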
def rho(Min, Mout, mext=0.0):
"""
Helper function for mass term eq (5)
"""
M2 = Min*Min
return 0.125 * np.sqrt( (M2 - (Mout+mext)*(Mout+mext)) * (M2 - (Mout-mext)*(Mout-mext))) / M2
def rho_massless(Min, Mout):
"""
Helper function for mass term eq (5)
"""
M2 = Min*Min
M22 = Mout*Mout
return 0.125 * np.sqrt( M2*M2 - 2*M2*M22 + M22*M22) / M2
def generate_point(pa,pb,rans):
# The final momenta
MOM = [ -rans[-1]*pa, -rans[-2]*pb ]
_Q = -MOM[0]-MOM[1]
# Storage of intermediate Masses, Qs
M = [_Q.M()]
ALLQ =[_Q]
U, R = [], [] # Store the u and random numbers r
for i in range(2, NP+1):
# print("now i = {}".format(i))
if i < NP:
# print("Solving for u_{}, M_{}".format(i, i))
r = rans[3*(i-2)+2]
u = get_u(NP+1-i, r)
U.append(u)
R.append(r)
# Simon's paper must be wrong here, check
_M = np.sqrt(u*_Q.M2()) # M_i^2
else:
_M = 0
# print("Got M_{}={}".format(i, _M))
M.append(_M)
q = 4*M[-2] * rho_massless(M[-2], M[-1])
# Random numbers for costheta and phi
costheta = 2*rans[3*(i-2)] - 1
phi = 2.*np.pi*rans[3*(i-2)+1]
# Generated 4 Vectors
# p_(i-1)
sintheta = np.sqrt(1. - costheta*costheta)
p = q*Vec4(1, np.cos(phi)*sintheta, np.sin(phi)*sintheta, costheta)
# print("p_{} = {} {}".format(i+1, p, np.sqrt(abs(p.M2()))))
# now Q_i
_Q = Vec4(np.sqrt(q*q + M[-1]*M[-1]), -p.px, -p.py, -p.pz)
# print("Q_{} = {} {}".format(i, _Q, np.sqrt(abs(_Q.M2()))))
p = ALLQ[i-2].BoostBack(p)
_Q = ALLQ[i-2].BoostBack(_Q)
# print ALLQ[i-2]-_Q-p
# print "p boosted ",p,p.M2()
# print "Q boosted ",_Q,np.sqrt(abs(_Q.M2()))
# print "sum p+Q ",(p+_Q),(p+_Q).M()
MOM.append(p)
ALLQ.append(_Q)
MOM.append(_Q)
return MOM
def generate_weight(pa,pb,mom):
Q = -mom[0]-mom[1]
rans = []
for i in range(2, NP+1):
# print("now i = {}".format(i))
p = Q.Boost(mom[i])
# print 'p = ',p
costh = p[3]/p.P()
phi = p.Phi()
if phi < 0: phi += 2.*np.pi
# print "phi = ",phi
rans.append((1+costh)/2.)
rans.append(phi/(2.*np.pi))
if i < NP:
m = (Q-mom[i]).M2() / Q.M2()
u = f(m, NP+1-i, 0)
# print Q.M2(),(Q-mom[i]).M2(),(mom[3]+mom[4]).M2(),m,u
# print Q
Q -= mom[i]
# print Q
rans.append(u)
else:
_M = 0
rans.append(-(mom[1]*pa)/(pa*pb))
rans.append(-(mom[0]*pb)/(pa*pb))
return rans
def ME_ESW(P):
"""
Calculate the matrix element for g(p1) g(p2) --> g(p3) g(p4) g(p5)
Using eq (7.51) in QCD for collider physics.
P ... list of 4 momentum vectors
"""
from itertools import permutations
permutations=list(permutations([0,1,2,3,4])) # All 120 permutations
# M = const * A * B / C
# A = sum_permutations {1 2} ^ 4
A = 0
B = 0
for i in permutations:
A+= (P[i[0]] * P[i[1]])**4
B+= (P[i[0]] * P[i[1]]) * (P[i[1]] * P[i[2]]) * (P[i[2]] * P[i[3]]) * (P[i[3]] * P[i[4]]) * (P[i[4]] * P[i[0]])
C = 1
for i in range(5):
for j in range(5):
if i <j:
# print("i={}, j={}: {} * {} = {}".format(i, j, P[i], P[j], P[i]*P[j]))
C *= P[i]*P[j]
return A*B/C
def ME_PLB(P):
"""
Calculate the matrix element for g(p1) g(p2) --> g(p3) g(p4) g(p5)
Using eq (18) in Berends et al, Phys Let B 103 (1981) p 124 ff.
P ... list of 4 momentum vectors
"""
from itertools import permutations, combinations
permutations= [
(0,1,2,3,4),
(0,1,2,4,3),
(0,1,3,2,4),
(0,1,3,4,2),
(0,1,4,2,3),
(0,1,4,3,2),
(0,2,1,3,4),
(0,2,1,4,3),
(0,2,3,1,4),
(0,2,4,1,3),
(0,3,1,2,4),
(0,3,2,1,4),
]
kpermutations = list(combinations([0,1,2,3,4], 2))
# M = const * A * B / C
# A = sum_permutations {1 2} ^ 4
A = 0
for i in kpermutations:
A+= (P[i[0]] * P[i[1]])**4
B = 0
for i in permutations:
# print("(k{} * k{})^4".format(i[0]+1, i[1]+1))
B+= (P[i[0]] * P[i[1]]) * (P[i[1]] * P[i[2]]) * (P[i[2]] * P[i[3]]) * (P[i[3]] * P[i[4]]) * (P[i[4]] * P[i[0]])
C = 1
for i in range(5):
for j in range(5):
if i <j:
# print("i={}, j={}: {} * {} = {}".format(i, j, P[i], P[j], P[i]*P[j]))
C *= P[i]*P[j]
return A*B/C
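# As the __main__ block below suggests, the two formulations are expected to agree up to the
# overall permutation factor, i.e. 120 * ME_PLB(P) should equal ME_ESW(P) for the same momenta.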
if __name__ == "__main__":
import sys
np.random.seed(1)
pa = Vec4(7000,0,0,7000)
pb = Vec4(7000,0,0,-7000)
if len(sys.argv) <2:
print("Please specify the number of external particles, exiting")
sys.exit(1)
NP = int(sys.argv[1]) # Number of external particles
if NP<3:
print("NP should be >=3 for the whole thing to make sense, exiting")
sys.exit(1)
rans = [ np.random.rand() for i in range(0,3*NP-4+2) ]
moms = generate_point(pa,pb,rans)
msum = Vec4()
for num, mom in enumerate(moms):
msum += mom
print("p_{} = {} {}".format(num+1, mom, mom.M2()))
print("Mom sum {}".format(msum))
ranc = generate_weight(pa,pb,moms)
for r in range(0,len(rans)):
print("r_{} = {} -> dev. {}".format(r, ranc[r], ranc[r]/rans[r]-1))
print("120*Berends: {:.20f}".format(120*ME_PLB(moms)))
print("Ellis: {:.20f}".format(ME_ESW(moms)))
|
py
|
1a5d9882ae1651226651e26aa624aaf024fdfadb
|
#!/usr/bin/env python3
"""Parser for U.S. Energy Information Administration, https://www.eia.gov/ .
Aggregates and standardizes data from most of the US ISOs,
and exposes them via a unified API.
Requires an API key, set in the EIA_KEY environment variable. Get one here:
https://www.eia.gov/opendata/register.php
"""
import datetime
import requests
from dateutil import parser, tz
from .ENTSOE import merge_production_outputs
from .lib.utils import get_token
from .lib.validation import validate
#Reverse exchanges need to be multiplied by -1, since they are reported in the opposite direction
REVERSE_EXCHANGES = [
'US-CA->MX-BC',
'MX-BC->US-CAL-CISO',
'CA-SK->US-CENT-SWPP',
'CA-MB->US-MIDW-MISO',
'CA-ON->US-MIDW-MISO',
'CA-QC->US-NE-ISNE',
'CA-NB->US-NE-ISNE',
'CA-BC->US-NW-BPAT',
'CA-AB->US-NW-NWMT',
'CA-QC->US-NY-NYIS',
'CA-ON->US-NY-NYIS',
'MX-NE->US-TEX-ERCO',
'MX-NO->US-TEX-ERCO',
'US-SW-PNM->US-SW-SRP' # For some reason EBA.SRP-PNM.ID.H exists in EIA, but PNM-SRP does not. Probably because it is unidirectional
]
NEGATIVE_PRODUCTION_THRESHOLDS = {
'default': -10,
'zoneOverrides': {
'US-SW-SRP': {
'coal': -50,
'unknown': -50
},
'US-CAL-CISO': {
'unknown': -50,
'solar': -100
},
'US-SE-AEC': {
'coal': -50,
'gas': -20
},
'US-CAR-CPLE': {
'coal': -20
},
'US-NW-AVRN': {
'wind': -20
}
}
}
EXCHANGES = {
#Old exchanges with old zones, to be updated/removed once clients have had time to switch
'US-CA->MX-BC': 'EBA.CISO-CFE.ID.H',
'US-BPA->US-IPC': 'EBA.BPAT-IPCO.ID.H',
'US-SPP->US-TX': 'EBA.SWPP-ERCO.ID.H',
'US-MISO->US-PJM': 'EBA.MISO-PJM.ID.H',
'US-MISO->US-SPP': 'EBA.MISO-SWPP.ID.H',
'US-NEISO->US-NY': 'EBA.ISNE-NYIS.ID.H',
'US-NY->US-PJM': 'EBA.NYIS-PJM.ID.H',
#Exchanges to non-US BAs
'MX-BC->US-CAL-CISO': 'EBA.CISO-CFE.ID.H', #Unable to verify if MX-BC is correct
'CA-SK->US-CENT-SWPP': 'EBA.SWPP-SPC.ID.H',
'CA-MB->US-MIDW-MISO': 'EBA.MISO-MHEB.ID.H',
'CA-ON->US-MIDW-MISO': 'EBA.MISO-IESO.ID.H',
'CA-QC->US-NE-ISNE': 'EBA.ISNE-HQT.ID.H',
'CA-NB->US-NE-ISNE': 'EBA.ISNE-NBSO.ID.H',
'CA-BC->US-NW-BPAT': 'EBA.BPAT-BCHA.ID.H',
'CA-AB->US-NW-NWMT': 'EBA.NWMT-AESO.ID.H',
'CA-QC->US-NY-NYIS': 'EBA.NYIS-HQT.ID.H',
'CA-ON->US-NY-NYIS': 'EBA.NYIS-IESO.ID.H',
'MX-NE->US-TEX-ERCO': 'EBA.ERCO-CEN.ID.H', #Unable to verify if MX-NE is correct
'MX-NO->US-TEX-ERCO': 'EBA.ERCO-CFE.ID.H', #Unable to verify if MX-NO is correct
#Exchanges to other US balancing authorities
'US-CAL-BANC->US-NW-BPAT': 'EBA.BANC-BPAT.ID.H',
'US-CAL-BANC->US-CAL-CISO': 'EBA.BANC-CISO.ID.H',
'US-CAL-BANC->US-CAL-TIDC': 'EBA.BANC-TIDC.ID.H',
'US-CAL-CISO->US-SW-AZPS': 'EBA.CISO-AZPS.ID.H',
'US-CAL-CISO->US-NW-BPAT': 'EBA.CISO-BPAT.ID.H',
'US-CAL-CISO->US-CAL-IID': 'EBA.CISO-IID.ID.H',
'US-CAL-CISO->US-CAL-LDWP': 'EBA.CISO-LDWP.ID.H',
'US-CAL-CISO->US-NW-NEVP': 'EBA.CISO-NEVP.ID.H',
'US-CAL-CISO->US-NW-PACW': 'EBA.CISO-PACW.ID.H',
'US-CAL-CISO->US-SW-SRP': 'EBA.CISO-SRP.ID.H',
'US-CAL-CISO->US-CAL-TIDC': 'EBA.CISO-TIDC.ID.H',
'US-CAL-CISO->US-SW-WALC': 'EBA.CISO-WALC.ID.H',
'US-CAL-IID->US-SW-AZPS': 'EBA.IID-AZPS.ID.H',
'US-CAL-IID->US-SW-WALC': 'EBA.IID-WALC.ID.H',
'US-CAL-LDWP->US-SW-AZPS': 'EBA.LDWP-AZPS.ID.H',
'US-CAL-LDWP->US-NW-BPAT': 'EBA.LDWP-BPAT.ID.H',
'US-CAL-LDWP->US-NW-NEVP': 'EBA.LDWP-NEVP.ID.H',
'US-CAL-LDWP->US-NW-PACE': 'EBA.LDWP-PACE.ID.H',
'US-CAL-LDWP->US-SW-WALC': 'EBA.LDWP-WALC.ID.H',
'US-CAR-CPLE->US-CAR-YAD': 'EBA.CPLE-YAD.ID.H',
'US-CAR-CPLE->US-CAR-DUK': 'EBA.CPLE-DUK.ID.H',
'US-CAR-CPLE->US-MIDA-PJM': 'EBA.CPLE-PJM.ID.H',
'US-CAR-CPLE->US-CAR-SCEG': 'EBA.CPLE-SCEG.ID.H',
'US-CAR-CPLE->US-CAR-SC': 'EBA.CPLE-SC.ID.H',
'US-CAR-CPLW->US-CAR-DUK': 'EBA.CPLW-DUK.ID.H',
'US-CAR-CPLW->US-MIDA-PJM': 'EBA.CPLW-PJM.ID.H',
'US-CAR-CPLW->US-TEN-TVA': 'EBA.CPLW-TVA.ID.H',
'US-CAR-DUK->US-CAR-YAD': 'EBA.DUK-YAD.ID.H',
'US-CAR-DUK->US-MIDA-PJM': 'EBA.DUK-PJM.ID.H',
'US-CAR-DUK->US-CAR-SCEG': 'EBA.DUK-SCEG.ID.H',
'US-CAR-DUK->US-CAR-SC': 'EBA.DUK-SC.ID.H',
'US-CAR-DUK->US-SE-SEPA': 'EBA.DUK-SEPA.ID.H',
'US-CAR-DUK->US-SE-SOCO': 'EBA.DUK-SOCO.ID.H',
'US-CAR-DUK->US-TEN-TVA': 'EBA.DUK-TVA.ID.H',
'US-CAR-SC->US-CAR-SCEG': 'EBA.SC-SCEG.ID.H',
'US-CAR-SC->US-SE-SEPA': 'EBA.SC-SEPA.ID.H',
'US-CAR-SC->US-SE-SOCO': 'EBA.SC-SOCO.ID.H',
'US-CAR-SCEG->US-SE-SEPA': 'EBA.SCEG-SEPA.ID.H',
'US-CAR-SCEG->US-SE-SOCO': 'EBA.SCEG-SOCO.ID.H',
'US-CENT-SPA->US-MIDW-AECI': 'EBA.SPA-AECI.ID.H',
'US-CENT-SPA->US-MIDW-MISO': 'EBA.SPA-MISO.ID.H',
'US-CENT-SPA->US-CENT-SWPP': 'EBA.SPA-SWPP.ID.H',
'US-CENT-SWPP->US-MIDW-AECI': 'EBA.SWPP-AECI.ID.H',
'US-CENT-SWPP->US-SW-EPE': 'EBA.SWPP-EPE.ID.H',
'US-CENT-SWPP->US-TEX-ERCO': 'EBA.SWPP-ERCO.ID.H',
'US-CENT-SWPP->US-MIDW-MISO': 'EBA.SWPP-MISO.ID.H',
'US-CENT-SWPP->US-NW-PSCO': 'EBA.SWPP-PSCO.ID.H',
'US-CENT-SWPP->US-SW-PNM': 'EBA.SWPP-PNM.ID.H',
'US-CENT-SWPP->US-NW-WACM': 'EBA.SWPP-WACM.ID.H',
'US-CENT-SWPP->US-NW-WAUW': 'EBA.SWPP-WAUW.ID.H',
'US-FLA-FMPP->US-FLA-FPC': 'EBA.FMPP-FPC.ID.H',
'US-FLA-FMPP->US-FLA-FPL': 'EBA.FMPP-FPL.ID.H',
'US-FLA-FMPP->US-FLA-JEA': 'EBA.FMPP-JEA.ID.H',
'US-FLA-FMPP->US-FLA-TEC': 'EBA.FMPP-TEC.ID.H',
'US-FLA-FPC->US-FLA-TAL': 'EBA.FPC-TAL.ID.H',
'US-FLA-FPC->US-FLA-FPL': 'EBA.FPC-FPL.ID.H',
'US-FLA-FPC->US-FLA-GVL': 'EBA.FPC-GVL.ID.H',
'US-FLA-FPC->US-FLA-SEC': 'EBA.FPC-SEC.ID.H',
'US-FLA-FPC->US-SE-SOCO': 'EBA.FPC-SOCO.ID.H',
'US-FLA-FPC->US-FLA-TEC': 'EBA.FPC-TEC.ID.H',
'US-FLA-FPC->US-FLA-NSB': 'EBA.FPC-NSB.ID.H',
'US-FLA-FPL->US-FLA-HST': 'EBA.FPL-HST.ID.H',
'US-FLA-FPL->US-FLA-GVL': 'EBA.FPL-GVL.ID.H',
'US-FLA-FPL->US-FLA-JEA': 'EBA.FPL-JEA.ID.H',
'US-FLA-FPL->US-FLA-SEC': 'EBA.FPL-SEC.ID.H',
'US-FLA-FPL->US-SE-SOCO': 'EBA.FPL-SOCO.ID.H',
'US-FLA-FPL->US-FLA-TEC': 'EBA.FPL-TEC.ID.H',
'US-FLA-FPL->US-FLA-NSB': 'EBA.FPL-NSB.ID.H',
'US-FLA-JEA->US-FLA-SEC': 'EBA.JEA-SEC.ID.H',
'US-FLA-SEC->US-FLA-TEC': 'EBA.SEC-TEC.ID.H',
'US-FLA-TAL->US-SE-SOCO': 'EBA.TAL-SOCO.ID.H',
'US-MIDA-OVEC->US-MIDW-LGEE': 'EBA.OVEC-LGEE.ID.H',
'US-MIDA-OVEC->US-MIDA-PJM': 'EBA.OVEC-PJM.ID.H',
'US-MIDA-PJM->US-MIDW-LGEE': 'EBA.PJM-LGEE.ID.H',
'US-MIDA-PJM->US-MIDW-MISO': 'EBA.PJM-MISO.ID.H',
'US-MIDA-PJM->US-NY-NYIS': 'EBA.PJM-NYIS.ID.H',
'US-MIDA-PJM->US-TEN-TVA': 'EBA.PJM-TVA.ID.H',
'US-MIDW-AECI->US-MIDW-MISO': 'EBA.AECI-MISO.ID.H',
'US-MIDW-AECI->US-TEN-TVA': 'EBA.AECI-TVA.ID.H',
'US-MIDW-EEI->US-MIDW-LGEE': 'EBA.EEI-LGEE.ID.H',
'US-MIDW-EEI->US-MIDW-MISO': 'EBA.EEI-MISO.ID.H',
'US-MIDW-EEI->US-TEN-TVA': 'EBA.EEI-TVA.ID.H',
'US-MIDW-GLHB->US-MIDW-LGEE': 'EBA.GLHB-LGEE.ID.H',
'US-MIDW-GLHB->US-MIDW-MISO': 'EBA.GLHB-MISO.ID.H',
'US-MIDW-LGEE->US-MIDW-MISO': 'EBA.LGEE-MISO.ID.H',
'US-MIDW-LGEE->US-TEN-TVA': 'EBA.LGEE-TVA.ID.H',
'US-MIDW-MISO->US-SE-AEC': 'EBA.MISO-AEC.ID.H',
'US-MIDW-MISO->US-SE-SOCO': 'EBA.MISO-SOCO.ID.H',
'US-MIDW-MISO->US-TEN-TVA': 'EBA.MISO-TVA.ID.H',
'US-NE-ISNE->US-NY-NYIS': 'EBA.ISNE-NYIS.ID.H',
'US-NW-AVA->US-NW-BPAT': 'EBA.AVA-BPAT.ID.H',
'US-NW-AVA->US-NW-IPCO': 'EBA.AVA-IPCO.ID.H',
'US-NW-AVA->US-NW-NWMT': 'EBA.AVA-NWMT.ID.H',
'US-NW-AVA->US-NW-PACW': 'EBA.AVA-PACW.ID.H',
'US-NW-AVA->US-NW-CHPD': 'EBA.AVA-CHPD.ID.H',
'US-NW-AVA->US-NW-GCPD': 'EBA.AVA-GCPD.ID.H',
'US-NW-AVRN->US-NW-BPAT': 'EBA.AVRN-BPAT.ID.H',
'US-NW-AVRN->US-NW-PACW': 'EBA.AVRN-PACW.ID.H',
'US-NW-BPAT->US-NW-TPWR': 'EBA.BPAT-TPWR.ID.H',
'US-NW-BPAT->US-NW-GRID': 'EBA.BPAT-GRID.ID.H',
'US-NW-BPAT->US-NW-IPCO': 'EBA.BPAT-IPCO.ID.H',
'US-NW-BPAT->US-NW-NEVP': 'EBA.BPAT-NEVP.ID.H',
'US-NW-BPAT->US-NW-NWMT': 'EBA.BPAT-NWMT.ID.H',
'US-NW-BPAT->US-NW-DOPD': 'EBA.BPAT-DOPD.ID.H',
'US-NW-BPAT->US-NW-PACW': 'EBA.BPAT-PACW.ID.H',
'US-NW-BPAT->US-NW-PGE': 'EBA.BPAT-PGE.ID.H',
'US-NW-BPAT->US-NW-CHPD': 'EBA.BPAT-CHPD.ID.H',
'US-NW-BPAT->US-NW-GCPD': 'EBA.BPAT-GCPD.ID.H',
'US-NW-BPAT->US-NW-PSEI': 'EBA.BPAT-PSEI.ID.H',
'US-NW-BPAT->US-NW-SCL': 'EBA.BPAT-SCL.ID.H',
'US-NW-CHPD->US-NW-DOPD': 'EBA.CHPD-DOPD.ID.H',
'US-NW-CHPD->US-NW-PSEI': 'EBA.CHPD-PSEI.ID.H',
'US-NW-GCPD->US-NW-PACW': 'EBA.GCPD-PACW.ID.H',
'US-NW-GCPD->US-NW-PSEI': 'EBA.GCPD-PSEI.ID.H',
'US-NW-GWA->US-NW-NWMT': 'EBA.GWA-NWMT.ID.H',
'US-NW-IPCO->US-NW-NEVP': 'EBA.IPCO-NEVP.ID.H',
'US-NW-IPCO->US-NW-NWMT': 'EBA.IPCO-NWMT.ID.H',
'US-NW-IPCO->US-NW-PACE': 'EBA.IPCO-PACE.ID.H',
'US-NW-IPCO->US-NW-PACW': 'EBA.IPCO-PACW.ID.H',
'US-NW-NEVP->US-NW-PACE': 'EBA.NEVP-PACE.ID.H',
'US-NW-NEVP->US-SW-WALC': 'EBA.NEVP-WALC.ID.H',
'US-NW-NWMT->US-NW-WWA': 'EBA.NWMT-WWA.ID.H',
'US-NW-NWMT->US-NW-PACE': 'EBA.NWMT-PACE.ID.H',
'US-NW-NWMT->US-NW-WAUW': 'EBA.NWMT-WAUW.ID.H',
'US-NW-PACE->US-SW-AZPS': 'EBA.PACE-AZPS.ID.H',
'US-NW-PACE->US-NW-PACW': 'EBA.PACE-PACW.ID.H',
'US-NW-PACE->US-NW-WACM': 'EBA.PACE-WACM.ID.H',
'US-NW-PACW->US-NW-PGE': 'EBA.PACW-PGE.ID.H',
'US-NW-PSCO->US-SW-PNM': 'EBA.PSCO-PNM.ID.H',
'US-NW-PSCO->US-NW-WACM': 'EBA.PSCO-WACM.ID.H',
'US-NW-PSEI->US-NW-TPWR': 'EBA.PSEI-TPWR.ID.H',
'US-NW-PSEI->US-NW-SCL': 'EBA.PSEI-SCL.ID.H',
'US-NW-WACM->US-SW-AZPS': 'EBA.WACM-AZPS.ID.H',
'US-NW-WACM->US-SW-PNM': 'EBA.WACM-PNM.ID.H',
'US-NW-WACM->US-SW-WALC': 'EBA.WACM-WALC.ID.H',
'US-NW-WACM->US-NW-WAUW': 'EBA.WACM-WAUW.ID.H',
'US-SE-AEC->US-SE-SOCO': 'EBA.AEC-SOCO.ID.H',
'US-SE-SEPA->US-SE-SOCO': 'EBA.SEPA-SOCO.ID.H',
'US-SE-SOCO->US-TEN-TVA': 'EBA.SOCO-TVA.ID.H',
'US-SW-AZPS->US-SW-GRMA': 'EBA.AZPS-GRMA.ID.H',
'US-SW-AZPS->US-SW-PNM': 'EBA.AZPS-PNM.ID.H',
'US-SW-AZPS->US-SW-SRP': 'EBA.AZPS-SRP.ID.H',
'US-SW-AZPS->US-SW-TEPC': 'EBA.AZPS-TEPC.ID.H',
'US-SW-AZPS->US-SW-WALC': 'EBA.AZPS-WALC.ID.H',
'US-SW-DEAA->US-SW-SRP': 'EBA.DEAA-SRP.ID.H',
'US-SW-EPE->US-SW-PNM': 'EBA.EPE-PNM.ID.H',
'US-SW-EPE->US-SW-TEPC': 'EBA.EPE-TEPC.ID.H',
'US-SW-GRIF->US-SW-WALC': 'EBA.GRIF-WALC.ID.H',
'US-SW-HGMA->US-SW-SRP': 'EBA.HGMA-SRP.ID.H',
'US-SW-PNM->US-SW-TEPC': 'EBA.PNM-TEPC.ID.H',
'US-SW-PNM->US-SW-SRP': 'EBA.SRP-PNM.ID.H',
'US-SW-SRP->US-SW-TEPC': 'EBA.SRP-TEPC.ID.H',
'US-SW-SRP->US-SW-WALC': 'EBA.SRP-WALC.ID.H',
'US-SW-TEPC->US-SW-WALC': 'EBA.TEPC-WALC.ID.H'
}
# based on https://www.eia.gov/beta/electricity/gridmonitor/dashboard/electric_overview/US48/US48
# or https://www.eia.gov/opendata/qb.php?category=3390101
# List includes regions and Balancing Authorities.
REGIONS = {
#Old regions, to be updated/removed once clients have had time to switch
'US-BPA': 'BPAT',
'US-CA': 'CAL',
'US-CAR': 'CAR',
'US-DUK': 'DUK', #Duke Energy Carolinas
'US-SPP': 'CENT',
'US-FL': 'FLA',
'US-PJM': 'MIDA',
'US-MISO': 'MIDW',
'US-NEISO': 'NE',
'US-NEVP': 'NEVP', #Nevada Power Company
'US-NY': 'NY',
'US-NW': 'NW',
'US-SC': 'SC', #South Carolina Public Service Authority
'US-SE': 'SE',
'US-SEC': 'SEC',
'US-SOCO': 'SOCO', #Southern Company Services Inc - Trans
'US-SWPP': 'SWPP', #Southwest Power Pool
'US-SVERI': 'SW',
'US-TN': 'TEN',
'US-TX': 'TEX',
#New regions - EIA
'US-CAL-BANC': 'BANC', #Balancing Authority Of Northern California
'US-CAL-CISO': 'CISO', #California Independent System Operator
'US-CAL-IID': 'IID', #Imperial Irrigation District
'US-CAL-LDWP': 'LDWP', #Los Angeles Department Of Water And Power
'US-CAL-TIDC': 'TIDC', #Turlock Irrigation District
'US-CAR-CPLE': 'CPLE', #Duke Energy Progress East
'US-CAR-CPLW': 'CPLW', #Duke Energy Progress West
'US-CAR-DUK': 'DUK', #Duke Energy Carolinas
'US-CAR-SC': 'SC', #South Carolina Public Service Authority
'US-CAR-SCEG': 'SCEG', #South Carolina Electric & Gas Company
'US-CAR-YAD': 'YAD', #Alcoa Power Generating, Inc. - Yadkin Division
'US-CENT-SPA': 'SPA', #Southwestern Power Administration
'US-CENT-SWPP': 'SWPP', #Southwest Power Pool
'US-FLA-FMPP': 'FMPP', #Florida Municipal Power Pool
'US-FLA-FPC': 'FPC', #Duke Energy Florida Inc
'US-FLA-FPL': 'FPL', #Florida Power & Light Company
'US-FLA-GVL': 'GVL', #Gainesville Regional Utilities
'US-FLA-HST': 'HST', #City Of Homestead
'US-FLA-JEA': 'JEA', #Jea
'US-FLA-NSB': 'NSB', #New Smyrna Beach, Utilities Commission Of
'US-FLA-SEC': 'SEC', #Seminole Electric Cooperative
'US-FLA-TAL': 'TAL', #City Of Tallahassee
'US-FLA-TEC': 'TEC', #Tampa Electric Company
'US-MIDA-OVEC': 'OVEC', #Ohio Valley Electric Corporation
'US-MIDA-PJM': 'PJM', #Pjm Interconnection, Llc
'US-MIDW-AECI': 'AECI', #Associated Electric Cooperative, Inc.
'US-MIDW-EEI': 'EEI', #Electric Energy, Inc.
'US-MIDW-GLHB': 'GLHB', #GridLiance
'US-MIDW-LGEE': 'LGEE', #Louisville Gas And Electric Company And Kentucky Utilities
    'US-MIDW-MISO': 'MISO', #Midcontinent Independent Transmission System Operator, Inc.
'US-NE-ISNE': 'ISNE', #Iso New England Inc.
'US-NW-AVA': 'AVA', #Avista Corporation
'US-NW-AVRN': 'AVRN', #Avangrid Renewables Cooperative
'US-NW-BPAT': 'BPAT', #Bonneville Power Administration
'US-NW-CHPD': 'CHPD', #Public Utility District No. 1 Of Chelan County
'US-NW-DOPD': 'DOPD', #Pud No. 1 Of Douglas County
'US-NW-GCPD': 'GCPD', #Public Utility District No. 2 Of Grant County, Washington
'US-NW-GRID': 'GRID', #Gridforce Energy Management, Llc
'US-NW-GWA': 'GWA', #Naturener Power Watch, Llc (Gwa)
'US-NW-IPCO': 'IPCO', #Idaho Power Company
'US-NW-NEVP': 'NEVP', #Nevada Power Company
'US-NW-NWMT': 'NWMT', #Northwestern Energy (Nwmt)
'US-NW-PACE': 'PACE', #Pacificorp - East
'US-NW-PACW': 'PACW', #Pacificorp - West
'US-NW-PGE': 'PGE', #Portland General Electric Company
'US-NW-PSCO': 'PSCO', #Public Service Company Of Colorado
'US-NW-PSEI': 'PSEI', #Puget Sound Energy
'US-NW-SCL': 'SCL', #Seattle City Light
'US-NW-TPWR': 'TPWR', #City Of Tacoma, Department Of Public Utilities, Light Division
'US-NW-WACM': 'WACM', #Western Area Power Administration - Rocky Mountain Region
'US-NW-WAUW': 'WAUW', #Western Area Power Administration Ugp West
'US-NW-WWA': 'WWA', #Naturener Wind Watch, Llc
'US-NY-NYIS': 'NYIS', #New York Independent System Operator
'US-SE-AEC': 'AEC', #Powersouth Energy Cooperative
'US-SE-SEPA': 'SEPA', #Southeastern Power Administration
'US-SE-SOCO': 'SOCO', #Southern Company Services, Inc. - Trans
'US-SW-AZPS': 'AZPS', #Arizona Public Service Company
'US-SW-DEAA': 'DEAA', #Arlington Valley, Llc - Avba
'US-SW-EPE': 'EPE', #El Paso Electric Company
'US-SW-GRIF': 'GRIF', #Griffith Energy, Llc
'US-SW-GRMA': 'GRMA', #Gila River Power, Llc
'US-SW-HGMA': 'HGMA', #New Harquahala Generating Company, Llc - Hgba
'US-SW-PNM': 'PNM', #Public Service Company Of New Mexico
'US-SW-SRP': 'SRP', #Salt River Project
'US-SW-TEPC': 'TEPC', #Tucson Electric Power Company
'US-SW-WALC': 'WALC', #Western Area Power Administration - Desert Southwest Region
'US-TEN-TVA': 'TVA', #Tennessee Valley Authority
'US-TEX-ERCO': 'ERCO' #Electric Reliability Council Of Texas, Inc.
}
TYPES = {
# 'biomass': 'BM', # not currently supported
'coal': 'COL',
'gas': 'NG',
'hydro': 'WAT',
'nuclear': 'NUC',
'oil': 'OIL',
'unknown': 'OTH',
'solar': 'SUN',
'wind': 'WND',
}
PRODUCTION_SERIES = 'EBA.%s-ALL.NG.H'
PRODUCTION_MIX_SERIES = 'EBA.%s-ALL.NG.%s.H'
DEMAND_SERIES = 'EBA.%s-ALL.D.H'
FORECAST_SERIES = 'EBA.%s-ALL.DF.H'
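# The series-id templates above are filled in with the EIA code from REGIONS
# (and, for the production mix, a fuel code from TYPES), e.g.
# PRODUCTION_MIX_SERIES % ('CISO', 'SUN') -> 'EBA.CISO-ALL.NG.SUN.H'.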
def fetch_consumption_forecast(zone_key, session=None, target_datetime=None, logger=None):
return _fetch_series(zone_key, FORECAST_SERIES % REGIONS[zone_key],
session=session, target_datetime=target_datetime,
logger=logger)
def fetch_production(zone_key, session=None, target_datetime=None, logger=None):
return _fetch_series(zone_key, PRODUCTION_SERIES % REGIONS[zone_key],
session=session, target_datetime=target_datetime,
logger=logger)
def fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):
consumption = _fetch_series(zone_key, DEMAND_SERIES % REGIONS[zone_key],
session=session, target_datetime=target_datetime,
logger=logger)
for point in consumption:
point['consumption'] = point.pop('value')
return consumption
def fetch_production_mix(zone_key, session=None, target_datetime=None, logger=None):
mixes = []
for type, code in TYPES.items():
series = PRODUCTION_MIX_SERIES % (REGIONS[zone_key], code)
mix = _fetch_series(zone_key, series, session=session,
target_datetime=target_datetime, logger=logger)
# EIA does not currently split production from the Virgil Summer C
        # plant across the two owning/utilizing BAs:
# US-CAR-SCEG and US-CAR-SC,
# but attributes it all to US-CAR-SCEG
# Here we apply a temporary fix for that until EIA properly splits the production
# This split can be found in the eGRID data,
# https://www.epa.gov/energy/emissions-generation-resource-integrated-database-egrid
SC_VIRGIL_OWNERSHIP = 0.3333333
if zone_key == 'US-CAR-SC' and type == 'nuclear':
series = PRODUCTION_MIX_SERIES % (REGIONS['US-CAR-SCEG'], code)
mix = _fetch_series('US-CAR-SCEG', series, session=session,
target_datetime=target_datetime, logger=logger)
for point in mix:
point.update({
'value': point['value']*SC_VIRGIL_OWNERSHIP
})
if zone_key == 'US-CAR-SCEG' and type == 'nuclear':
for point in mix:
point.update({
'value': point['value']*(1-SC_VIRGIL_OWNERSHIP)
})
if not mix:
continue
for point in mix:
negative_threshold = NEGATIVE_PRODUCTION_THRESHOLDS['zoneOverrides']\
.get(zone_key, {})\
.get(type, NEGATIVE_PRODUCTION_THRESHOLDS['default'])
if type != 'hydro' and \
point['value'] and \
0 > point['value'] >= negative_threshold:
point['value'] = 0
if type == 'hydro' and point['value'] and point['value'] < 0:
point.update({
'production': {},# required by merge_production_outputs()
'storage': {type: point.pop('value')},
})
else:
point.update({
'production': {type: point.pop('value')},
'storage': {}, # required by merge_production_outputs()
})
            # Replace remaining small negative values (> -5) with 0; this is necessary for solar.
point = validate(point, logger=logger, remove_negative=True)
mixes.append(mix)
# Some of the returned mixes could be for older timeframes.
    # E.g. the latest oil data could be 6 months old.
# In this case we want to discard the old data as we won't be able to merge it
timeframes = [
sorted(map(lambda x: x['datetime'], mix))
for mix in mixes
]
latest_timeframe = max(timeframes, key=lambda x: x[-1])
correct_mixes = []
for mix in mixes:
correct_mix = []
for production_in_mix in mix:
if production_in_mix['datetime'] in latest_timeframe:
correct_mix.append(production_in_mix)
if len(correct_mix) > 0:
correct_mixes.append(correct_mix)
return merge_production_outputs(correct_mixes, zone_key, merge_source='eia.gov')
def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
sortedcodes = '->'.join(sorted([zone_key1, zone_key2]))
exchange = _fetch_series(sortedcodes, EXCHANGES[sortedcodes], session=session,
target_datetime=target_datetime, logger=logger)
for point in exchange:
point.update({
'sortedZoneKeys': point.pop('zoneKey'),
'netFlow': point.pop('value'),
})
if sortedcodes in REVERSE_EXCHANGES:
point['netFlow'] = -point['netFlow']
return exchange
def _fetch_series(zone_key, series_id, session=None, target_datetime=None,
logger=None):
"""Fetches and converts a data series."""
s = session or requests.Session()
    # Local import to avoid the exception raised when the eiapy token (EIA_KEY)
    # is not set, even if this module is unused.
from eiapy import Series
series = Series(series_id=series_id, session=s)
if target_datetime:
utc = tz.gettz('UTC')
        # EIA currently only accepts UTC timestamps in the form YYYYMMDDTHHZ.
end = target_datetime.astimezone(utc).strftime('%Y%m%dT%HZ')
start = (target_datetime.astimezone(utc) - datetime.timedelta(days=1)).strftime('%Y%m%dT%HZ')
raw_data = series.get_data(start=start, end=end)
else:
# Get the last 24 hours available.
raw_data = series.last(24)
# UTC timestamp with no offset returned.
if not raw_data.get('series'):
# Series doesn't exist. Probably requesting a fuel from a region that
# doesn't have any capacity for that fuel type.
return []
return [{
'zoneKey': zone_key,
'datetime': parser.parse(datapoint[0]),
'value': datapoint[1],
'source': 'eia.gov',
} for datapoint in raw_data['series'][0]['data']]
def main():
"Main method, never used by the Electricity Map backend, but handy for testing."
from pprint import pprint
pprint(fetch_consumption_forecast('US-NY'))
pprint(fetch_production('US-SEC'))
pprint(fetch_production_mix('US-TN'))
pprint(fetch_consumption('US-CAR'))
pprint(fetch_exchange('MX-BC', 'US-CA'))
if __name__ == '__main__':
main()
|
py
|
1a5d994b1faa81356e7029fc1886b309eb0b37e4
|
from typing import Union
from uuid import UUID
from getnet.services.plans.plan_response import PlanResponse
from getnet.services.subscriptions.credit import Credit
from getnet.services.subscriptions.customer import Customer
from getnet.services.utils import Device
class Subscription:
seller_id: str
customer_id: str
plan_id: str
order_id: str
credit: Credit
device: Device
def __init__(
self,
order_id: str,
customer_id: Union[Customer, str],
plan_id: Union[PlanResponse, UUID, str],
credit: Union[Credit, dict],
device: Union[Device, dict] = None,
seller_id: str = None,
):
self.customer_id = (
customer_id.customer_id
if isinstance(customer_id, Customer)
else customer_id
)
self.plan_id = (
plan_id.plan_id if isinstance(plan_id, PlanResponse) else str(plan_id)
)
self.order_id = order_id
self.credit = (
credit if isinstance(credit, Credit) or credit is None else Credit(**credit)
)
self.device = (
device if isinstance(device, Device) or device is None else Device(**device)
)
self.seller_id = seller_id
def as_dict(self):
data = {
"seller_id": str(self.seller_id),
"customer_id": str(self.customer_id),
"plan_id": str(self.plan_id),
"order_id": self.order_id,
"subscription": {"payment_type": {"credit": self.credit.as_dict()}},
}
if self.device is not None:
data["devise"] = self.device.as_dict()
return data
|
py
|
1a5d99844654778af12f0730dd9f5345a8e1d18a
|
#!/usr/bin/env python
from aoc import get_input
def run(cmds):
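    # cmds is a list of [op, value] pairs, where op is the first letter of the
    # instruction ('a' = acc, 'j' = jmp, anything else = nop).
    # Returns (accumulator, True) if the program ran off the end, or
    # (accumulator, False) if an instruction was about to execute a second time.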
acc = ptr = 0
seen = set()
while ptr < len(cmds):
if ptr in seen:
return acc, False
seen.add(ptr)
cmd, val = cmds[ptr]
if cmd == 'j':
ptr += val
continue
if cmd == 'a':
acc += val
ptr += 1
return acc, True
def main():
with get_input(__file__) as ifile:
cmds = [[line[0], int(line[4:])] for line in ifile]
print(run(cmds)[0]) #1
for i, (cmd, _) in enumerate(cmds): # BRUTE FORCE BAYBEE
if cmd == 'a': continue
old = cmds[i][0]
cmds[i][0] = {'j': 'n', 'n': 'j'}[old]
acc, res = run(cmds)
if res: break
cmds[i][0] = old
print(acc) # 2
if __name__ == '__main__':
main()
|
py
|
1a5d99dea37bfec7350bf0cd9ad3468596cfd839
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 13:05:34 2019
@author: tadahaya
"""
import unittest
import pandas as pd
import os
import sys
import math
from enan.fet import FET
class SampleTest(unittest.TestCase):
CLS_VAL = 'none'
    # Called once when the test class is initialized.
@classmethod
def setUpClass(cls):
if sys.flags.debug:
print('> setUpClass method is called.')
cls.CLS_VAL = '> setUpClass : initialized!'
if sys.flags.debug:
print(cls.CLS_VAL)
    # Called once when the test class is torn down.
@classmethod
def tearDownClass(cls):
if sys.flags.debug:
print('> tearDownClass method is called.')
cls.CLS_VAL = '> tearDownClass : released!'
if sys.flags.debug:
print(cls.CLS_VAL)
    # Called before each test method runs.
def setUp(self):
if sys.flags.debug:
print(os.linesep + '> setUp method is called.')
self.smpl = FET()
    # Called after each test method ends.
def tearDown(self):
if sys.flags.debug:
print(os.linesep + '> tearDown method is called.')
def _df_checker(self,df):
if type(df)!=pd.core.frame.DataFrame:
return False
elif df.shape[0]==0:
return False
else:
head = df.head(1)
judge = math.isnan(head.iat[0,0])
return not judge
def _sr_checker(self,sr):
if type(sr)!=pd.core.series.Series:
return False
if sr.shape[0]==0:
return False
else:
head = sr.head(1)
judge = math.isnan(head.iat[0])
return not judge
def test_calc(self):
# prepare test patterns
test_patterns = [
("fdr_bh","greater",None), # (arg1, arg2, ..., expected result)
("fdr_bh","two-sided",None), # (arg1, arg2, ..., expected result)
("fdr_bh","less",None), # (arg1, arg2, ..., expected result)
("fdr_bh","greater",3), # (arg1, arg2, ..., expected result)
]
self.smpl.gene(ref="enrichr",species="human")
self.smpl.load_ref(library="KEGG_2019_Human")
self.smpl.set_obj({"abca1","abcg1","abcb11","abcc2","abcc3","abcc4"})
### loop for sweeping all conditions
for tcorr,tmode,tfocus in test_patterns:
with self.subTest(correction=tcorr,mode=tmode,focus=tfocus):
self.assertTrue(self._df_checker(self.smpl.calc(correction=tcorr,mode=tmode,focus=tfocus)))
|
py
|
1a5d9ac977410264e362ef73b45780f064a02b00
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 14:24:59 2019
@author: jdummer
"""
import yaml
yml_lines = []
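# The first loop skips everything up to the opening '---' of the YAML front
# matter; the second collects lines until the closing '---'.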
with open('2019-01-08-oleanna.md', encoding='utf8') as file:
for line in file:
if line.strip() == "---":
break
for line in file:
if line.strip() == "---":
break
else:
yml_lines.append(line)
yml_string = "".join(yml_lines)
print(yaml.safe_load(yml_string))
|
py
|
1a5d9add78a3f7affa35d590357f66535d42007c
|
def extractTeafragranceWordpressCom(item):
'''
Parser for 'teafragrance.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Rock Sugar And Pear Stew', 'Rock Sugar And Pear Stew', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
py
|
1a5d9ba78e66db5d75d816390729fd53b9b50710
|
# Generated by Django 3.1.4 on 2021-01-13 11:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auctions', '0014_auto_20210113_2215'),
]
operations = [
migrations.AlterField(
model_name='listing',
name='image',
field=models.ImageField(upload_to='images'),
),
]
|
py
|
1a5d9d03edb7f7bbe0fd5363b9e30fe757e649d5
|
"""
ASGI config for crypto project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crypto.settings')
application = get_asgi_application()
|
py
|
1a5d9e7627dc804b9c4f3d12dc7e317f78bbb4dd
|
__author__ = 'Przemyslaw "Blasto" Wentrys'
|
py
|
1a5da1a8a337ac605f5191619789004df6138735
|
"""
Zamien miejscami sasiadujace bity.
1010
0101
Maska 0xAAAAAAAA ma wszystkie parzyste bity ustawione
Maska 0x55555555 ma wszystkie nieparzyste bity ustawione
robiac AND zerujemy te ktore nie sa ustawione w pierwotnej liczbie
nastepnie przesuwamy w lewo prawo
dodajemy za pomoca OR
"""
def zamien_sasiadow(liczba):
parzyste = liczba & 0xAAAAAAAA
nieparzyste = liczba & 0x55555555
parzyste >>= 1
nieparzyste <<= 1
return parzyste | nieparzyste
if __name__ == "__main__":
liczba = 9131
wynik = 4951
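    # 9131 = 0b10001110101011; swapping adjacent bit pairs gives
    # 0b01001101010111 = 4951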
assert zamien_sasiadow(liczba) == wynik
|
py
|
1a5da35ed0c91f4f215113e74d7285dc50e8d9a2
|
#!/usr/bin/env python
"""Script that makes determining PATTERN for a new [2to3] fix much easier.
Figuring out exactly what PATTERN I want for a given fixer class is
getting tedious. This script will step through each possible subtree
for a given string, allowing you to select which one you want. It will
then try to figure out an appropriate pattern to match that tree. This
pattern will require some editing (it will be overly restrictive) but
should provide a solid base to work with and handle the tricky parts.
Usage:
python find_pattern.py "g.throw(E, V, T)"
This will step through each subtree in the parse. To reject a
candidate subtree, hit enter; to accept a candidate, hit "y" and
enter. The pattern will be spit out to stdout.
For example, the above will yield a succession of possible snippets,
skipping all leaf-only trees. I accept
'g.throw(E, V, T)'
This causes find_pattern to spit out
power< 'g' trailer< '.' 'throw' >
trailer< '(' arglist< 'E' ',' 'V' ',' 'T' > ')' > >
Some minor tweaks later, I'm left with
power< any trailer< '.' 'throw' >
trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' > >
which is exactly what I was after.
Larger snippets can be placed in a file (as opposed to a command-line
arg) and processed with the -f option.
"""
from __future__ import print_function
__author__ = "Collin Winter <[email protected]>"
# Python imports
import optparse
import sys
from StringIO import StringIO
# Local imports
from lib2to3 import pytree
from lib2to3.pgen2 import driver
from lib2to3.pygram import python_symbols, python_grammar
driver = driver.Driver(python_grammar, convert=pytree.convert)
def main(args):
parser = optparse.OptionParser(usage="find_pattern.py [options] [string]")
parser.add_option("-f", "--file", action="store",
help="Read a code snippet from the specified file")
# Parse command line arguments
options, args = parser.parse_args(args)
if options.file:
tree = driver.parse_file(options.file)
elif len(args) > 1:
tree = driver.parse_stream(StringIO(args[1] + "\n"))
else:
print("You must specify an input file or an input string", file=sys.stderr)
return 1
examine_tree(tree)
return 0
def examine_tree(tree):
for node in tree.post_order():
if isinstance(node, pytree.Leaf):
continue
print(repr(str(node)))
verdict = raw_input()
if verdict.strip():
print(find_pattern(node))
return
def find_pattern(node):
if isinstance(node, pytree.Leaf):
return repr(node.value)
return find_symbol(node.type) + \
"< " + " ".join(find_pattern(n) for n in node.children) + " >"
def find_symbol(sym):
for n, v in python_symbols.__dict__.items():
if v == sym:
return n
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
py
|
1a5da444324757c2605bfa13fad2274f03d63794
|
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import tempfile
import threading
import atexit
import warnings
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub in (None, True):
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = (
auto_connect_event_hub is None
)
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir()
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
        '''Return server URL used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
        are re-emitted to properly configure session aspects that are dependent
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
        # Re-configure certain session aspects that may be dependent on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
        passing the optional *entity* to perform the operation upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username': 'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username': 'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username': 'martin', 'email': '[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
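        # For the docstring example above this yields, e.g.:
        # 'User where username is "martin"'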
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
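        # e.g. 'User where username is "martin"' becomes
        # 'select <default projections> from User where username is "martin"'.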
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
        '''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
                    conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
        # As an optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
        # Use `dictionary.get` to retrieve the hash in order to support older
        # versions of the ftrack server that do not return a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
            'Loading schemas from server due to hash not matching. '
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
            :meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
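        # Illustrative usage sketch (not part of the original docstring);
        # assumes a connected session and a locally modified entity, e.g.:
        #
        #     json_string = session.encode(
        #         task, entity_attribute_strategy='modified_only'
        #     )
        #
        # which serialises only the attributes changed since the last commit.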
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
        the key, value pairs that make up its primary key.
'''
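        # For example, a Task entity would typically be reduced to something
        # like {'__entity_type__': 'Task', 'id': '<task id>'} (illustrative
        # values only).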
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
        with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
        '''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
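        # Illustrative usage sketch (component variables are assumptions):
        #
        #     locations = session.pick_locations([component_a, component_b])
        #     # -> e.g. [<Location 'ftrack.server'>, None]
        #
        # where None indicates no accessible location holds that component.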
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
        '''Create a new component from *path* with additional *data*.
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
        location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
            automatically issued as part of the component's registration in the
location.
'''
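        # Illustrative usage sketch (paths and variables are hypothetical):
        #
        #     # Single file component.
        #     component = session.create_component(
        #         '/path/to/plate.mov', data={'version_id': version['id']}
        #     )
        #
        #     # Sequence component with members for frames 1-10.
        #     sequence = session.create_component(
        #         '/path/to/plate.%04d.exr [1-10]', data={'name': 'main'}
        #     )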
if data is None:
data = {}
if location == 'auto':
            # Check if the component name matches one of the ftrackreview-
            # specific names and, if so, add the component to the
            # ftrack.review location. This preserves backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
        See the public method :meth:`create_component` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
        '''Return size in bytes of the file at *path*.'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
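        # For example, a component fully present in one location and absent
        # from another would yield an entry such as
        # {'<location-a-id>': 100.0, '<location-b-id>': 0.0} (illustrative).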
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
        '''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
        (defaulting to 'dark' if an invalid option is given).
'''
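        # Illustrative usage sketch (assumes an existing task entity):
        #
        #     url = session.get_widget_url('tasks', entity=task, theme='light')
        #
        # The resulting URL can be embedded in a browser frame and remains
        # valid until the short-lived token expires.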
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
        '''Return a new Job that encodes *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
        An image component that can be used as a thumbnail will always be
        generated if possible.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
        issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
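        # Illustrative usage sketch (path and version are hypothetical):
        #
        #     job = session.encode_media(
        #         '/path/to/preview.mov', version_id=version['id']
        #     )
        #     # Job data is typically stored as a JSON string, so inspect it
        #     # with e.g.: outputs = json.loads(job['data'])['output']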
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
        *file_name* and *file_size* should match the component's details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
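        # Illustrative usage sketch, assuming the returned mapping exposes
        # 'url' and 'headers' entries:
        #
        #     metadata = session.get_upload_metadata(
        #         component['id'], 'plate.mov', os.path.getsize(file_path)
        #     )
        #     # requests.put(metadata['url'], data=fh, headers=metadata['headers'])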
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
        '''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
        '''Send an invitation to each of the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
'action':'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
        *invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
        *invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data)
|
py
|
1a5da67c6b74115419ea73bf5b2f172dac06b418
|
from pathlib import Path
import pytest
from graeScript.file_explorer.delete_move_dirs import (_validate_args,
_validate_dir,
_validate_dirs)
class TestValidateArgs:
def test_one(self):
user_input = 'replace', 'cancel', 'delete'
num_allowed = 1
with pytest.raises(SystemExit):
assert _validate_args(user_input, num_allowed, 'replace',
'compare', 'delete')
def test_two(self):
user_input = 'make', 'withdraw', 'plan'
num_allowed = 5
assert _validate_args(user_input, num_allowed,
'make', 'withdraw', 'plan', 'deposit',
'draw', 'sample', 'save'
) == ['make', 'withdraw', 'plan']
def test_three(self):
user_input = 'replace', 'cancel'
num_allowed = 1
assert _validate_args(user_input, num_allowed, 'replace',
'compare', 'delete'
) == ['replace']
class TestValidateDir:
home = Path().home()
def test_one(self):
with pytest.raises(SystemExit):
assert _validate_dir('/src/graeScript/data')
def test_two(self):
with pytest.raises(SystemExit):
assert _validate_dir(self.home / 'bananaPaperSmallFakeFolder')
def test_three(self):
assert _validate_dir(str(self.home)) == self.home
class TestValidateDirs:
home = Path().home()
fake_folder = home / 'bananaPaperSmallFakeFolder'
def test_one(self):
assert _validate_dirs('/src/graeScript/data',
str(self.home),
self.fake_folder) == [self.home]
def test_two(self):
with pytest.raises(SystemExit):
assert _validate_dirs('/src/graeScript/data', self.fake_folder)
|
py
|
1a5da6bd87608d5d43123196669382027163c688
|
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
from dlab.fab import *
from dlab.actions_lib import *
import sys
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
print('Generating infrastructure names and tags')
edge_conf = dict()
edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
edge_conf['zone'] = os.environ['gcp_zone']
edge_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
edge_conf['instance_name'] = '{0}-{1}-edge'.format(edge_conf['service_base_name'], edge_conf['edge_user_name'])
logging.info('[STOP EDGE]')
print('[STOP EDGE]')
try:
GCPActions().stop_instance(edge_conf['instance_name'], edge_conf['zone'])
except Exception as err:
append_result("Failed to stop edge.", str(err))
sys.exit(1)
try:
with open("/root/result.json", 'w') as result:
res = {"instance_name": edge_conf['instance_name'],
"Action": "Stop edge server"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
|
py
|
1a5da72805b9cca87a57fb44c6b723a92d637080
|
import pickle
import argparse
import os
import random
import numpy as np
from tqdm import tqdm
import itertools
import functools
from gene_map2 import split_label
def encode_dataset(input_file, gw_map, c_map, f_map, s_map, y_map):
gw_unk = gw_map['<unk>']
c_con = c_map[' ']
c_unk = c_map['<unk>']
dataset = list()
tmpw_gw, tmpc, tmpf, tmps, tmpy = list(), list(), list(), list(), list()
with open(input_file, 'r') as fin:
for line in fin:
if line.isspace() or line.startswith('-DOCSTART-'):
                if len(tmpw_gw) > 0:
                    # Include tmpy so every entry matches the five-element
                    # format used for the final sentence below.
                    dataset.append([tmpw_gw, tmpc, tmpf, tmps, tmpy])
                tmpw_gw, tmpc, tmpf, tmps, tmpy = list(), list(), list(), list(), list()
else:
line = line.split()
tmpw_gw.append(gw_map.get(line[0].lower(), gw_unk))
assert line[-1] != '<eof>'
a, b = split_label(line[-1])
tmpf.append(f_map[a])
tmps.append(s_map[b])
tmpc.append([c_map.get(tup, c_unk) for tup in line[0]])
tmpy.append(y_map[line[-1]])
if len(tmpw_gw) > 0:
dataset.append([tmpw_gw, tmpc, tmpf, tmps, tmpy])
return dataset
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--train_file', default="../data/ner/eng.train.iobes")
parser.add_argument('--test_file', default="../data/ner/eng.testb.iobes")
parser.add_argument('--dev_file', default="../data/ner/eng.testa.iobes")
parser.add_argument('--input_map', default="../data/conll_map.pk")
parser.add_argument('--output_file', default="../data/ner_dataset.pk")
parser.add_argument('--unk', default='<unk>')
args = parser.parse_args()
with open(args.input_map, 'rb') as f:
p_data = pickle.load(f)
name_list = ['gw_map', 'c_map', 'f_map', 's_map', 'y_map', 'emb_array']
gw_map, c_map, f_map, s_map, y_map, emb_array = [p_data[tup] for tup in name_list]
train_dataset = encode_dataset(args.train_file, gw_map, c_map, f_map, s_map, y_map)
test_dataset = encode_dataset(args.test_file, gw_map, c_map, f_map, s_map, y_map)
dev_dataset = encode_dataset(args.dev_file, gw_map, c_map, f_map, s_map, y_map)
with open(args.output_file, 'wb') as f:
pickle.dump({'gw_map': gw_map, 'c_map': c_map, 'f_map': f_map, 's_map': s_map, 'y_map': y_map,
'emb_array': emb_array, 'train_data': train_dataset, 'test_data': test_dataset, 'dev_data': dev_dataset}, f)
|
py
|
1a5da72e9bac3fb3b2a6b56d684c1c43944f6be7
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import itertools
import json
import logging
import os
from typing import Optional
from argparse import Namespace
from omegaconf import II
import numpy as np
from fairseq import metrics, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
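    # For example, with split='train', src='de', tgt='en' the loader looks for
    # binarised files named like train.de-en.de.* (or train.en-de.de.* if the
    # pair was preprocessed in the opposite direction) under each data path.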
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
@dataclass
class TranslationConfig(FairseqDataclass):
data: Optional[str] = field(
default=None,
metadata={
"help": "colon separated path to data directories list, will be iterated upon during epochs "
"in round-robin manner; however, valid and test data are always in the first directory "
"to avoid the need for repeating them in all directories"
},
)
source_lang: Optional[str] = field(
default=None,
metadata={
"help": "source language",
"argparse_alias": "-s",
},
)
target_lang: Optional[str] = field(
default=None,
metadata={
"help": "target language",
"argparse_alias": "-t",
},
)
load_alignments: bool = field(
default=False, metadata={"help": "load the binarized alignments"}
)
left_pad_source: bool = field(
default=False, metadata={"help": "pad the source on the left"}
)
left_pad_target: bool = field(
default=False, metadata={"help": "pad the target on the left"}
)
max_source_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the source sequence"}
)
max_target_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the target sequence"}
)
upsample_primary: int = field(
default=-1, metadata={"help": "the amount of upsample primary dataset"}
)
truncate_source: bool = field(
default=False, metadata={"help": "truncate source to max-source-positions"}
)
num_batch_buckets: int = field(
default=0,
metadata={
"help": "if >0, then bucket source and target lengths into "
"N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations"
},
)
train_subset: str = II("dataset.train_subset")
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(
"dataset.dataset_impl"
)
required_seq_len_multiple: int = II("dataset.required_seq_len_multiple")
# options for reporting BLEU during validation
eval_bleu: bool = field(
default=False, metadata={"help": "evaluation with BLEU scores"}
)
eval_bleu_args: Optional[str] = field(
default="{}",
metadata={
"help": 'generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\', as JSON string'
},
)
eval_bleu_detok: str = field(
default="space",
metadata={
"help": "detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; "
"use 'space' to disable detokenization; see fairseq.data.encoders for other options"
},
)
eval_bleu_detok_args: Optional[str] = field(
default="{}",
metadata={"help": "args for building the tokenizer, if needed, as JSON string"},
)
eval_tokenized_bleu: bool = field(
default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"}
)
eval_bleu_remove_bpe: Optional[str] = field(
default=None,
metadata={
"help": "remove BPE before computing BLEU",
"argparse_const": "@@ ",
},
)
eval_bleu_print_samples: bool = field(
default=False, metadata={"help": "print sample generations during validation"}
)
@register_task("translation", dataclass=TranslationConfig)
class TranslationTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
"""
cfg: TranslationConfig
def __init__(self, cfg: TranslationConfig, src_dict, tgt_dict):
super().__init__(cfg)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, cfg: TranslationConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
# find language pair automatically
if cfg.source_lang is None or cfg.target_lang is None:
cfg.source_lang, cfg.target_lang = data_utils.infer_language_pair(paths[0])
if cfg.source_lang is None or cfg.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(cfg.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(cfg.target_lang, len(tgt_dict)))
return cls(cfg, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
if split != self.cfg.train_subset:
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.cfg.source_lang, self.cfg.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.cfg.dataset_impl,
upsample_primary=self.cfg.upsample_primary,
left_pad_source=self.cfg.left_pad_source,
left_pad_target=self.cfg.left_pad_target,
max_source_positions=self.cfg.max_source_positions,
max_target_positions=self.cfg.max_target_positions,
load_alignments=self.cfg.load_alignments,
truncate_source=self.cfg.truncate_source,
num_buckets=self.cfg.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.cfg.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, cfg):
model = super().build_model(cfg)
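        # Note: the assignment below force-disables BLEU evaluation regardless
        # of the configured eval_bleu flag, so the tokenizer and sequence
        # generator in the following block are never constructed.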
self.cfg.eval_bleu = False
if self.cfg.eval_bleu:
detok_args = json.loads(self.cfg.eval_bleu_detok_args)
self.tokenizer = encoders.build_tokenizer(
Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args)
)
gen_args = json.loads(self.cfg.eval_bleu_args)
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.cfg.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.cfg.eval_bleu:
def sum_logs(key):
import torch
result = sum(log.get(key, 0) for log in logging_outputs)
if torch.is_tensor(result):
result = result.cpu()
return result
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.cfg.max_source_positions, self.cfg.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.cfg.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.cfg.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.cfg.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
|
py
|
1a5da96812b410e9860e96f8baee7fe3e9d3b239
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import time
import traceback
from sqlalchemy import event, exc
from airflow.configuration import conf
log = logging.getLogger(__name__)
def setup_event_handlers(engine):
"""Setups event handlers."""
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
connection_record.info['pid'] = os.getpid()
if engine.dialect.name == "sqlite":
@event.listens_for(engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
# this ensures sanity in mysql when storing datetimes (not required for postgres)
if engine.dialect.name == "mysql":
@event.listens_for(engine, "connect")
def set_mysql_timezone(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("SET time_zone = '+00:00'")
cursor.close()
@event.listens_for(engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
pid = os.getpid()
if connection_record.info['pid'] != pid:
connection_record.connection = connection_proxy.connection = None
raise exc.DisconnectionError(
"Connection record belongs to pid {}, "
"attempting to check out in pid {}".format(connection_record.info['pid'], pid)
)
if conf.getboolean('debug', 'sqlalchemy_stats', fallback=False):
@event.listens_for(engine, "before_cursor_execute")
def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
conn.info.setdefault('query_start_time', []).append(time.perf_counter())
@event.listens_for(engine, "after_cursor_execute")
def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
total = time.perf_counter() - conn.info['query_start_time'].pop()
file_name = [
f"'{f.name}':{f.filename}:{f.lineno}"
for f in traceback.extract_stack()
if 'sqlalchemy' not in f.filename
][-1]
stack = [f for f in traceback.extract_stack() if 'sqlalchemy' not in f.filename]
stack_info = ">".join([f"{f.filename.rpartition('/')[-1]}:{f.name}" for f in stack][-3:])
conn.info.setdefault('query_start_time', []).append(time.monotonic())
log.info(
"@SQLALCHEMY %s |$ %s |$ %s |$ %s ",
total,
file_name,
stack_info,
statement.replace("\n", " "),
)
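
# Illustrative usage sketch, not part of the Airflow module above: wiring the
# handlers onto a throwaway in-memory SQLite engine. Only the connect/checkout
# and SQLite PRAGMA handlers fire here; the query-stats handlers are registered
# only when the [debug] sqlalchemy_stats option is enabled in airflow.cfg.
if __name__ == "__main__":
    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite://")
    setup_event_handlers(engine)
    with engine.connect() as conn:
        conn.execute(text("SELECT 1"))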
|
py
|
1a5daa8612e15fccad7211af6659baaaf9ef38ea
|
from dataclasses import dataclass
from datetime import timedelta
@dataclass
class RetryProperties:
"""Defines properties that control services client retry behaviour"""
maxAttempts: int = 3
"""
Set the number of attempts before retries are exhausted.
Includes the initial attempt before the retries begin.
"""
initialInterval: timedelta = timedelta(seconds=5)
"""The initial interval before the first retry attempt."""
maxInterval: timedelta = timedelta(minutes=1)
"""The maximum interval between retry attempts."""
|
py
|
1a5daad893ef8eb0d1b14eb109f05fc847f17c73
|
# coding: utf-8
"""
AusSeabed product catalogue
The API description for the Ausseabed product catalogue inventory # noqa: E501
The version of the OpenAPI document: 0.2.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from product_catalogue_py_rest_client.api_client import ApiClient
from product_catalogue_py_rest_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class StylesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def styles_controller_find_all(self, **kwargs): # noqa: E501
"""styles_controller_find_all # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.styles_controller_find_all(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: list[Style]
"""
kwargs['_return_http_data_only'] = True
return self.styles_controller_find_all_with_http_info(**kwargs) # noqa: E501
def styles_controller_find_all_with_http_info(self, **kwargs): # noqa: E501
"""styles_controller_find_all # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.styles_controller_find_all_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: response data without HTTP status code
                                       and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(list[Style], status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method styles_controller_find_all" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['access-token'] # noqa: E501
return self.api_client.call_api(
'/styles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Style]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
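
# Illustrative usage sketch, not part of the generated client: it assumes the
# package's default Configuration points at a reachable API host and that a
# valid 'access-token' credential has been configured; both are placeholders.
if __name__ == "__main__":
    api = StylesApi(ApiClient())
    styles = api.styles_controller_find_all()
    print("fetched %d styles" % len(styles))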
|
py
|
1a5dab09d2c9ca58a52f0400b692e1eba50a8e64
|
#!/usr/bin/env python3
# coding: utf-8
def make_wordvectors():
import gensim
model = gensim.models.FastText.load('data/ko.bin')
with open('data/ko.vec', 'w', encoding='utf8') as f:
for v in model.wv.vocab:
s = v + ' '
arr = model.wv[v]
a = " ".join([str(round(a, 4)) for a in arr])
s += a + '\n'
f.write(s)
if __name__ == "__main__":
make_wordvectors()
print("Done")
|
py
|
1a5dab3948b2e8f6be10cc00cd506a70bc450a8e
|
def main():
hist = {}
for k, a_i in enumerate(a):
if a_i not in hist:
hist[a_i] = 0
hist[a_i] += 1
current_height = 0
carryover = None
for h_i, count in reversed(sorted(hist.items())):
cnt = count
if carryover:
cnt -= 1
current_height += h_i * (cnt // 2)
if cnt % 2 != 0:
current_height += h_i
carryover = h_i
else:
carryover = None
if current_height > h:
return k
return n
if __name__ == '__main__':
n, h = map(int, input().split())
a = list(map(int, input().split()))
print(main())
|
py
|
1a5dab50834ad2ed9b10a7561f6b7436a96cbfaf
|
import numpy as np
import numpy.linalg as la
import numpy.random as npr
import matplotlib.pyplot as plt
import os
import sys
sys.path.append("..")
from pickle_io import pickle_import,pickle_export
folderstr_list = []
folderstr_list.append("1564554983p5677059_1e4")
folderstr_list.append("1564555001p5515425_1e5")
folderstr_list.append("1564555047p6032026_1e6")
folderstr_list.append("1564555255p6612067_1e7")
folderstr_list.append("1564525514p9662921_1e8")
nr_list = [1e4,1e5,1e6,1e7,1e8]
N = len(nr_list)
data_noiseless = []
data_noisy = []
for i,folderstr in enumerate(folderstr_list):
dirname_in = folderstr
filename = 'data_noiseless.pickle'
filename_in = os.path.join(dirname_in,filename)
data_noiseless.append(pickle_import(filename_in))
filename = 'data_noisy.pickle'
filename_in = os.path.join(dirname_in,filename)
data_noisy.append(pickle_import(filename_in))
# Plotting
mean_error_norm_noiseless = np.zeros(N)
mean_error_norm_noisy = np.zeros(N)
mean_error_angle_noiseless = np.zeros(N)
mean_error_angle_noisy = np.zeros(N)
for i in range(N):
mean_error_norm_noiseless[i] = np.mean(data_noiseless[i][4])/la.norm(data_noiseless[0][0])
mean_error_norm_noisy[i] = np.mean(data_noisy[i][4])/la.norm(data_noisy[0][0])
mean_error_angle_noiseless[i] = np.mean(data_noiseless[i][2])
mean_error_angle_noisy[i] = np.mean(data_noisy[i][2])
plt.rc('text', usetex=True)
plt.rc('font', family='sans-serif')
plt.figure(figsize=(4,2.2))
#plt.figure(figsize=(7,5))
plt.semilogx(nr_list,mean_error_norm_noiseless,linewidth=4,marker='^',markersize=8,color='tab:blue')
plt.semilogx(nr_list,mean_error_norm_noisy,linewidth=4,marker='o',markersize=8,color='tab:red')
guide_color = 'tab:grey'
plt.semilogx(nr_list,0.1*np.ones(N),color=guide_color,linestyle='--')
#plt.axvline(5*10**5,ymax=0.15,color=guide_color,linestyle='--')
#plt.axvline(10**8,ymax=0.15,color=guide_color,linestyle='--')
plt.yticks(ticks=[0,0.1,0.25,0.50,0.75])
plt.xlabel('Number of rollouts')
plt.ylabel('Normalized gradient estimate error')
plt.ylabel(r'$\|\nabla C(K)-\widehat{\nabla} C_K \|/\|\nabla C(K)\|$')
plt.legend(["Noiseless","Noisy"])
plt.tight_layout()
plt.savefig("plot_gradient_estimation_error.png",dpi=300)
#plt.savefig("fig1alt.png",dpi=300)
#plt.figure()
#plt.semilogx(nr_list,mean_error_angle_noiseless,linewidth=4)
#plt.semilogx(nr_list,mean_error_angle_noisy,linewidth=4)
#plt.xlabel('Number of rollouts')
#plt.ylabel('Gradient estimate error angle (deg)')
#plt.legend(["Noiseless","Noisy"])
|
py
|
1a5daba7b3effc0a5b970360229ee35c5c802dc2
|
from sympy import *
def test_re():
x, y = symbols('xy')
r = Symbol('r', real=True)
assert re(nan) == nan
assert re(oo) == oo
assert re(-oo) == -oo
assert re(0) == 0
assert re(1) == 1
assert re(-1) == -1
assert re(E) == E
assert re(-E) == -E
assert re(x) == re(x)
assert re(x*I) == -im(x)
assert re(r*I) == 0
assert re(r) == r
assert re(x + y) == re(x + y)
assert re(x + r) == re(x) + r
assert re(re(x)) == re(x)
assert re(2 + I) == 2
assert re(x + I) == re(x)
assert re(x + y*I) == re(x) - im(y)
assert re(x + r*I) == re(x)
assert re(log(2*I)) == log(2)
def test_im():
x, y = symbols('xy')
r = Symbol('r', real=True)
assert im(nan) == nan
assert im(oo*I) == oo
assert im(-oo*I) == -oo
assert im(0) == 0
assert im(1) == 0
assert im(-1) == 0
assert im(E*I) == E
assert im(-E*I) == -E
assert im(x) == im(x)
assert im(x*I) == re(x)
assert im(r*I) == r
assert im(r) == 0
assert im(x + y) == im(x + y)
assert im(x + r) == im(x)
assert im(x + r*I) == im(x) + r
assert im(im(x)*I) == im(x)
assert im(2 + I) == 1
assert im(x + I) == im(x) + 1
assert im(x + y*I) == im(x) + re(y)
assert im(x + r*I) == im(x) + r
assert im(log(2*I)) == pi/2
def test_abs():
x, y = symbols('xy')
assert sqrt(x**2) == abs(x)
assert abs(x).diff(x) == sign(x)
|
py
|
1a5dabdd3cbdd4f775b91aab7d2035c0cfca1640
|
import logging
import pandas as pd
import numpy as np
def basicPivot(df, key, column, value):
return df.pivot_table(index=[key], columns=[column], values=value, aggfunc=np.median).fillna(0)
def gtex(dbad):
df = dbad.loadGTEX()
logging.info("staticData: DBAdapter:{0}; GTEX: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
df.info() #DEBUG
df = basicPivot(df, "protein_id", "tissue_type_detail", "median_tpm")
df.reset_index(drop=False, inplace=True)
logging.info("staticData: GTEX proteins: rows: {0}; cols: {1}".format(df.shape[0], df.shape[1]))
return df
def lincs(dbad):
df = dbad.loadLINCS()
logging.info("staticData: DBAdapter:{0}; LINCS: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
df = basicPivot(df, "protein_id", "col_id", "zscore")
df.reset_index(drop=False, inplace=True)
logging.info("staticData: LINCS proteins: rows: {0}; cols: {1}".format(df.shape[0], df.shape[1]))
return df
def ccle(dbad):
df = dbad.loadCCLE()
logging.info("staticData: DBAdapter:{0}; CCLE: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
df["col_id"] = (df.cell_id+"_"+df.tissue)
df.col_id = df.col_id.str.replace("[ /,]", "_")
df = df[["protein_id", "col_id", "expression"]].drop_duplicates()
df = basicPivot(df, "protein_id", "col_id", "expression")
df.reset_index(drop=False, inplace=True)
logging.info("staticData: CCLE proteins: rows: {0}; cols: {1}".format(df.shape[0], df.shape[1]))
return df
def hpa(dbad):
#(Why did Oleg use mode not median?)
df = dbad.loadHPA()
logging.debug("staticData ({0}): HPA: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
df = df.drop_duplicates()
df.col_id = df.col_id.str.replace("[ /,]", "_")
df = df.rename(columns={'level':'level_str'})
for key,val in df["level_str"].value_counts().iteritems():
logging.debug('\t%s: %6d: %s'%("level_str", val, key))
df["level"] = df.level_str.apply(lambda s: 3 if s=="High" else 2 if s=="Medium" else 1 if s=="Low" else 0 if "Not detected" else 0)
for key,val in df["level"].value_counts().iteritems():
logging.debug('\t%s: %6d: %s'%("level", val, key))
logging.debug("staticData ({0}): HPA: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
#df.info() #DEBUG
df = basicPivot(df, "protein_id", "col_id", "level")
logging.debug("staticData ({0}): HPA: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
df.reset_index(drop=False, inplace=True)
return df
###
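
# Illustrative sketch, not part of the module above, of what basicPivot() does:
# it reshapes long-format rows into a wide key-by-column matrix, taking the
# median of duplicate cells and filling gaps with 0. The toy DataFrame is made
# up purely for demonstration.
if __name__ == "__main__":
    demo = pd.DataFrame({
        "protein_id": [1, 1, 2, 2, 2],
        "tissue": ["liver", "brain", "liver", "liver", "brain"],
        "value": [1.0, 2.0, 3.0, 5.0, 4.0],
    })
    # protein 2 / liver collapses to the median of 3.0 and 5.0, i.e. 4.0;
    # the other cells come through unchanged.
    print(basicPivot(demo, "protein_id", "tissue", "value"))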
|
py
|
1a5dacd8b94c93ccf335e6bac5aede916b3b3e87
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('object_position04.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [59106432, 60702720]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart, {'object_position': 2})
workbook.close()
self.assertExcelEqual()
|
py
|
1a5dad26a079e7e76ed90b22c955df08b72a2838
|
# -*- coding: utf-8 -*-
import requests, json, re
import time, datetime, os, sys
import getpass
# Environment variables
# Student ID for the ZJU unified authentication platform
username = os.environ["USERNAME"]
# username = ''
# Password for the unified authentication platform
password = os.environ["PASSWORD"]
# password = ''
# sckey token for DingTalk push notifications
sckey = os.environ["PUSH_KEY"]
# sckey = ''
# customize address
# todo
def send_message(title='无效', text=''):
if text == '':
requests.post('https://api.zwya.ga/dingtalk/send?token=' + sckey + '&title=健康打卡通知&text=健康打卡通知 \n\n' + title)
else:
requests.post('https://api.zwya.ga/dingtalk/send?card=1&token=' + sckey + '&title=' + title + '&text=' + text)
return
class DaKa(object):
"""Hit card class
Attributes:
username: (str) 浙大统一认证平台用户名(一般为学号)
password: (str) 浙大统一认证平台密码
login_url: (str) 登录url
base_url: (str) 打卡首页url
save_url: (str) 提交打卡url
sess: (requests.Session) 统一的session
"""
def __init__(self):
self.username = username
self.password = password
self.login_url = "https://zjuam.zju.edu.cn/cas/login?service=https%3A%2F%2Fhealthreport.zju.edu.cn%2Fa_zju%2Fapi%2Fsso%2Findex%3Fredirect%3Dhttps%253A%252F%252Fhealthreport.zju.edu.cn%252Fncov%252Fwap%252Fdefault%252Findex"
self.base_url = "https://healthreport.zju.edu.cn/ncov/wap/default/index"
self.save_url = "https://healthreport.zju.edu.cn/ncov/wap/default/save"
self.sess = requests.Session()
def login(self):
"""Login to ZJU platform"""
res = self.sess.get(self.login_url)
execution = re.search('name="execution" value="(.*?)"', res.text).group(1)
res = self.sess.get(url='https://zjuam.zju.edu.cn/cas/v2/getPubKey').json()
n, e = res['modulus'], res['exponent']
encrypt_password = self._rsa_encrypt(self.password, e, n)
data = {
'username': self.username,
'password': encrypt_password,
'execution': execution,
'_eventId': 'submit'
}
res = self.sess.post(url=self.login_url, data=data)
        # check whether the login succeeded
if '统一身份认证' in res.content.decode():
raise LoginError('登录失败,请核实账号密码重新登录')
return self.sess
def post(self):
"""Post the hitcard info"""
res = self.sess.post(self.save_url, data=self.info)
return json.loads(res.text)
def get_date(self):
"""Get current date"""
today = datetime.date.today()
return "%4d%02d%02d" %(today.year, today.month, today.day)
def get_info(self, html=None):
"""Get hitcard info, which is the old info with updated new time."""
if not html:
res = self.sess.get(self.base_url)
html = res.content.decode()
try:
old_infos = re.findall(r'oldInfo: ({[^\n]+})', html)
if len(old_infos) != 0:
old_info = json.loads(old_infos[0])
else:
raise RegexMatchError("未发现缓存信息,请先至少手动成功打卡一次再运行脚本")
new_info_tmp = json.loads(re.findall(r'def = ({[^\n]+})', html)[0])
new_id = new_info_tmp['id']
name = re.findall(r'realname: "([^\"]+)",', html)[0]
number = re.findall(r"number: '([^\']+)',", html)[0]
except IndexError as err:
raise RegexMatchError('Relative info not found in html with regex')
except json.decoder.JSONDecodeError as err:
raise DecodeError('JSON decode error')
new_info = old_info.copy()
new_info['id'] = new_id
new_info['name'] = name
new_info['number'] = number
new_info["date"] = self.get_date()
new_info["created"] = round(time.time())
# todo
        # new_info['address'] = '浙江省杭州市西湖区余杭塘路866号浙江大学紫金港校区'  # e.g. 'xx province xx city xx district xx street xx residential compound'
        # new_info['area'] = '浙江省 杭州市 西湖区'  # e.g. '浙江省 杭州市 西湖区'; separate province/city/district with spaces, matching the "home location" field under basic info on the check-in page
        # new_info['province'] = new_info['area'].split(' ')[0]  # province name
        # new_info['city'] = new_info['area'].split(' ')[1]  # city name
# form change
new_info['jrdqtlqk[]'] = 0
new_info['jrdqjcqk[]'] = 0
        new_info['sfsqhzjkk'] = 1  # whether the Hangzhou health code has been applied for
        new_info['sqhzjkkys'] = 1  # Hangzhou health code color: 1 green, 2 red, 3 yellow
        new_info['sfqrxxss'] = 1  # whether the submitted information is confirmed to be true
        new_info['sfymqjczrj'] = 0  # whether a close-contact family member has recently entered the country
new_info['jcqzrq'] = ""
new_info['gwszdd'] = ""
new_info['szgjcs'] = ""
self.info = new_info
# print(old_info, self.info)
return new_info
def _rsa_encrypt(self, password_str, e_str, M_str):
password_bytes = bytes(password_str, 'ascii')
password_int = int.from_bytes(password_bytes, 'big')
e_int = int(e_str, 16)
M_int = int(M_str, 16)
result_int = pow(password_int, e_int, M_int)
return hex(result_int)[2:].rjust(128, '0')
# Exceptions
class LoginError(Exception):
"""Login Exception"""
pass
class RegexMatchError(Exception):
"""Regex Matching Exception"""
pass
class DecodeError(Exception):
"""JSON Decode Exception"""
pass
def main():
"""Hit card process
"""
start_time = ("\n[Time] %s" %datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# print("🚌 打卡任务启动")
dk = DaKa()
try:
dk.login()
except Exception as err:
# spinner.fail(str(err))
return
# spinner.start(text='正在获取个人信息...')
try:
dk.get_info()
personal_info = ('%s %s同学, 你好~' %(dk.info['number'], dk.info['name']))
except Exception as err:
send_message(title='获取信息失败,请手动打卡,更多信息:', text=str(err))
return
# spinner.start(text='正在为您打卡')
try:
res = dk.post()
if str(res['e']) == '0':
send_message(title='打卡🎈成功!', text=start_time+'\n\n'+personal_info+'\n\n From HealthCheck.')
else:
send_message(title=res['m']+'[Check here](https://healthreport.zju.edu.cn/ncov/wap/default/index)')
except:
send_message(title='数据提交失败')
return
if __name__=="__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
pass
|
py
|
1a5dae93da517bd1a6b53867291ed37fdc879bad
|
###
### This file was automatically generated
###
from archinfo.arch import register_arch, Endness, Register
from .common import ArchPcode
class ArchPcode_Toy_BE_64_default(ArchPcode):
name = 'Toy:BE:64:default'
pcode_arch = 'Toy:BE:64:default'
description = 'Toy (test) processor 64-bit big-endian'
bits = 64
ip_offset = 0x1078
sp_offset = 0x1068
bp_offset = sp_offset
instruction_endness = Endness.BE
register_list = [
Register('r0', 8, 0x1000),
Register('r0h', 4, 0x1000),
Register('r0l', 4, 0x1004),
Register('r1', 8, 0x1008),
Register('r1h', 4, 0x1008),
Register('r1l', 4, 0x100c),
Register('r2', 8, 0x1010),
Register('r2h', 4, 0x1010),
Register('r2l', 4, 0x1014),
Register('r3', 8, 0x1018),
Register('r3h', 4, 0x1018),
Register('r3l', 4, 0x101c),
Register('r4', 8, 0x1020),
Register('r4h', 4, 0x1020),
Register('r4l', 4, 0x1024),
Register('r5', 8, 0x1028),
Register('r5h', 4, 0x1028),
Register('r5l', 4, 0x102c),
Register('r6', 8, 0x1030),
Register('r6h', 4, 0x1030),
Register('r6l', 4, 0x1034),
Register('r7', 8, 0x1038),
Register('r7h', 4, 0x1038),
Register('r7l', 4, 0x103c),
Register('r8', 8, 0x1040),
Register('r8h', 4, 0x1040),
Register('r8l', 4, 0x1044),
Register('r9', 8, 0x1048),
Register('r9h', 4, 0x1048),
Register('r9l', 4, 0x104c),
Register('r10', 8, 0x1050),
Register('r10h', 4, 0x1050),
Register('r10l', 4, 0x1054),
Register('r11', 8, 0x1058),
Register('r11h', 4, 0x1058),
Register('r11l', 4, 0x105c),
Register('r12', 8, 0x1060),
Register('r12h', 4, 0x1060),
Register('r12l', 4, 0x1064),
Register('sp', 8, 0x1068),
Register('sph', 4, 0x1068),
Register('spl', 4, 0x106c),
Register('lr', 8, 0x1070),
Register('lrh', 4, 0x1070),
Register('lrl', 4, 0x1074),
Register('pc', 8, 0x1078, alias_names=('ip',)),
Register('pch', 4, 0x1078),
Register('pcl', 4, 0x107c),
Register('c', 1, 0x1100),
Register('z', 1, 0x1101),
Register('n', 1, 0x1102),
Register('v', 1, 0x1103)
]
register_arch(['toy:be:64:default'], 64, Endness.BE, ArchPcode_Toy_BE_64_default)
|
py
|
1a5daf75ff3525d03215c7f7fdccb1d42cbad35f
|
class Node:
def __init__(self,data):
self.data = data
self.previous = None
self.next = None
class removeDuplicates:
def __init__(self):
self.head = None
self.tail = None
    def remove_duplicates(self):
        """Remove nodes with duplicate data, keeping the first occurrence of each value."""
        if self.head is None:
            return
        current = self.head
        while current is not None:
            # Compare the current node against every node that follows it.
            index = current.next
            while index is not None:
                if current.data == index.data:
                    # Unlink the duplicate node from the list.
                    index.previous.next = index.next
                    if index.next is not None:
                        index.next.previous = index.previous
                index = index.next
            current = current.next
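
# Illustrative sketch, not part of the original class: the class above never
# builds the list itself, so this demo wires a few Node objects together by
# hand before calling remove_duplicates().
if __name__ == "__main__":
    dll = removeDuplicates()
    prev = None
    for v in [1, 2, 2, 3, 1]:
        node = Node(v)
        if prev is None:
            dll.head = node
        else:
            prev.next = node
            node.previous = prev
        prev = node
    dll.remove_duplicates()
    node, result = dll.head, []
    while node is not None:
        result.append(node.data)
        node = node.next
    print(result)  # expected: [1, 2, 3]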
|
py
|
1a5db05d80628acb7af640b6359068158657dd05
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 7 11:02:00 2021
@author: Annika
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.io
from datetime import datetime, timedelta
import numpy as np
from read_log_files import *
from read_time_rides import *
def datenum(d):
return 366 + d.toordinal() + (d - datetime.fromordinal(d.toordinal())).total_seconds()/(24*60*60)
#Function to reverse datenum
def datestr(x, tz=None):
dt = datetime.fromordinal(int(x)) + timedelta(days=x%1) - timedelta(days = 366)
return dt
def plot_temp_RH_Klosters_gondola(path_log_file,path_time_rides,path_data_Klosters):
myFmt = mdates.DateFormatter('%H:%M')
start_time = '2019-02-22 08:00:00'
end_time = '2019-02-22 10:00:00'
#Read in data from weather station in Klosters
mat = scipy.io.loadmat(path_data_Klosters)
data_KLA = mat['WS']
T_KLA = data_KLA['T']
T_KLA = T_KLA[0][0][0]
T_KLA = np.array(T_KLA,dtype=np.float)
RH_KLA = data_KLA['RH']
RH_KLA = RH_KLA[0][0][0]
RH_KLA = np.array(RH_KLA,dtype=np.float)
time_KLA = data_KLA['time']
time_KLA = time_KLA[0][0][0]
time_KLA = np.array([datestr(time_KLA[i]) for i in range(len(time_KLA))])
index_KLA = pd.DatetimeIndex(time_KLA)
T_KLA = pd.Series(T_KLA,index = index_KLA)
RH_KLA = pd.Series(RH_KLA,index=index_KLA)
#Read in log file from HOLIMO
log = read_log_file(start_time,end_time,path_log_file)
day_of_month = log['day_of_month'][0]
month = log['month'][0]
year = log['year'][0]
hour = log['hour'][0]
minute = log['minute'][0]
second = log['second'][0]
time_gondel = [str(day_of_month[i])+'/'+str(month[i])+'/'+str(year[i])+' ' +str(hour[i])+':'+str(minute[i])+':'+str(second[i]) for i in range(0,len(month))]
index_gondel = pd.DatetimeIndex(time_gondel)
T_gondel = pd.Series(log['temp'][0],index = index_gondel)
RH_gondel = pd.Series(log['rh'][0],index = index_gondel)
time_gondel = [datenum(index_gondel[i]) for i in range(0,len(index_gondel))]
#Read in time of gondola rides
[start_time_ride,end_time_ride] = read_time_rides(path_time_rides)
#Derive temperature at Gotschnaboden (Gondola at lowest point considered for measurements)
idx_gb = [np.argmin(np.abs(time_gondel-start_time_ride[i])) for i in range(0,len(start_time_ride))]
T_GB=T_gondel[idx_gb]
RH_GB=RH_gondel[idx_gb]
index_GB = index_gondel[idx_gb]
T_GB = pd.Series(T_GB,index=index_GB)
RH_GB = pd.Series(RH_GB,index=index_GB)
#Derive temperature at Gotschnagrat (Gondola at highest point considered for measurements)
idx_gg = [np.argmin(np.abs(time_gondel-end_time_ride[i])) for i in range(0,len(end_time_ride))]
T_GG=T_gondel[idx_gg]
RH_GG=RH_gondel[idx_gg]
index_GG = index_gondel[idx_gg]
T_GG = pd.Series(T_GG,index=index_GG)
RH_GG = pd.Series(RH_GG,index=index_GG)
time_gb = np.array([datestr(start_time_ride[i]) for i in range(len(start_time_ride))])
time_gg = np.array([datestr(end_time_ride[i]) for i in range(len(end_time_ride))])
x_gr = np.column_stack((time_gb,time_gg))
y_gr = np.column_stack((T_GB,T_GG))
y_gr_RH = np.column_stack((RH_GB,RH_GG))
#Melting layer
melting = [0,0]
time_melting = [start_time,end_time]
time_melting = pd.to_datetime(time_melting)
index_melting = pd.DatetimeIndex(time_melting)
melting = pd.Series(melting, index=index_melting)
#Lines for gondel rides
fs=25
f=1
plt.figure(f)
gr = plt.plot(x_gr.transpose(),y_gr.transpose(),color = [0.7, 0.7, 0.7])
gg, = plt.plot(T_GG[start_time:end_time].index,T_GG[start_time:end_time],label='Gotschnagrat 2300m',color = [0,0.447,0.741])
gb, = plt.plot(T_GB[start_time:end_time].index,T_GB[start_time:end_time],label='Gotschnaboden 1700m',color = [0.9290, 0.6940, 0.1250])
kla, = plt.plot(T_KLA[start_time:end_time].index,T_KLA[start_time:end_time],label='Klosters 1200m',color = [0, 0.5, 0])
m = plt.plot(melting[start_time:end_time].index,melting[start_time:end_time],'k')
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(myFmt)
plt.gca().invert_yaxis()
plt.xlim(start_time,end_time)
plt.ylim(4,-3)
plt.xlabel('Time (UTC)',fontsize=fs)
plt.ylabel('Temperature (°C)',fontsize=fs)
plt.tick_params(right=True)
plt.yticks(fontsize=fs)
plt.xticks(fontsize=fs)
plt.show()
f=2
plt.figure(f)
gr = plt.plot(x_gr.transpose(),y_gr_RH.transpose(),color = [0.7, 0.7, 0.7])
gg, = plt.plot(RH_GG[start_time:end_time].index,RH_GG[start_time:end_time],label='Gotschnagrat 2300m',color = [0,0.447,0.741])
gb, = plt.plot(RH_GB[start_time:end_time].index,RH_GB[start_time:end_time],label='Gotschnaboden 1700m',color = [0.9290, 0.6940, 0.1250])
kla, = plt.plot(RH_KLA[start_time:end_time].index,RH_KLA[start_time:end_time],label='Klosters 1200m',color = [0, 0.5, 0])
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(myFmt)
plt.xlim(start_time,end_time)
plt.ylim(75,100)
plt.xlabel('Time (UTC)',fontsize=fs)
plt.ylabel('RH (%)',fontsize=fs)
plt.tick_params(right=True)
plt.yticks(fontsize=fs)
plt.xticks(fontsize=fs)
plt.show()
|
py
|
1a5db11bc632aa2436ec55d2ef9d4b859bbc2a4f
|
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the SegWit changeover logic."""
from decimal import Decimal
from test_framework.address import (
key_to_p2pkh,
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
program_to_witness,
script_to_p2sh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.blocktools import witness_script, send_to_witness
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, sha256, ToHex
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE, OP_DROP
from test_framework.test_framework import LearncoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes, hex_str_to_bytes, sync_blocks, try_rpc
from io import BytesIO
NODE_0 = 0
NODE_2 = 2
WIT_V0 = 0
WIT_V1 = 1
def getutxo(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
def find_spendable_utxo(node, min_value):
for utxo in node.listunspent(query_options={'minimumAmount': min_value}):
if utxo['spendable']:
return utxo
raise AssertionError("Unspent output equal or higher than %s not found" % min_value)
class SegWitTest(LearncoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
[
"-rpcserialversion=0",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
"-deprecatedrpc=addwitnessaddress",
],
[
"-blockversion=4",
"-rpcserialversion=1",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
"-deprecatedrpc=addwitnessaddress",
],
[
"-blockversion=536870915",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
"-deprecatedrpc=addwitnessaddress",
],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
connect_nodes(self.nodes[0], 2)
self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
sync_blocks(self.nodes)
def fail_accept(self, node, error_msg, txid, sign, redeem_script=""):
assert_raises_rpc_error(-26, error_msg, send_to_witness, use_p2wsh=1, node=node, utxo=getutxo(txid), pubkey=self.pubkey[0], encode_p2sh=False, amount=Decimal("49.998"), sign=sign, insert_redeem_script=redeem_script)
def run_test(self):
self.nodes[0].generate(161) #block 161
self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
self.nodes[0].generate(1) #block 162
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].getaddressinfo(newaddress)["pubkey"])
multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG])
p2sh_addr = self.nodes[i].addwitnessaddress(newaddress)
bip173_addr = self.nodes[i].addwitnessaddress(newaddress, False)
p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address']
bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address']
assert_equal(p2sh_addr, key_to_p2sh_p2wpkh(self.pubkey[-1]))
assert_equal(bip173_addr, key_to_p2wpkh(self.pubkey[-1]))
assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript))
assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) #block 163
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
self.log.info("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False)
self.nodes[2].generate(4) # blocks 428-431
self.log.info("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
self.log.info("Verify default node can't accept txs with missing witness")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
for i in range(len(segwit_tx_list)):
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
self.log.info("Verify witness txs without witness data are invalid after the fork")
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch) (code 64)', wit_ids[NODE_2][WIT_V0][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness) (code 64)', wit_ids[NODE_2][WIT_V1][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch) (code 64)', p2sh_ids[NODE_2][WIT_V0][2], sign=False, redeem_script=witness_script(False, self.pubkey[2]))
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness) (code 64)', p2sh_ids[NODE_2][WIT_V1][2], sign=False, redeem_script=witness_script(True, self.pubkey[2]))
self.log.info("Verify default node can now use witness txs")
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435
self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data
assert(tmpl['weightlimit'] == 4000000)
assert(tmpl['sigoplimit'] == 80000)
assert(tmpl['transactions'][0]['txid'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 8)
self.nodes[0].generate(1) # Mine a block to clear the gbt cache
self.log.info("Non-segwit miners are able to use GBT response after activation.")
# Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
# tx2 (segwit input, paying to a non-segwit output) ->
# tx3 (non-segwit input, paying to a non-segwit output).
# tx1 is allowed to appear in the block, but no others.
txid1 = send_to_witness(1, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
hex_tx = self.nodes[0].gettransaction(txid)['hex']
tx = FromHex(CTransaction(), hex_tx)
assert(tx.wit.is_null()) # This should not be a segwit input
assert(txid1 in self.nodes[0].getrawmempool())
# Now create tx2, which will spend from txid1.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
tx.vout.append(CTxOut(int(49.99 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx2_hex = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))['hex']
txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
tx = FromHex(CTransaction(), tx2_hex)
assert(not tx.wit.is_null())
# Now create tx3, which will spend from txid2
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) # Huge fee
tx.calc_sha256()
txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
assert(tx.wit.is_null())
assert(txid3 in self.nodes[0].getrawmempool())
# Now try calling getblocktemplate() without segwit support.
template = self.nodes[0].getblocktemplate()
# Check that tx1 is the only transaction of the 3 in the template.
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid2 not in template_txids and txid3 not in template_txids)
assert(txid1 in template_txids)
# Check that running with segwit support results in all 3 being included.
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid1 in template_txids)
assert(txid2 in template_txids)
assert(txid3 in template_txids)
# Check that wtxid is properly reported in mempool entry
assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True))
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
assert ((self.nodes[0].getaddressinfo(uncompressed_spendable_address[0])['iscompressed'] == False))
assert ((self.nodes[0].getaddressinfo(compressed_spendable_address[0])['iscompressed'] == True))
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]])['address'])
unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]
# Test multisig_without_privkey
# We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address']
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# Multisig without private is not seen after addmultisigaddress, but seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)
spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bytes_to_hex_str(bare))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(bytes_to_hex_str(p2pk))
importlist.append(bytes_to_hex_str(p2pkh))
importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))
importlist.append(bytes_to_hex_str(unsolvablep2pkh))
importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
importlist.append(bytes_to_hex_str(op1))
importlist.append(bytes_to_hex_str(p2wshop1))
for i in importlist:
# import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
# exceptions and continue.
try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True)
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that no witness address should be returned by unsolvable addresses
for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address:
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# addwitnessaddress should return a witness addresses even if keys are not in the wallet
self.nodes[0].addwitnessaddress(multisig_without_privkey_address)
for i in compressed_spendable_address + compressed_solvable_address:
witaddress = self.nodes[0].addwitnessaddress(i)
# addwitnessaddress should return the same address if it is a known P2SH-witness address
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
spendable_after_addwitnessaddress = [] # These outputs should be seen after importaddress
solvable_after_addwitnessaddress=[] # These outputs should be seen after importaddress but not spendable
unseen_anytime = [] # These outputs should never be seen
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
premature_witaddress = []
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are always spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# P2WSH multisig without private key are seen after addwitnessaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2SH_P2PK, P2SH_P2PKH with compressed keys are always solvable
solvable_anytime.extend([p2wpkh, p2sh_p2wpkh])
self.mine_and_test_listunspent(spendable_anytime, 2)
self.mine_and_test_listunspent(solvable_anytime, 1)
self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
# premature_witaddress are not accepted until the script is added with addwitnessaddress first
for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress:
# This will raise an exception
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# after importaddress it should pass addwitnessaddress
v = self.nodes[0].getaddressinfo(compressed_solvable_address[1])
self.nodes[0].importaddress(v['hex'],"",False,True)
for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
witaddress = self.nodes[0].addwitnessaddress(i)
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress + spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress + solvable_anytime, 1))
self.mine_and_test_listunspent(unseen_anytime, 0)
# Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works
v1_addr = program_to_witness(1, [3,5])
v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])],{v1_addr: 1})
v1_decoded = self.nodes[1].decoderawtransaction(v1_tx)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305")
# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)
# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)
# Test that importing native P2WPKH/P2WSH scripts works
for use_p2wsh in [False, True]:
if use_p2wsh:
scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a"
transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000"
else:
scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87"
transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000"
self.nodes[1].importaddress(scriptPubKey, "", False)
rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex']
rawtxfund = self.nodes[1].signrawtransactionwithwallet(rawtxfund)["hex"]
txid = self.nodes[1].sendrawtransaction(rawtxfund)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
# Assert it is properly saved
self.stop_node(1)
self.start_node(1)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
    def mine_and_test_listunspent(self, script_list, ismine):
        """Fund every script in script_list, mine a block, and check listunspent.
        ismine == 2: every output must be listed and spendable by the wallet
        ismine == 1: every output must be listed as watch-only (seen, not spendable)
        ismine == 0: none of the outputs may appear in listunspent
        """
utxo = find_spendable_utxo(self.nodes[0], 50)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout'])))
for i in script_list:
tx.vout.append(CTxOut(10000000, i))
tx.rehash()
signresults = self.nodes[0].signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
txid = self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
watchcount = 0
spendcount = 0
for i in self.nodes[0].listunspent():
if (i['txid'] == txid):
watchcount += 1
if (i['spendable'] == True):
spendcount += 1
if (ismine == 2):
assert_equal(spendcount, len(script_list))
elif (ismine == 1):
assert_equal(watchcount, len(script_list))
assert_equal(spendcount, 0)
else:
assert_equal(watchcount, 0)
return txid
def p2sh_address_to_script(self,v):
bare = CScript(hex_str_to_bytes(v['hex']))
p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2wsh = CScript([OP_0, sha256(bare)])
p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
return([bare, p2sh, p2wsh, p2sh_p2wsh])
def p2pkh_address_to_script(self,v):
pubkey = hex_str_to_bytes(v['pubkey'])
p2wpkh = CScript([OP_0, hash160(pubkey)])
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
def create_and_mine_tx_from_txids(self, txids, success = True):
tx = CTransaction()
for i in txids:
txtmp = CTransaction()
txraw = self.nodes[0].getrawtransaction(i)
f = BytesIO(hex_str_to_bytes(txraw))
txtmp.deserialize(f)
for j in range(len(txtmp.vout)):
tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j)))
tx.vout.append(CTxOut(0, CScript()))
tx.rehash()
signresults = self.nodes[0].signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
if __name__ == '__main__':
SegWitTest().main()
|
py
|
1a5db1b03c734561f2b15fcdbbfe22998785a483
|
# encoding: utf-8
import click, sys, ldap, getpass
# Defaults
# --------
GROUP_DN = 'cn=All EDRN,dc=edrn,dc=jpl,dc=nasa,dc=gov'
MANAGER_DN = 'uid=admin,ou=system'
SCOPE = 'one'
URL = 'ldaps://edrn-ds.jpl.nasa.gov'
USER_BASE = 'dc=edrn,dc=jpl,dc=nasa,dc=gov'
USER_CLASS = 'edrnPerson'
# Map from command-line to ldap constants
# ---------------------------------------
_scopes = {
'base': ldap.SCOPE_BASE,
'one': ldap.SCOPE_ONELEVEL,
'sub': ldap.SCOPE_SUBTREE
}
# Let's do it
# -----------
@click.command()
@click.option('--url', default=URL, help='URL to the LDAP server')
@click.option('--manager', default=MANAGER_DN, help='DN of the LDAP admin account')
@click.option('--password', help='Password of the LDAP admin account; if not given you will be prompted')
@click.option('--userbase', default=USER_BASE, help='Base DN where users are found')
@click.option('--scope', default=SCOPE, help='Search scope to find users', type=click.Choice(['base', 'one', 'sub']))
@click.option('--userclass', default=USER_CLASS, help='Object class to determine users')
@click.option('--group', default=GROUP_DN, help='DN of group to update')
def main(url: str, manager: str, password: str, userbase: str, scope: str, userclass: str, group: str):
if not password:
password = getpass.getpass()
connection = ldap.initialize(url)
connection.simple_bind_s(manager, password)
allUsers = connection.search_s(userbase, _scopes[scope], '(objectClass={})'.format(userclass), [], attrsonly=1)
allUsers = set([i[0] for i in allUsers])
currentMembers = connection.search_s(group, ldap.SCOPE_BASE, '(objectClass=*)', ['uniquemember'])
currentMembers = set([str(i, 'utf-8') for i in currentMembers[0][1]['uniquemember']])
usersToAdd = allUsers - currentMembers
membersToRemove = currentMembers - allUsers
if usersToAdd:
connection.modify_s(group, [(ldap.MOD_ADD, 'uniquemember', [i.encode('utf-8') for i in usersToAdd])])
if membersToRemove:
connection.modify_s(group, [(ldap.MOD_DELETE, 'uniquemember', [i.encode('utf-8') for i in membersToRemove])])
sys.exit(0)
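# The entry-point guard below is an assumption about how this script is run
# directly; the project may instead expose main() through a console_scripts
# entry point, in which case this guard is simply redundant.
if __name__ == '__main__':
    main()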
|
py
|
1a5db2c55a9f2e6df6a8f79395a747c1b29d0a3d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import os, re
from setuptools import setup, find_packages
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
with open(os.path.join(package, "__init__.py")) as f:
return re.search("__version__ = ['\"]([^'\"]+)['\"]", f.read()).group(1)
def get_long_description():
"""
Return the README.
"""
with open("README.md", encoding="utf8") as f:
return f.read()
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [
dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, "__init__.py"))
]
setup(
name="{{cookiecutter.project_name}}",
version=get_version("{{cookiecutter.project_slug}}"),
python_requires=">=3.6",
license="BSD",
description="{{cookiecutter.project_short_description}}",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="{{ cookiecutter.full_name.replace('\"', '\\\"') }}",
author_email="{{ cookiecutter.email }}",
packages=get_packages("{{cookiecutter.project_slug}}"),
# package_data={"databases": ["py.typed"]},
# data_files=[("", ["LICENSE.md"])],
install_requires=[
"starlette>=0.13.0",
"websockets==8.0.1",
'dalchemy @ git+https://github.com/Tuteria/shared_lib.git@master',
],
extras_require={},
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Internet :: WWW/HTTP",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
zip_safe=False,
)
|
py
|
1a5db3a30ff1e598fa445c7411962ca5c9548beb
|
# Exercise (comments translated to English; code and output strings kept as-is):
# a 17-year-old keeps buying ice cream until either satiety reaches 85 or the
# remaining money cannot cover the next, increasingly expensive, ice cream.
edad = 17                    # age; also the satiety gained per ice cream
hambre = edad                # "hambre" (hunger) is used here as a satiety counter
dinero = 2000                # money available
helados_consumidos = 0       # ice creams eaten
precio_helado = 100          # price of the next ice cream
while (hambre) < 85 and (dinero-precio_helado) > 0:
    dinero = dinero - precio_helado
    precio_helado = precio_helado + (precio_helado * 0.2)  # price rises 20% per purchase
    helados_consumidos = helados_consumidos + 1
    hambre = hambre + edad
    if (hambre + edad) >= 100:
        break
print("Helados consumidos:" + str(helados_consumidos))  # ice creams eaten
print("Dinero restante:" + str(dinero))                  # money left
print("Nivel de saciedad:" + str(hambre))                # satiety level
|
py
|
1a5db3c0698b2e9e752f6005935df28421a2b98f
|
# MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import os
from pathlib import Path
import pytest
from flexmock import flexmock
from ogr import GithubService, GitlabService
from packit.config import JobConfigTriggerType
from packit_service.config import ServiceConfig
from packit_service.models import JobTriggerModelType
from packit_service.service.events import (
PullRequestGithubEvent,
PushGitHubEvent,
ReleaseEvent,
MergeRequestGitlabEvent,
)
from packit_service.worker.parser import Parser
from tests.spellbook import SAVED_HTTPD_REQS, DATA_DIR
@pytest.fixture(scope="session", autouse=True)
def global_service_config():
"""
This config will be used instead of the one loaded from the local config file.
You can still mock/overwrite the service config content in your tests
but this one will be used by default.
"""
service_config = ServiceConfig()
service_config.services = {
GithubService(token="token"),
GitlabService(token="token"),
}
service_config.dry_run = False
service_config.server_name = "localhost"
service_config.github_requests_log_path = "/path"
ServiceConfig.service_config = service_config
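# A minimal sketch (hypothetical test name) of overriding the default config for
# a single test, touching only attributes set in the fixture above; the default
# is restored afterwards so other tests are unaffected:
#
#     def test_with_custom_config():
#         custom = ServiceConfig()
#         custom.dry_run = True
#         previous, ServiceConfig.service_config = ServiceConfig.service_config, custom
#         try:
#             ...  # exercise the code under test
#         finally:
#             ServiceConfig.service_config = previous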
@pytest.fixture()
def dump_http_com():
"""
    This fixture can dump the whole HTTP traffic of a single test case
    so that no HTTP communication happens while testing.
Usage:
1. add it to your test case and pass the test path
def test_something(dump_http_com):
service_config = dump_http_com(f"{Path(__file__).name}/pr_handle.yaml")
2. Run your test
GITHUB_TOKEN=asdqwe pytest-3 -k test_something
3. Your http communication should now be stored in tests/data/http-requests/{path}
4. Once you rerun the tests WITHOUT the token, the offline communication should be picked up
"""
def f(path: str):
""" path points to a file where the http communication will be saved """
conf = ServiceConfig()
# TODO: add pagure support
# conf._pagure_user_token = os.environ.get("PAGURE_TOKEN", "test")
# conf._pagure_fork_token = os.environ.get("PAGURE_FORK_TOKEN", "test")
conf._github_token = os.getenv("GITHUB_TOKEN", None)
conf.dry_run = True
target_path: Path = SAVED_HTTPD_REQS / path
target_path.parent.mkdir(parents=True, exist_ok=True)
conf.github_requests_log_path = str(target_path)
return conf
return f
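# A minimal sketch of the usage described in the docstring above; the test name
# and yaml path are hypothetical, and how the returned config is consumed
# depends on the code under test (assigning ServiceConfig.service_config is one
# option, shown here as an assumption):
#
#     def test_something(dump_http_com):
#         service_config = dump_http_com(f"{Path(__file__).name}/pr_handle.yaml")
#         ServiceConfig.service_config = service_config
#         ...  # run the code under test; traffic is recorded under tests/data/http-requests/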
def copr_build_model(
repo_name="bar",
repo_namespace="foo",
forge_instance="github.com",
job_config_trigger_type=JobConfigTriggerType.pull_request,
job_trigger_model_type=JobTriggerModelType.pull_request,
**trigger_model_kwargs,
):
project_model = flexmock(
repo_name=repo_name,
namespace=repo_namespace,
project_url=f"https://{forge_instance}/{repo_namespace}/{repo_name}",
)
pr_model = flexmock(
id=1,
pr_id=123,
project=project_model,
job_config_trigger_type=job_config_trigger_type,
**trigger_model_kwargs,
)
trigger_model = flexmock(
id=2,
type=job_trigger_model_type,
trigger_id=1,
get_trigger_object=lambda: pr_model,
)
return flexmock(
id=1,
build_id="1",
commit_sha="0011223344",
project_name="some-project",
owner="some-owner",
web_url="https://some-url",
target="some-target",
status="some-status",
srpm_build=flexmock(logs="asdsdf", url=None),
job_trigger=trigger_model,
)
@pytest.fixture(scope="module")
def copr_build_pr():
return copr_build_model()
@pytest.fixture()
def koji_build_pr():
project_model = flexmock(
repo_name="bar", namespace="foo", project_url="https://github.com/foo/bar"
)
pr_model = flexmock(
id=1,
pr_id=123,
project=project_model,
job_config_trigger_type=JobConfigTriggerType.pull_request,
)
trigger_model = flexmock(
id=2,
type=JobTriggerModelType.pull_request,
trigger_id=1,
get_trigger_object=lambda: pr_model,
)
koji_build_model = flexmock(
id=1,
build_id="1",
commit_sha="0011223344",
project_name="some-project",
owner="some-owner",
web_url="https://some-url",
target="some-target",
status="some-status",
srpm_build=flexmock(logs="asdsdf"),
job_trigger=trigger_model,
)
return koji_build_model
@pytest.fixture(scope="module")
def github_release_webhook() -> dict:
with open(DATA_DIR / "webhooks" / "github" / "release.json") as outfile:
return json.load(outfile)
@pytest.fixture(scope="module")
def release_event(github_release_webhook) -> ReleaseEvent:
return Parser.parse_release_event(github_release_webhook)
@pytest.fixture(scope="module")
def github_pr_webhook():
with open(DATA_DIR / "webhooks" / "github" / "pr.json") as outfile:
return json.load(outfile)
@pytest.fixture(scope="module")
def github_push_webhook():
with open(DATA_DIR / "webhooks" / "github" / "push_branch.json") as outfile:
return json.load(outfile)
@pytest.fixture(scope="module")
def github_pr_event(github_pr_webhook) -> PullRequestGithubEvent:
return Parser.parse_pr_event(github_pr_webhook)
@pytest.fixture(scope="module")
def github_push_event(github_push_webhook) -> PushGitHubEvent:
return Parser.parse_push_event(github_push_webhook)
@pytest.fixture(scope="module")
def gitlab_mr_webhook():
with open(DATA_DIR / "webhooks" / "gitlab" / "mr_event.json") as outfile:
return json.load(outfile)
@pytest.fixture(scope="module")
def gitlab_mr_event(gitlab_mr_webhook) -> MergeRequestGitlabEvent:
return Parser.parse_mr_event(gitlab_mr_webhook)
@pytest.fixture
def cache_clear(request):
"""
Fixture which cleans lru_cache of functions defined in module variable CACHE_CLEAR.
This allows reliable test results.
:return:
"""
if getattr(request.module, "CACHE_CLEAR", None):
[f.cache_clear() for f in getattr(request.module, "CACHE_CLEAR")]
|
py
|
1a5db46af507263ce96d893f01097c727c515aeb
|
"""
Django settings for assignment project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gw=a_++!+7%64@bb!f+47()y4+qw#6j%8t*5qy@4epcqpt-2$#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'assignment.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'assignment.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
BASE_DIR / "static",
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/api/archive/get/'
|
py
|
1a5db46da7082cf0928ed5d8fa2f9b805c9adf78
|
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY', default='4EIgRS3BINfpj8mFVlolNSa914hNKAgdKSxHDeO5Ym4dU2GLOzq5yTdvaisW6xWL')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [
"localhost",
"0.0.0.0",
"127.0.0.1",
]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa F405
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = 'localhost'
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ['debug_toolbar'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2']
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ['django_extensions'] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
|
py
|
1a5db4a374a596e70d3c68bd014a90ebc2f16edd
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from blinkpy.common.host_mock import MockHost
from blinkpy.common.net.luci_auth import LuciAuth
class LuciAuthTest(unittest.TestCase):
def test_run_on_linux(self):
host = MockHost(os_name='linux')
host.filesystem.maybe_make_directory(
'/mock-checkout/third_party/depot_tools')
luci_auth = LuciAuth(host)
luci_auth.get_access_token()
self.assertListEqual(
host.executive.calls,
[['/mock-checkout/third_party/depot_tools/luci-auth', 'token']])
def test_run_on_windows(self):
host = MockHost(os_name='win')
host.filesystem.maybe_make_directory(
'/mock-checkout/third_party/depot_tools')
luci_auth = LuciAuth(host)
luci_auth.get_access_token()
self.assertEqual(
host.executive.calls,
[['/mock-checkout/third_party/depot_tools/luci-auth.bat', 'token']
])
|
py
|
1a5db5237de410d311cd0094aea5844830bb3ab4
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
if 'RACK_ENV' in os.environ:
RACK_ENV = os.environ.get("RACK_ENV")
        print('RACK_ENV:', RACK_ENV)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pinry.settings."+RACK_ENV)
else:
        print('RACK_ENV not detected, using development settings')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pinry.settings.development")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py
|
1a5db590ed71c32569807eb52ff727c002a80df8
|
def add_all(a):
    """Return the total cost of repeatedly joining the two smallest values of a.
    Each pass sorts the list, sums the two smallest elements, adds that sum to
    the running total, and pushes the combined value back into the list
    (the classic "minimum cost to join ropes" greedy).
    """
    r = t = 0
    while len(a) > 1:
        a = sorted(a)
        r = sum(a[:2])      # cost of joining the two smallest items
        t += r
        a = a[2:] + [r]     # replace them with their combined value
    return t
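# A small demonstration with illustrative values (not part of the original
# snippet): joining [1, 2, 3, 4] costs 1+2=3, then 3+3=6, then 4+6=10, total 19.
if __name__ == '__main__':
    assert add_all([1, 2, 3, 4]) == 19
    print(add_all([1, 2, 3, 4]))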
|
py
|
1a5db6a1a9b32cdfa20cc8df9abef9c5fac1616f
|
import os
import re
import threading
import unittest
import pytest
from packaging import version
from localstack.services.install import TERRAFORM_BIN, install_terraform
from localstack.utils.aws import aws_stack
from localstack.utils.common import is_command_available, rm_rf, run, start_worker_thread
BUCKET_NAME = "tf-bucket"
QUEUE_NAME = "tf-queue"
QUEUE_ARN = "arn:aws:sqs:us-east-1:000000000000:tf-queue"
# lambda Testing Variables
LAMBDA_NAME = "tf-lambda"
LAMBDA_ARN = f"arn:aws:lambda:us-east-1:000000000000:function:{LAMBDA_NAME}"
LAMBDA_HANDLER = "DotNetCore2::DotNetCore2.Lambda.Function::SimpleFunctionHandler"
LAMBDA_RUNTIME = "dotnetcore2.0"
LAMBDA_ROLE = "arn:aws:iam::000000000000:role/iam_for_lambda"
INIT_LOCK = threading.RLock()
def check_terraform_version():
if not is_command_available(TERRAFORM_BIN):
return False, None
ver_string = run([TERRAFORM_BIN, "-version"])
ver_string = re.search(r"v(\d+\.\d+\.\d+)", ver_string).group(1)
if ver_string is None:
return False, None
return version.parse(ver_string) < version.parse("0.15"), ver_string
class TestTerraform(unittest.TestCase):
@classmethod
def setUpClass(cls):
with INIT_LOCK:
available, version = check_terraform_version()
if not available:
msg = "could not find a compatible version of terraform"
if version:
msg += f" (version = {version})"
else:
msg += " (command not found)"
return pytest.skip(msg)
run("cd %s; %s apply -input=false tfplan" % (cls.get_base_dir(), TERRAFORM_BIN))
@classmethod
def tearDownClass(cls):
run("cd %s; %s destroy -auto-approve" % (cls.get_base_dir(), TERRAFORM_BIN))
@classmethod
def init_async(cls):
def _run(*args):
with INIT_LOCK:
install_terraform()
base_dir = cls.get_base_dir()
if not os.path.exists(os.path.join(base_dir, ".terraform", "plugins")):
run("cd %s; %s init -input=false" % (base_dir, TERRAFORM_BIN))
# remove any cache files from previous runs
for tf_file in [
"tfplan",
"terraform.tfstate",
"terraform.tfstate.backup",
]:
rm_rf(os.path.join(base_dir, tf_file))
# create TF plan
run("cd %s; %s plan -out=tfplan -input=false" % (base_dir, TERRAFORM_BIN))
start_worker_thread(_run)
@classmethod
def get_base_dir(cls):
return os.path.join(os.path.dirname(__file__), "terraform")
@pytest.mark.skip_offline
def test_bucket_exists(self):
s3_client = aws_stack.connect_to_service("s3")
response = s3_client.head_bucket(Bucket=BUCKET_NAME)
self.assertEqual(200, response["ResponseMetadata"]["HTTPStatusCode"])
cors = {
"AllowedHeaders": ["*"],
"AllowedMethods": ["GET", "PUT", "POST"],
"AllowedOrigins": ["*"],
"ExposeHeaders": ["ETag", "x-amz-version-id"],
"MaxAgeSeconds": 3000,
}
response = s3_client.get_bucket_cors(Bucket=BUCKET_NAME)
self.assertEqual(cors, response["CORSRules"][0])
response = s3_client.get_bucket_versioning(Bucket=BUCKET_NAME)
self.assertEqual("Enabled", response["Status"])
@pytest.mark.skip_offline
def test_sqs(self):
sqs_client = aws_stack.connect_to_service("sqs")
queue_url = sqs_client.get_queue_url(QueueName=QUEUE_NAME)["QueueUrl"]
response = sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"])
self.assertEqual("90", response["Attributes"]["DelaySeconds"])
self.assertEqual("2048", response["Attributes"]["MaximumMessageSize"])
self.assertEqual("86400", response["Attributes"]["MessageRetentionPeriod"])
self.assertEqual("10", response["Attributes"]["ReceiveMessageWaitTimeSeconds"])
@pytest.mark.skip_offline
def test_lambda(self):
lambda_client = aws_stack.connect_to_service("lambda")
response = lambda_client.get_function(FunctionName=LAMBDA_NAME)
self.assertEqual(LAMBDA_NAME, response["Configuration"]["FunctionName"])
self.assertEqual(LAMBDA_HANDLER, response["Configuration"]["Handler"])
self.assertEqual(LAMBDA_RUNTIME, response["Configuration"]["Runtime"])
self.assertEqual(LAMBDA_ROLE, response["Configuration"]["Role"])
@pytest.mark.skip_offline
def test_event_source_mapping(self):
lambda_client = aws_stack.connect_to_service("lambda")
all_mappings = lambda_client.list_event_source_mappings(
EventSourceArn=QUEUE_ARN, FunctionName=LAMBDA_NAME
)
function_mapping = all_mappings.get("EventSourceMappings")[0]
assert function_mapping["FunctionArn"] == LAMBDA_ARN
assert function_mapping["EventSourceArn"] == QUEUE_ARN
@pytest.mark.skip_offline
def test_apigateway(self):
apigateway_client = aws_stack.connect_to_service("apigateway")
rest_apis = apigateway_client.get_rest_apis()
rest_id = None
for rest_api in rest_apis["items"]:
if rest_api["name"] == "test-tf-apigateway":
rest_id = rest_api["id"]
break
self.assertTrue(rest_id)
resources = apigateway_client.get_resources(restApiId=rest_id)["items"]
# We always have 1 default root resource (with path "/")
self.assertEqual(3, len(resources))
res1 = [r for r in resources if r.get("pathPart") == "mytestresource"]
self.assertTrue(res1)
self.assertEqual("/mytestresource", res1[0]["path"])
self.assertEqual(2, len(res1[0]["resourceMethods"]))
self.assertEqual("MOCK", res1[0]["resourceMethods"]["GET"]["methodIntegration"]["type"])
res2 = [r for r in resources if r.get("pathPart") == "mytestresource1"]
self.assertTrue(res2)
self.assertEqual("/mytestresource1", res2[0]["path"])
self.assertEqual(2, len(res2[0]["resourceMethods"]))
self.assertEqual(
"AWS_PROXY", res2[0]["resourceMethods"]["GET"]["methodIntegration"]["type"]
)
self.assertTrue(res2[0]["resourceMethods"]["GET"]["methodIntegration"]["uri"])
@pytest.mark.skip_offline
def test_route53(self):
route53 = aws_stack.connect_to_service("route53")
response = route53.create_hosted_zone(Name="zone123", CallerReference="ref123")
self.assertEqual(201, response["ResponseMetadata"]["HTTPStatusCode"])
change_id = response.get("ChangeInfo", {}).get("Id", "change123")
response = route53.get_change(Id=change_id)
self.assertEqual(200, response["ResponseMetadata"]["HTTPStatusCode"])
@pytest.mark.skip_offline
def test_acm(self):
acm = aws_stack.connect_to_service("acm")
certs = acm.list_certificates()["CertificateSummaryList"]
certs = [c for c in certs if c.get("DomainName") == "example.com"]
self.assertEqual(1, len(certs))
@pytest.mark.skip_offline
def test_apigateway_escaped_policy(self):
apigateway_client = aws_stack.connect_to_service("apigateway")
rest_apis = apigateway_client.get_rest_apis()
service_apis = []
for rest_api in rest_apis["items"]:
if rest_api["name"] == "service_api":
service_apis.append(rest_api)
self.assertEqual(1, len(service_apis))
@pytest.mark.skip_offline
def test_dynamodb(self):
def _table_exists(tablename, dynamotables):
return any(name for name in dynamotables["TableNames"] if name == tablename)
dynamo_client = aws_stack.connect_to_service("dynamodb")
tables = dynamo_client.list_tables()
self.assertTrue(_table_exists("tf_dynamotable1", tables))
self.assertTrue(_table_exists("tf_dynamotable2", tables))
self.assertTrue(_table_exists("tf_dynamotable3", tables))
|
py
|
1a5db6b9dacac668e164c45ba5b7c30f457574be
|
from utils import utils
class AskComplexGraph():
filepath = "../outputs/v4/intents/"
name = 'Ask for complex graph'
def __init__(self, database, table):
self.database = database
self.table = table
@property
def intent(self):
intent = {
"id":"7c308982-f0d2-4129-b5d4-5f01d21545b8",
"name":"Ask for complex graph",
"auto":True,
"contexts":[
],
"responses":[
{
"resetContexts":False,
"affectedContexts":[
{
"name":"Twovargraph-followup",
"parameters":{
},
"lifespan":2
}
],
"parameters":[
{
"id":"3bb0df88-f36d-42eb-be60-08c021ef469d",
"required":True,
"dataType":"@columns_select",
"name":"columns_select",
"value":"$columns_select",
"promptMessages":[
],
"noMatchPromptMessages":[
],
"noInputPromptMessages":[
],
"outputDialogContexts":[
],
"isList":True
},
{
"id":"26e56b0a-03b7-4ceb-8322-5b4d9089d231",
"required":False,
"dataType":"",
"name":"tablename",
"value":self.table,
"promptMessages":[
],
"noMatchPromptMessages":[
],
"noInputPromptMessages":[
],
"outputDialogContexts":[
],
"isList":False
},
{
"id":"ba50e724-051b-4c8d-aacf-23b97fea00f0",
"required":False,
"dataType":"",
"name":"databasename",
"value":self.database,
"promptMessages":[
],
"noMatchPromptMessages":[
],
"noInputPromptMessages":[
],
"outputDialogContexts":[
],
"isList":False
}
],
"messages":[
],
"defaultResponsePlatforms":{
},
"speech":[
]
}
],
"priority":500000,
"webhookUsed":True,
"webhookForSlotFilling":False,
"fallbackIntent":False,
"events":[
],
"conditionalResponses":[
],
"condition":"",
"conditionalFollowupEvents":[
]
}
return intent
@property
def usersays(self):
us = [
{
"id":"41f24594-9c05-4797-9c02-bdfef39b8a2e",
"data":[
{
"text":"graph ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":", ",
"userDefined":False
},
{
"text":"two",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":", ",
"userDefined":False
},
{
"text":"three",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"58ed5d98-560c-4758-88ab-c50e5fd627ac",
"data":[
{
"text":"graph ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"5063647f-b26c-419a-bb2f-af018a191dec",
"data":[
{
"text":"A graph with ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":False
},
{
"text":", ",
"userDefined":False
},
{
"text":"two",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" and ",
"userDefined":False
},
{
"text":"three",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"9b9bc170-b480-4e56-8e22-fa6a3976bc06",
"data":[
{
"text":"A graph with ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" and ",
"userDefined":False
},
{
"text":"two",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"15f1fa59-af6a-4bdc-b122-ba787d70aae4",
"data":[
{
"text":"A graph with ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":False
},
{
"text":", ",
"userDefined":False
},
{
"text":"two",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":False
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"34ec4914-f864-4d2d-9c9d-cb6f042e78c1",
"data":[
{
"text":"A graph with ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":False
},
{
"text":", ",
"userDefined":False
},
{
"text":"two",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":", ",
"userDefined":False
},
{
"text":"three",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"9c8e394c-7a00-42d1-8a56-f51833413f53",
"data":[
{
"text":"graph",
"userDefined":False
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"b6e5447b-0460-4280-989e-efa128fd2ef4",
"data":[
{
"text":"localizacion_coordenada_y",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" vs ",
"userDefined":False
},
{
"text":"localizacion_coordenada_y",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"07de519b-3f1f-4e92-84a5-5df1647bc275",
"data":[
{
"text":"A graph with ",
"userDefined":False
},
{
"text":"direccion_coordenada_x",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" vs ",
"userDefined":False
},
{
"text":"direccion_coordenada_y",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"f1b88e4d-420e-47df-ad71-d43ceb637657",
"data":[
{
"text":"I want a graph with ",
"userDefined":False
},
{
"text":"direccion_coordenada_x",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" vs ",
"userDefined":False
},
{
"text":"direccion_coordenada_y",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"01e193cd-906e-4bde-a574-ba08b768d4de",
"data":[
{
"text":"view graph with ",
"userDefined":False
},
{
"text":"direccion_coordenada_x",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" vs ",
"userDefined":False
},
{
"text":"direccion_coordenada_y",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
}
]
return us
@property
def database(self):
return self.__database
@property
def table(self):
return self.__table
@database.setter
def database(self, database):
self.__database = database
@table.setter
def table(self, table):
self.__table = table
def writeToFile(self):
utils.writeToFile(self.intent, self.filepath + self.name + '.json')
utils.writeToFile(self.usersays, self.filepath + self.name + '_usersays_en.json')
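# A minimal usage sketch; the database and table names are placeholders, and it
# assumes the output directory in `filepath` already exists:
if __name__ == '__main__':
    intent = AskComplexGraph('my_database', 'my_table')
    intent.writeToFile()   # writes '<name>.json' and '<name>_usersays_en.json' under filepath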
|
py
|
1a5db6d5288990e8bb77a5c265fb559f900ac82d
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0201
from recipe_engine import recipe_api
CONFIG_DEBUG = 'Debug'
CONFIG_RELEASE = 'Release'
class SkiaVarsApi(recipe_api.RecipeApi):
def setup(self):
"""Prepare the variables."""
# Setup
self.builder_name = self.m.properties['buildername']
self.slave_dir = self.m.path['start_dir']
# Special input/output directories.
self.build_dir = self.slave_dir.join('build')
self.default_env = self.m.context.env
self.default_env['CHROME_HEADLESS'] = '1'
self.default_env['PATH'] = self.m.path.pathsep.join([
self.default_env.get('PATH', '%(PATH)s'),
str(self.m.bot_update._module.PACKAGE_REPO_ROOT),
])
self.cache_dir = self.slave_dir.join('cache')
self.swarming_out_dir = self.slave_dir.join(
self.m.properties['swarm_out_dir'])
self.tmp_dir = self.m.path['start_dir'].join('tmp')
self.builder_cfg = self.m.builder_name_schema.DictForBuilderName(
self.builder_name)
self.role = self.builder_cfg['role']
if self.role in [self.m.builder_name_schema.BUILDER_ROLE_HOUSEKEEPER,
self.m.builder_name_schema.BUILDER_ROLE_CALMBENCH]:
self.configuration = CONFIG_RELEASE
else:
self.configuration = self.builder_cfg.get('configuration', CONFIG_DEBUG)
arch = (self.builder_cfg.get('arch') or self.builder_cfg.get('target_arch'))
if ('Win' in self.builder_cfg.get('os', '') and arch == 'x86_64'):
self.configuration += '_x64'
self.extra_tokens = []
if len(self.builder_cfg.get('extra_config', '')) > 0:
if self.builder_cfg['extra_config'].startswith('SK'):
assert self.builder_cfg['extra_config'].isupper()
self.extra_tokens = [self.builder_cfg['extra_config']]
else:
self.extra_tokens = self.builder_cfg['extra_config'].split('_')
self.patch_storage = self.m.properties.get('patch_storage', 'gerrit')
self.issue = None
self.patchset = None
self.is_trybot = False
if (self.m.properties.get('patch_issue', '') and
self.m.properties.get('patch_set', '')):
self.is_trybot = True
self.issue = self.m.properties['patch_issue']
self.patchset = self.m.properties['patch_set']
self._swarming_bot_id = None
self._swarming_task_id = None
# Internal bot support.
self.internal_hardware_label = (
self.m.properties.get('internal_hardware_label'))
self.is_internal_bot = self.internal_hardware_label is not None
@property
def is_linux(self):
return 'Ubuntu' in self.builder_name or 'Debian' in self.builder_name
@property
def swarming_bot_id(self):
if not self._swarming_bot_id:
self._swarming_bot_id = self.m.python.inline(
name='get swarming bot id',
program='''import os
print os.environ.get('SWARMING_BOT_ID', '')
''',
stdout=self.m.raw_io.output()).stdout.rstrip()
return self._swarming_bot_id
@property
def swarming_task_id(self):
if not self._swarming_task_id:
self._swarming_task_id = self.m.python.inline(
name='get swarming task id',
program='''import os
print os.environ.get('SWARMING_TASK_ID', '')
''',
stdout=self.m.raw_io.output()).stdout.rstrip()
return self._swarming_task_id
|
py
|
1a5db761b32385d62ee3c67b65df9a36f3b56254
|
from turtle import *
# Draw a filled star: each 170-degree left turn leaves the heading slightly
# rotated, so the path only closes after many strokes.
color('red', 'yellow')
begin_fill()
while True:
    forward(200)
    left(170)
    if abs(pos()) < 1:   # back near the origin: the star is closed
        break
end_fill()
done()
|