ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 1a3c5b061cfb04fc3706c52f1552ac3d137645b2 | from django.contrib import admin
from app.models import Kategori, Produk
# Register your models here.
class DaftarProduk(admin.ModelAdmin):
list_display = ['judul', 'harga', 'kategori_produk']
search_fields = ['judul', 'harga', 'kategori_produk']
list_filter = ['kategori_produk']
list_per_page = 5
admin.site.register(Produk, DaftarProduk)
admin.site.register(Kategori) |
py | 1a3c5b357161e2419271914ec116886a345960a4 | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 Eugenio Parodi <ceccopierangiolieugenio AT googlemail DOT com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from TermTk.TTkCore.log import TTkLog
from TermTk.TTkCore.cfg import TTkCfg
from TermTk.TTkCore.constant import TTkK
from TermTk.TTkCore.helper import TTkHelper
# Ansi Escape Codes:
# https://conemu.github.io/en/AnsiEscapeCodes.html
# From http://pueblo.sourceforge.net/doc/manual/ansi_color_codes.html
# Code: Client: Meaning:
# [0m -- reset; clears all colors and styles (to white on black)
# [1m -- bold on (see below)
# [3m -- italics on
# [4m -- underline on
# [7m 2.50 inverse on; reverses foreground & background colors
# [9m 2.50 strikethrough on
# [22m 2.50 bold off (see below)
# [23m 2.50 italics off
# [24m 2.50 underline off
# [27m 2.50 inverse off
# [29m 2.50 strikethrough off
# [30m -- set foreground color to black
# [31m -- set foreground color to red
# [32m -- set foreground color to green
# [33m -- set foreground color to yellow
# [34m -- set foreground color to blue
# [35m -- set foreground color to magenta (purple)
# [36m -- set foreground color to cyan
# [37m -- set foreground color to white
# [39m 2.53 set foreground color to default (white)
# [40m -- set background color to black
# [41m -- set background color to red
# [42m -- set background color to green
# [43m -- set background color to yellow
# [44m -- set background color to blue
# [45m -- set background color to magenta (purple)
# [46m -- set background color to cyan
# [47m -- set background color to white
# [49m 2.53 set background color to default (black)
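# Note (illustrative assumption): the fg/bg strings handled by this class are 24-bit
# SGR sequences of the form "\033[38;2;R;G;Bm" (foreground) and "\033[48;2;R;G;Bm"
# (background), which is why fgToRGB/bgToRGB below split on ";" and read fields 2..4.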
class _TTkColor:
__slots__ = ("_fg", "_bg", "_mod", "_colorMod")
_fg: str
_bg: str
_mod: str
def __init__(self, fg: str = "", bg: str = "", mod: str = "", colorMod=None):
self._fg = fg
self._bg = bg
self._mod = mod
self._colorMod = colorMod
def colorType(self):
return (
(TTkK.Foreground if self._fg != "" else TTkK.NONE)
| (TTkK.Background if self._bg != "" else TTkK.NONE)
| (TTkK.Modifier if self._mod != "" else TTkK.NONE)
)
def getHex(self, ctype):
if ctype == TTkK.Foreground:
r, g, b = self.fgToRGB()
else:
r, g, b = self.bgToRGB()
return "#{:06x}".format(r << 16 | g << 8 | b)
def fgToRGB(self):
if self._fg == "":
return 0xFF, 0xFF, 0xFF
cc = self._fg.split(";")
r = int(cc[2])
g = int(cc[3])
b = int(cc[4][:-1])
return r, g, b
def bgToRGB(self):
if self._bg == "":
return 0, 0, 0
cc = self._bg.split(";")
r = int(cc[2])
g = int(cc[3])
b = int(cc[4][:-1])
return r, g, b
def __str__(self):
return self._fg + self._bg + self._mod
def __eq__(self, other):
if other is None:
return False
return (
self._fg == other._fg and self._bg == other._bg and self._mod == other._mod
)
def __add__(self, other):
# TTkLog.debug("__add__")
if isinstance(other, str):
return str(self) + other
else:
fg: str = other._fg or self._fg
bg: str = other._bg or self._bg
mod: str = self._mod + other._mod
colorMod = other._colorMod or self._colorMod
return TTkColor(fg, bg, mod, colorMod)
def __radd__(self, other):
# TTkLog.debug("__radd__")
if isinstance(other, str):
return other + str(self)
else:
fg: str = other._fg or self._fg
bg: str = other._bg or self._bg
mod: str = self._mod + other._mod
colorMod = other._colorMod or self._colorMod
return TTkColor(fg, bg, mod, colorMod)
def __sub__(self, other):
# TTkLog.debug("__sub__")
# if other is None: return str(self)
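# if the previous color (other) set a fg/bg/modifier that this color leaves unset,
# emit a full reset first so the stale attribute does not leak through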
if (
"" == self._bg != other._bg
or "" == self._fg != other._fg
or "" == self._mod != other._mod
):
return "\033[0m" + self
return str(self)
def modParam(self, *args, **kwargs):
if self._colorMod is None:
return self
ret = self.copy()
ret._colorMod.setParam(*args, **kwargs)
return ret
def mod(self, x, y):
if self._colorMod is None:
return self
return self._colorMod.exec(x, y, self)
def copy(self, modifier=True):
ret = _TTkColor()
ret._fg = self._fg
ret._bg = self._bg
ret._mod = self._mod
if modifier:
ret._colorMod = self._colorMod.copy()
return ret
class _TTkColorModifier:
def __init__(self, *args, **kwargs):
pass
def setParam(self, *args, **kwargs):
pass
def copy(self):
return self
class TTkColorGradient(_TTkColorModifier):
__slots__ = ("_increment", "_val", "_buffer")
_increment: int
_val: int
def __init__(self, *args, **kwargs):
_TTkColorModifier.__init__(self, *args, **kwargs)
self._increment = kwargs.get("increment", 0)
self._val = 0
self._buffer = {}
def setParam(self, *args, **kwargs):
self._val = kwargs.get("val", 0)
def exec(self, x, y, color):
def _applyGradient(c):
if c == "":
return c
multiplier = abs(self._val + y)
cc = c.split(";")
# TTkLog.debug("Eugenio "+c.replace('\033','<ESC>'))
r = int(cc[2]) + self._increment * multiplier
g = int(cc[3]) + self._increment * multiplier
b = int(cc[4][:-1]) + self._increment * multiplier
r = max(min(255, r), 0)
g = max(min(255, g), 0)
b = max(min(255, b), 0)
return f"{cc[0]};{cc[1]};{r};{g};{b}m"
bname = str(color)
# I made a buffer to keep all the gradient values to speed up the paint process
if bname not in self._buffer:
self._buffer[bname] = [None] * (256 * 2)
id = self._val + y - 256
if self._buffer[bname][id] is not None:
return self._buffer[bname][id]
copy = color.copy(modifier=False)
copy._fg = _applyGradient(color._fg)
copy._bg = _applyGradient(color._bg)
self._buffer[bname][id] = copy
return self._buffer[bname][id]
def copy(self):
return self
# ret = TTkColorGradient()
# ret._increment = self._increment
# ret._val = self._val
# return ret
class TTkColor(_TTkColor):
"""TermTk Color helper
.. role:: strike
:class: strike
.. role:: underline
:class: underline
The TTkColor constructor creates the color based on HEX values.
Example:
.. code:: python
# Foreground only colors:
color_fg_red = TTkColor.fg('#FF0000')
color_fg_green = TTkColor.fg('#00FF00')
color_fg_blue = TTkColor.fg('#0000FF')
# Background only colors:
color_bg_red = TTkColor.bg('#FF0000')
color_bg_green = TTkColor.bg('#00FF00')
color_bg_blue = TTkColor.bg('#0000FF')
# Combine
color_1 = color_fg_red + color_bg_blue
color_2 = color_fg_red + TTkColor.bg('#FFFF00')
color_3 = color_2 + TTkColor.UNDERLINE + TTkColor.BOLD
"""
RST = _TTkColor(fg="\033[0m")
"""Reset to the default terminal color and modifiers"""
# Modifiers:
BOLD = _TTkColor(mod="\033[1m")
"""**Bold** modifier"""
ITALIC = _TTkColor(mod="\033[3m")
"""*Italic* modifier"""
UNDERLINE = _TTkColor(mod="\033[4m")
""":underline:`Underline` modifier"""
STRIKETROUGH = _TTkColor(mod="\033[9m")
""":strike:`Striketrough` modifier"""
@staticmethod
def fg(*args, **kwargs):
"""Helper to generate a Foreground color
Example:
.. code:: python
color_1 = TTkColor.fg('#FF0000')
color_2 = TTkColor.fg(color='#00FF00')
color_3 = TTkColor.fg('#0000FF', modifier=TTkColorGradient(increment=6))
:param str color: the color representation in (str)HEX
:type color: str
:param str modifier: (experimental) the color modifier to be used to improve the **kinkiness**
:type modifier: TTkColorModifier, optional
"""
mod = kwargs.get("modifier", None)
if len(args) > 0:
color = args[0]
else:
color = kwargs.get("color", "")
return TTkColor(fg=TTkHelper.Color.fg(color), colorMod=mod)
@staticmethod
def bg(*args, **kwargs):
"""Helper to generate a Background color
Example:
.. code:: python
color_1 = TTkColor.bg('#FF0000')
color_2 = TTkColor.bg(color='#00FF00')
color_3 = TTkColor.bg('#0000FF', modifier=TTkColorGradient(increment=6))
:param str color: the color representation in (str)HEX
:type color: str
:param str modifier: (experimental) the color modifier to be used to improve the **kinkiness**
:type modifier: TTkColorModifier, optional
"""
mod = kwargs.get("modifier", None)
if len(args) > 0:
color = args[0]
else:
color = kwargs.get("color", "")
return TTkColor(bg=TTkHelper.Color.bg(color), colorMod=mod)
|
py | 1a3c5b45534df39d73ceb11b5b6e5e2f94b4f2e0 | import cv2 as cv
import argparse
import numpy as np
import sys
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL)
parser = argparse.ArgumentParser(description='Use this script to run semantic segmentation deep learning networks using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', required=True,
help='Path to a binary file of the model containing the trained weights. '
'It could be a file with extensions .caffemodel (Caffe), '
'.pb (TensorFlow), .t7 or .net (Torch), .weights (Darknet)')
parser.add_argument('--config',
help='Path to a text file containing the network configuration of the model. '
'It could be a file with extensions .prototxt (Caffe), .pbtxt (TensorFlow), .cfg (Darknet)')
parser.add_argument('--framework', choices=['caffe', 'tensorflow', 'torch', 'darknet'],
help='Optional name of the origin framework of the model. '
'It is detected automatically if not set.')
parser.add_argument('--classes', help='Optional path to a text file with names of classes.')
parser.add_argument('--colors', help='Optional path to a text file with a color for every class. '
'Every color is represented by three values from 0 to 255 in BGR channel order.')
parser.add_argument('--mean', nargs='+', type=float, default=[0, 0, 0],
help='Preprocess input image by subtracting mean values. '
'Mean values should be in BGR order.')
parser.add_argument('--scale', type=float, default=1.0,
help='Preprocess input image by multiplying it by a scale factor.')
parser.add_argument('--width', type=int, required=True,
help='Preprocess input image by resizing to a specific width.')
parser.add_argument('--height', type=int, required=True,
help='Preprocess input image by resizing to a specific height.')
parser.add_argument('--rgb', action='store_true',
help='Indicate that the model works with RGB input images instead of BGR ones.')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
"%d: default C++ backend, "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.seek.intel.com/deep-learning-deployment)" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL' % targets)
args = parser.parse_args()
np.random.seed(324)
# Load names of classes
classes = None
if args.classes:
with open(args.classes, 'rt') as f:
classes = f.read().rstrip('\n').split('\n')
# Load colors
colors = None
if args.colors:
with open(args.colors, 'rt') as f:
colors = [np.array(color.split(' '), np.uint8) for color in f.read().rstrip('\n').split('\n')]
legend = None
def showLegend(classes):
global legend
if not classes is None and legend is None:
blockHeight = 30
assert(len(classes) == len(colors))
legend = np.zeros((blockHeight * len(colors), 200, 3), np.uint8)
for i in range(len(classes)):
block = legend[i * blockHeight:(i + 1) * blockHeight]
block[:,:] = colors[i]
cv.putText(block, classes[i], (0, blockHeight // 2), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
cv.namedWindow('Legend', cv.WINDOW_NORMAL)
cv.imshow('Legend', legend)
classes = None
# Load a network
net = cv.dnn.readNet(args.model, args.config, args.framework)
net.setPreferableBackend(args.backend)
net.setPreferableTarget(args.target)
winName = 'Deep learning semantic segmentation in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)
cap = cv.VideoCapture(args.input if args.input else 0)
legend = None
while cv.waitKey(1) < 0:
hasFrame, frame = cap.read()
if not hasFrame:
cv.waitKey()
break
# Create a 4D blob from a frame.
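# blobFromImage resizes the frame to (width, height), subtracts the mean, applies the
# scale factor and returns an NCHW float32 blob; the --rgb flag is passed as swapRB.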
blob = cv.dnn.blobFromImage(frame, args.scale, (args.width, args.height), args.mean, args.rgb, crop=False)
# Run a model
net.setInput(blob)
score = net.forward()
numClasses = score.shape[1]
height = score.shape[2]
width = score.shape[3]
# Draw segmentation
if not colors:
# Generate colors
colors = [np.array([0, 0, 0], np.uint8)]
for i in range(1, numClasses):
colors.append((colors[i - 1] + np.random.randint(0, 256, [3], np.uint8)) / 2)
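# score has shape (1, numClasses, height, width); the argmax over axis 0 of score[0]
# below picks the winning class id for every pixel of the network output.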
classIds = np.argmax(score[0], axis=0)
segm = np.stack([colors[idx] for idx in classIds.flatten()])
segm = segm.reshape(height, width, 3)
segm = cv.resize(segm, (frame.shape[1], frame.shape[0]), interpolation=cv.INTER_NEAREST)
frame = (0.1 * frame + 0.9 * segm).astype(np.uint8)
# Put efficiency information.
t, _ = net.getPerfProfile()
label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
showLegend(classes)
cv.imshow(winName, frame)
|
py | 1a3c5bdb9db681db9dd806d00addec26c14c465b | import attr
import json
from ._core import Enum
class GuestStatus(Enum):
INVITED = 1
GOING = 2
DECLINED = 3
@attr.s(cmp=False)
class Plan:
"""Represents a plan."""
#: ID of the plan
uid = attr.ib(None, init=False)
#: Plan time (timestamp), only precise down to the minute
time = attr.ib(converter=int)
#: Plan title
title = attr.ib()
#: Plan location name
location = attr.ib(None, converter=lambda x: x or "")
#: Plan location ID
location_id = attr.ib(None, converter=lambda x: x or "")
#: ID of the plan creator
author_id = attr.ib(None, init=False)
#: Dictionary of `User` IDs mapped to their `GuestStatus`
guests = attr.ib(None, init=False)
@property
def going(self):
"""List of the `User` IDs who will take part in the plan."""
return [
id_
for id_, status in (self.guests or {}).items()
if status is GuestStatus.GOING
]
@property
def declined(self):
"""List of the `User` IDs who won't take part in the plan."""
return [
id_
for id_, status in (self.guests or {}).items()
if status is GuestStatus.DECLINED
]
@property
def invited(self):
"""List of the `User` IDs who are invited to the plan."""
return [
id_
for id_, status in (self.guests or {}).items()
if status is GuestStatus.INVITED
]
@classmethod
def _from_pull(cls, data):
rtn = cls(
time=data.get("event_time"),
title=data.get("event_title"),
location=data.get("event_location_name"),
location_id=data.get("event_location_id"),
)
rtn.uid = data.get("event_id")
rtn.author_id = data.get("event_creator_id")
rtn.guests = {
x["node"]["id"]: GuestStatus[x["guest_list_state"]]
for x in json.loads(data["guest_state_list"])
}
return rtn
@classmethod
def _from_fetch(cls, data):
rtn = cls(
time=data.get("event_time"),
title=data.get("title"),
location=data.get("location_name"),
location_id=str(data["location_id"]) if data.get("location_id") else None,
)
rtn.uid = data.get("oid")
rtn.author_id = data.get("creator_id")
rtn.guests = {id_: GuestStatus[s] for id_, s in data["event_members"].items()}
return rtn
@classmethod
def _from_graphql(cls, data):
rtn = cls(
time=data.get("time"),
title=data.get("event_title"),
location=data.get("location_name"),
)
rtn.uid = data.get("id")
rtn.author_id = data["lightweight_event_creator"].get("id")
rtn.guests = {
x["node"]["id"]: GuestStatus[x["guest_list_state"]]
for x in data["event_reminder_members"]["edges"]
}
return rtn
|
py | 1a3c5c91490c469b383c00d1d5764fd587e0ee5a | from sklearn.cluster import DBSCAN
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df_energy = pd.read_csv('results/SklearnScaledEnergy.csv')
df_energy = df_energy.drop(['id', '1', '2', '3', '4', '5','6','7','8','9'], axis=1)
df_energy.columns = ['energy']
df_perplex = pd.read_csv('results/SklearnScaledPPL.csv')
df_perplex = df_perplex.drop(['id','1','2','3','4','5','6','7','8','9'], axis=1)
df_perplex.columns = ['perplexity']
#df = pd.read_csv('data/actualbase.csv')
#df = df.drop(['id', 'vocab_size', 'hidden_size','num_hidden_layers', 'num_attention_heads', 'intermediate_size', 'actual_hidden_size', 'hidden_act', 'hidden_dropout_prob',
#'attention_probs_dropout_prog', 'max_position_embeddings', 'type_vocab_size', 'initializer_range', 'layer_norm_eps', 'gradient_checkpointing', 'position_embedding_type',
#'use_cache', 'energy_loss'], axis=1)
df = pd.concat([df_perplex, df_energy], axis=1)
#print(df)
#df = df[['perplexity', 'energy_consumption']]
#df['energy_consumption'] = df['energy_consumption'] * 142.3439911
#df['perplexity'] = df.apply(lambda x: np.log2(x))
X = df.to_numpy()
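# DBSCAN groups points that have at least min_samples neighbours within radius eps;
# points that belong to no dense region are labelled -1 and treated as noise below.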
clustering = DBSCAN(eps=0.4, min_samples=5).fit(X)
labels = clustering.labels_
print(labels)
core_samples_mask = np.zeros_like(labels, dtype=bool)
core_samples_mask[clustering.core_sample_indices_] = True
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = [plt.cm.Spectral(each)
for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=10)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=10)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.xlabel('Perplexity, translated and scaled')
plt.ylabel('Energy Consumption (kWh), translated and scaled')
df['clusters'] = labels
#print(df)
df.to_csv('out.csv')
plt.show() |
py | 1a3c5d005af47383f15fa8d865b96f005f4bb7c7 | import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_browse as h2b, h2o_exec as h2e, h2o_hosts, h2o_import as h2i
DO_COMPOUND = False
phrasesCompound = [
# use a dialect with restricted grammar
# 1. all functions are on their own line
# 2. all functions only use data thru their params, or created in the function
# "a=1; a=2; function(x){x=a;a=3}",
# "a=r.hex; function(x){x=a;a=3;nrow(x)*a}(a)",
# "function(x){y=x*2; y+1}(2)",
# "mean=function(x){apply(x,1,sum)/nrow(x)};mean(r.hex)",
]
badPhrases = [
"&&",
"||",
"%*%",
"ifelse",
"cbind",
"print",
"apply",
"sapply",
"ddply",
"var",
"Reduce",
"cut",
"findInterval",
"runif",
"scale",
"t",
"seq_len",
"seq",
"rep_len",
"c",
"table",
"unique",
"factor",
]
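# phrases expected to work when applied column-wise, i.e. apply(r.hex, 2, <phrase>), further down in the test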
phrases = [
"func1",
"func2",
"func3",
"func4",
"func5",
# "func6",
"nrow",
"ncol",
"length",
"is.factor",
"any.factor",
"any.na",
"isTRUE",
"min.na.rm",
"max.na.rm",
"min",
"max",
"xorsum",
]
if DO_COMPOUND:
phrases += phrasesCompound
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1, java_heap_GB=12)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_exec2_apply_phrases(self):
h2o.beta_features = True
bucket = 'home-0xdiag-datasets'
# csvPathname = 'standard/covtype.data'
csvPathname = "standard/covtype.shuffled.10pct.data"
hexKey = 'i.hex'
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local', hex_key=hexKey)
for col in [1]:
initList = [
('r.hex', 'r.hex=i.hex'),
(None, "func1=function(x){max(x[,%s])}" % col),
(None, "func2=function(x){a=3;nrow(x[,%s])*a}" % col),
(None, "func3=function(x){apply(x[,%s],2,sum)/nrow(x[,%s])}" % (col, col) ),
# (None, "function(x) { cbind( mean(x[,1]), mean(x[,%s]) ) }" % col),
(None, "func4=function(x) { mean( x[,%s]) }" % col),
(None, "func5=function(x) { sd( x[,%s]) }" % col),
(None, "func6=function(x) { quantile(x[,%s] , c(0.9) ) }" % col),
]
for resultKey, execExpr in initList:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=resultKey, timeoutSecs=60)
for p in phrases:
# execExpr = "apply(r.hex, c(2), " + p + ")"
execExpr = "apply(r.hex, 2, " + p + ")"
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=60)
if __name__ == '__main__':
h2o.unit_main()
|
py | 1a3c5f1c29b2903e57252d4cb51d3b074b29c371 | import requests
requests.post(url="http://192.168.101:5001/api/iot", data={"test": "echo"}) |
py | 1a3c60a8d225b801d3d34f6c9c430bda1cf81fd5 | from __future__ import absolute_import
from __future__ import print_function
import tensorflow as tf
from opts import opts
from tensorflow.contrib import slim
########################################################################
class LanguageModel(object):
"""
build language model based on objects attention and image attributes
"""
#----------------------------------------------------------------------
def __init__(self,opt,phase,reuse = False):
"""Constructor"""
assert isinstance(opt,opts)
self.opt = opt
# attributes information
self.attri = None
# feature information of each object in the image
self.objects_features = None
# input_mask: weight for each word in input_seqs
self.input_mask = None
# input seqs for language model
self.input_seqs = None
# target seqs for language model
self.target_seqs = None
# whole image features
self.image_features = None
# train/validation/inference
self.phase = phase
self.batch_loss = None
# language model's batch_size is image batch_size * nSeqs_per_img
# it is because each image has nSeqs_per_img labels
self.batch_size = self.opt.batch_size * self.opt.nSeqs_per_img
# global time step
self.step = None
self.length = None
# whether or not reuse variables
self.reuse = reuse
# atten_loss is initialized to 0
self.atten_loss = 0.0
# weight for atten_loss
self.lamb = 0.2
self.logprob = []
def build_inputs(self):
'''
three sources of inputs: objects features, image attributes, previous words
'''
if self.phase == 'train':
attri = tf.placeholder(dtype = tf.float32,shape = [self.batch_size,
self.opt.attr_size],
name='attributes')
objects_features = tf.placeholder(dtype=tf.float32,
shape=[self.batch_size,
self.opt.nRegions,
self.opt.image_encoding_size],name='objects_features')
image_features = tf.placeholder(dtype=tf.float32,
shape=[self.batch_size,
self.opt.image_encoding_size],name='image_features')
input_seqs = tf.placeholder(dtype=tf.int32,shape=[self.batch_size,None],name='input_feed')
target_seqs = tf.placeholder(dtype=tf.int32,shape = [self.batch_size,None],name = 'target_feed')
self.input_seqs = input_seqs
self.target_seqs = target_seqs
# check size
# if the number of words in input_seqs equals the number of words in target_seqs
with tf.control_dependencies([tf.assert_equal(tf.cast(tf.not_equal(input_seqs,0),tf.int32),
tf.cast(tf.not_equal(target_seqs,0),tf.int32))]):
self.input_mask = tf.cast(tf.not_equal(input_seqs,0),tf.int32)
self.attri = attri
self.objects_features = objects_features
self.image_features = image_features
self.freeze = tf.placeholder(shape = [],dtype = tf.bool,name = 'freeze')
else:
# At inference step: one image per batch
attri = tf.placeholder(dtype = tf.float32,shape = [None,
self.opt.attr_size],
name='attributes')
objects_features = tf.placeholder(dtype=tf.float32,
shape=[None,
self.opt.nRegions,
self.opt.image_encoding_size],name='objects_features')
image_features = tf.placeholder(dtype=tf.float32,
shape=[None,
self.opt.image_encoding_size],name='image_features')
# feed all previous words(history information)
# the sequence length is unknown, the batch size is unknown
input_seqs = tf.placeholder(dtype=tf.int32,shape=[None,None],name = 'input_feed')
batch_size = tf.shape(input_seqs)[0]
self.input_seqs = input_seqs
self.target_seqs = None
self.input_mask = None
self.attri = attri
self.objects_features = objects_features
self.image_features = image_features
def seq_embedding(self,word,reuse = False):
with tf.variable_scope('seq_embedding',reuse = reuse):
emb_matrix = tf.get_variable('map',
shape=[self.opt.vocab_size,
self.opt.input_encoding_size],
initializer=tf.contrib.layers.xavier_initializer(),
dtype= tf.float32)
# the attributes embeddings is from row 1~999
# attributes embedding share weights with words embeddings
# 0 is a null token
ix = tf.constant(range(1,1+self.opt.attr_size))
self.attr_matrix = tf.gather(emb_matrix,ix)
word_embedding = tf.nn.embedding_lookup(emb_matrix,word)
return word_embedding
def build_forward_step(self):
'''This is a step of forwarding'''
# build the model in reuse mode or in unreuse mode
# in train: unreuse, validation: reuse(reuse variable in train mode), test: unreuse
# first level attention
# attributes attention, previous words attention, objects attention
def first_layer_attention(step,h,all_atten,all_inputs,reuse):
def attributes_attention(step,attributes,h,attri_atten,reuse = None):
# attention on each attribute
with tf.variable_scope('attributes_att',reuse = reuse) as scope:
mapped_size = 512
attr_size = self.opt.attr_size
# it is a matrix that record the feature of each attributes
# share weights with word embeddings
attr_matrix = self.attr_matrix
h_emb = slim.fully_connected(h,
mapped_size,
biases_initializer=None,
activation_fn=None,
reuse = reuse,
scope='h_emb')
# select top 20 attributes
# DONT put it in the loop
#top20_prob,top20_ix = tf.nn.top_k(attributes,20)
top20_prob,top20_ix = self.top20_prob,self.top20_ix
top20_emb = tf.gather(attr_matrix,top20_ix)
# mapping top20 attributes and h to the same space
top20_map = slim.fully_connected(tf.nn.relu(top20_emb),
mapped_size,
biases_initializer = None,
activation_fn = None,
reuse = reuse,
scope = 'top20_map')
score = slim.fully_connected(tf.reshape(tf.nn.tanh(h_emb[:,tf.newaxis,:]+top20_map),[-1,mapped_size]),
1,
biases_initializer = None,
activation_fn = None,
reuse = reuse,
scope = 'score')
weights = tf.nn.softmax(tf.reshape(score,[-1,20]))
assert(isinstance(attri_atten,tf.TensorArray))
if self.phase == 'train':
mask_t = tf.to_float(self.input_mask[:,step])[:,tf.newaxis]
else:
mask_t = 1.0
new_attri_atten = attri_atten.write(step,weights*mask_t)
# weights* probability * embedding
weighted_emb = weights[:,:,tf.newaxis]*top20_emb*top20_prob[:,:,tf.newaxis]
context = tf.reduce_sum(weighted_emb,axis = 1)
if self.phase == 'train':
# compute attention correctness
# attributes index in the vocabulary
eps = 1e-7
top20_attributes = top20_ix +1
target_seq = self.target_seqs[:,step]
mask = tf.equal(top20_attributes,target_seq[:,tf.newaxis])
atten_loss = -tf.log(tf.boolean_mask(weights,mask)+eps)
atten_loss = tf.reduce_sum(atten_loss)
self.atten_loss = self.atten_loss + atten_loss
return context, new_attri_atten
def objects_attention(step,objects_features,h,obj_atten,reuse):
# attention on each objects
with tf.variable_scope('objects_att',reuse = reuse) as scope:
mapped_size = 512
obj_emb = slim.conv2d(objects_features,
mapped_size,
kernel_size=[1],
activation_fn=None,
biases_initializer=None,
reuse = reuse,
scope = 'obj_emb')
nRegions = tf.shape(obj_emb)[1]
h_emb = slim.fully_connected(h,
mapped_size,
activation_fn=None,
biases_initializer=None,
reuse = reuse,
scope = 'h_emb')
score = slim.fully_connected(tf.reshape(tf.nn.tanh(obj_emb + tf.expand_dims(h_emb,axis=1)),
[-1,mapped_size]),
1,
activation_fn=None,
biases_initializer=None,
reuse = reuse,
scope = 'score')
score = tf.reshape(score,[-1,nRegions])
weights = tf.nn.softmax(score)
context = tf.reduce_sum(tf.expand_dims(weights,axis=2)*objects_features,axis=1)
assert(isinstance(obj_atten,tf.TensorArray))
new_obj_atten = obj_atten.write(step,weights)
return context, new_obj_atten
###########################################################################
# attention on attributes, objects feature, history(previously generated words)
with tf.variable_scope('first_att',reuse = reuse):
attributes, objects_features, word_embeddings = all_inputs
attrib_atten, obj_atten = all_atten
# use attributes attention
attri_context, new_attri_atten = attributes_attention(step,attributes,h,attrib_atten,reuse = reuse)
# don't use attributes attention, directly use attributes information
#new_attri_atten = tf.TensorArray(dtype = tf.float32,size = 10)
#attri_context = tf.identity(attributes)
objects_context, new_obj_atten = objects_attention(step,objects_features,h,obj_atten,reuse = reuse)
#history_context, new_history_atten = history_attention(step,h,word_embeddings,history_atten,reuse)
all_outputs = [attri_context,objects_context]
all_new_att = [new_attri_atten,new_obj_atten]
return all_outputs, all_new_att
# second layer attention
def second_layer_attention(step,attri_context,obj_context,h,second_atten,reuse):
with tf.variable_scope('second_att',reuse = reuse):
mapped_size = 512
attri_linear = slim.fully_connected(attri_context,
mapped_size,
activation_fn=tf.nn.relu,
scope='attri_linear',
reuse = reuse)
attri_emb = slim.fully_connected(attri_linear,
mapped_size,
activation_fn=None,
scope='attr_emb',
reuse = reuse)
obj_linear = slim.fully_connected(obj_context,
mapped_size,
activation_fn = tf.nn.relu,
scope = 'obj_linear',
reuse = reuse)
obj_emb = slim.fully_connected(obj_linear,
mapped_size,
activation_fn=None,
scope='obj_emb',
reuse = reuse)
h_emb = slim.fully_connected(h,
mapped_size,
activation_fn=None,
scope='h_emb',
reuse = reuse)
inputs = tf.concat([tf.expand_dims(attri_emb,axis=1),
tf.expand_dims(obj_emb,axis=1)],axis=1)
score = slim.fully_connected(tf.reshape(tf.nn.tanh(tf.expand_dims(h_emb,axis = 1)+inputs),[-1,mapped_size]),
1,
activation_fn=None,
biases_initializer=None,
reuse = reuse,
scope='score')
score = tf.reshape(score,[-1,2])
weights = tf.nn.softmax(score)
if self.phase == 'train':
weights = tf.cond(self.freeze,lambda:tf.constant([[0.5,0.5]]),lambda: weights)
context = weights[:,0::2]*attri_linear
context = context + weights[:,1::2]*obj_linear
#context = context + weights[:,2::3]*history_linear
assert isinstance(second_atten,tf.TensorArray)
if self.phase == 'train':
mask_t = self.input_mask[:,step][:,tf.newaxis]
else:
mask_t = tf.constant(1.0)
new_second_atten = second_atten.write(step,weights*tf.cast(mask_t,tf.float32))
return context, new_second_atten
# control whether or not parameters should be reused in attended_lstm
def attended_lstm(step,states,loss,first_atten,second_att,all_inputs,save_logprob,reuse):
attributes, objects_features, seq_embeddings = all_inputs
# build_attention
c,h = tf.split(states,2,axis=1)
new_outputs,new_first_att = first_layer_attention(step,h,first_atten,all_inputs,reuse)
context,new_second_att = second_layer_attention(step,new_outputs[0],new_outputs[1],h,second_att,reuse = reuse)
# lstm
def lstm_cell(inputs,(c,h)):
'''lstm cell implementation'''
seq_embedding, context = inputs
i2h = slim.fully_connected(seq_embedding,
4*self.opt.rnn_size,
activation_fn=None,
biases_initializer=tf.contrib.layers.xavier_initializer(),
reuse = reuse,
scope='i2h')
h2h = slim.fully_connected(h,
4*self.opt.rnn_size,
activation_fn=None,
biases_initializer=tf.contrib.layers.xavier_initializer(),
reuse = reuse,
scope='h2h')
context2h = slim.fully_connected(context,
4*self.opt.rnn_size,
activation_fn=None,
biases_initializer=tf.contrib.layers.xavier_initializer(),
reuse = reuse,
scope='context2h')
all_input_sums = i2h+h2h+context2h
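# the 4*rnn_size pre-activations are split into input gate, forget gate, output gate
# and candidate cell input below, following the standard LSTM update equations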
reshaped = tf.reshape(all_input_sums,[-1,4,self.opt.rnn_size])
n1, n2, n3, n4 = reshaped[:,0],reshaped[:,1],reshaped[:,2],reshaped[:,3]
in_gate = tf.nn.sigmoid(n1)
forget_gate = tf.nn.sigmoid(n2)
out_gate = tf.nn.sigmoid(n3)
in_transform = tf.nn.tanh(n4)
next_c = forget_gate*c+in_gate*in_transform
next_h = out_gate*tf.nn.tanh(next_c)
return next_h,(next_c,next_h)
with tf.variable_scope('lstm',reuse = reuse):
# three kinds of information: sentence information, attributes information, context (attention over objects)
seq_embedding = seq_embeddings[:,step]
inputs = [seq_embedding, context]
lstm_output,new_states = lstm_cell(inputs,(c,h))
new_states = tf.concat(new_states,axis=1,name='new_states')
# _outputProb
with tf.variable_scope('logits',reuse=reuse):
MidSize = 1024
all_sum = tf.add_n([
slim.fully_connected(lstm_output,
MidSize,
reuse=reuse,
activation_fn=None,
biases_initializer=tf.contrib.layers.xavier_initializer(),
scope='h_emb3'),
slim.fully_connected(context,
MidSize,
reuse=reuse,
biases_initializer=tf.contrib.layers.xavier_initializer(),
activation_fn=None,
scope='context_emb3'),
slim.fully_connected(seq_embedding,
MidSize,
reuse=reuse,
biases_initializer=tf.contrib.layers.xavier_initializer(),
activation_fn=None,
scope='seq_emb3')])
all_sum = tf.nn.relu(all_sum)
logits = slim.fully_connected(all_sum,
self.opt.vocab_size,
activation_fn=None,
reuse = reuse,
biases_initializer=tf.contrib.layers.xavier_initializer(),
scope='logits2')
# criterion
if self.phase == 'train':
with tf.name_scope('loss'):
word_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits,
labels=self.target_seqs[:,step])
if save_logprob:
self.logprob.append(-word_loss)
weight = tf.cast(self.input_mask[:,step],tf.float32)
new_loss = loss + tf.reduce_sum(word_loss*weight)
return step+1,new_states,new_loss,new_first_att,new_second_att,all_inputs,logits
# calculate probability over vocabulary
else:
tf.nn.softmax(logits,name='prob')
return 0, new_states, 0.0, new_first_att,new_second_att,all_inputs, logits
# return the attended_lstm function
return attended_lstm
def build_model(self):
'''
build model based on objects attention and image attributes
'''
# return the create a step of forward passing function
forward_fn = self.build_forward_step()
self.forward_fn = forward_fn
# for preparation of forward passing
reuse = self.reuse
# generate lstm initial state
with tf.variable_scope('init',reuse = reuse):
h = slim.fully_connected(self.image_features,
self.opt.rnn_size,
biases_initializer=tf.contrib.layers.xavier_initializer(),
scope='F2H',
reuse = reuse)
c = slim.fully_connected(self.image_features,
self.opt.rnn_size,
biases_initializer=tf.contrib.layers.xavier_initializer(),
scope='F2C',
reuse = reuse)
init_states = tf.concat([c,h],axis=-1,name='init_states')
# max length of the sequence
if self.input_mask is not None:
length =tf.reduce_max(tf.reduce_sum(self.input_mask,axis=1))
else:
length = tf.shape(self.input_seqs)[1]
# seq_embeddings BxTxfeature_dim
seq_embeddings = self.seq_embedding(self.input_seqs,reuse = reuse)
# do it by looping
loss = tf.constant(0.0)
states = init_states
# the attention of first level
first_atten = [tf.TensorArray(dtype=tf.float32,
size=self.opt.seq_length+1),
tf.TensorArray(dtype=tf.float32,
size=self.opt.seq_length+1)]
# the attention of second level
second_atten = tf.TensorArray(dtype=tf.float32,
size=self.opt.seq_length+1)
all_inputs = [self.attri,self.objects_features,seq_embeddings]
# top 20 attributes probability and their index
self.top20_prob, self.top20_ix = tf.nn.top_k(self.attri,20)
# in train mode : loop
if self.phase == 'train':
for i in xrange(self.opt.seq_length+1):
# a step forward attended lstm
reuse = reuse or (i!=0)
_,states, loss,first_atten,second_atten, all_inputs,_ = forward_fn(tf.constant(i),states,loss,first_atten,second_atten,all_inputs,True,reuse)
words_loss = tf.div(loss,
tf.cast(tf.reduce_sum(self.input_mask),tf.float32))
self.logprob = tf.concat([logprob[:,tf.newaxis] for logprob in self.logprob],axis=1)
self.words_loss = words_loss
# how many words in input_seqs are attributes
top20_attributes = self.top20_ix +1
ntop20 = tf.reduce_sum(tf.cast(tf.equal(self.input_seqs[:,:,tf.newaxis],top20_attributes[:,tf.newaxis,:]),tf.float32))
# in case ntop20 become zero
ntop20 = tf.maximum(ntop20,1.0)
self.atten_loss = self.atten_loss/ntop20
# batch loss == words_loss + lambda* atten_loss
self.batch_loss = words_loss + self.lamb* self.atten_loss
batch_loss = self.batch_loss
self.length = i
self.weights = tf.transpose(second_atten.stack(),[1,0,2])
self.attr_atten = tf.transpose(first_atten[0].stack(),[1,0,2])
tf.summary.scalar('batch_loss',batch_loss)
else:
# in inference mode: a single forward passing
state_feed = tf.placeholder(dtype=tf.float32,shape = [None,self.opt.rnn_size*2],name='state_feed')
# seq_embeddings bxtxfeature_dims
step = tf.shape(seq_embeddings)[1]-1
all_inputs = [self.attri,self.objects_features,seq_embeddings]
_,new_states,_,first_atten,second_atten,_,_= forward_fn(step,state_feed,tf.constant(0,dtype = tf.float32),first_atten,second_atten,all_inputs,False,reuse)
def build_step(self):
with tf.variable_scope('global_step',reuse = self.reuse):
self.step = tf.get_variable(name='step',
shape=[],
dtype=tf.int32,
initializer=tf.constant_initializer(value=0,dtype=tf.int32),
trainable=False)
def build(self):
# build inputs
self.build_inputs()
# build the model
# weight and bias initializer
with slim.arg_scope([slim.fully_connected],
biases_initializer = tf.contrib.layers.xavier_initializer(),
weights_initializer = tf.contrib.layers.xavier_initializer()):
self.build_model()
# build step
if self.phase == 'train':
self.build_step()
if self.opt.scst:
self.seqs, self.seq_log_probs = self.sample()
def sample(self):
'''This builds sampling from the model, only used in scst'''
assert self.phase=='train'
reuse = True
# prepare inputs
with tf.variable_scope('init',reuse = reuse):
h = slim.fully_connected(self.image_features,
self.opt.rnn_size,
biases_initializer=tf.contrib.layers.xavier_initializer(),
scope='F2H',
reuse = reuse)
c = slim.fully_connected(self.image_features,
self.opt.rnn_size,
biases_initializer=tf.contrib.layers.xavier_initializer(),
scope='F2C',
reuse = reuse)
init_states = tf.concat([c,h],axis=-1)
states = init_states
# the attention of first level
first_atten = [tf.TensorArray(dtype=tf.float32,
size=self.opt.seq_length+1),
tf.TensorArray(dtype=tf.float32,
size=self.opt.seq_length+1)]
# the attention of second level
second_atten = tf.TensorArray(dtype=tf.float32,
size=self.opt.seq_length+1)
# one step forward function
forward_fn = self.forward_fn
batch_size = tf.shape(self.attri)[0]
seq_log_probs = []
seq = []
def sample_fn(logits,max_sample):
def sample_max():
sample_log_prob = tf.to_float(tf.reduce_max(tf.nn.log_softmax(logits),axis=-1)[:,tf.newaxis])
it = tf.to_int32(tf.argmax(logits,axis=-1)[:,tf.newaxis])
return it, sample_log_prob
def sample_random():
it = tf.to_int32(tf.multinomial(logits,1))
log_prob = tf.nn.log_softmax(logits)
sample_log_prob = tf.gather(tf.transpose(log_prob,[1,0]),tf.squeeze(it,axis=1))
sample_log_prob = tf.to_float(tf.diag_part(sample_log_prob)[:,tf.newaxis])
return it, sample_log_prob
it, sample_log_prob = tf.cond(max_sample,sample_max,sample_random)
return it, sample_log_prob
max_sample = tf.placeholder(dtype=tf.bool,shape=[],name='max_sample')
for i in xrange(self.opt.seq_length+1):
if i==0:
it = tf.ones(shape=[batch_size,1],dtype=tf.int32)*self.opt.START_TOKEN
else:
it, sample_log_prob = sample_fn(logits,max_sample)
if i>0:
if i==1:
unfinished = tf.cast(tf.logical_and(tf.not_equal(it,self.opt.END_TOKEN),
tf.not_equal(it, 0)),tf.int32)
else:
unfinished = unfinished*tf.cast(tf.logical_and(tf.not_equal(it,self.opt.END_TOKEN),
tf.not_equal(it,0)),tf.int32)
# replace end_token with zero
it = tf.cast(it,tf.int32) * unfinished
seq.append(it)
seq_log_probs.append(sample_log_prob*tf.cast(unfinished,tf.float32))
if i==0:
xt = it
seq.append(it)
else:
xt = tf.concat(seq,axis=1,name='concat{:d}'.format(i+3))
seq_embeddings = self.seq_embedding(tf.stop_gradient(xt),reuse = True)
all_inputs = [self.attri,self.objects_features,seq_embeddings]
_, states, _, first_atten, second_atten, all_inputs,logits = forward_fn(tf.constant(i),
states,
tf.constant(0.0),
first_atten,
second_atten,
all_inputs,
save_logprob = False,
reuse = True)
return tf.concat(seq,axis=1,name='concat2'), tf.concat(seq_log_probs,axis = 1,name='concat1')
|
py | 1a3c62ae981dbe0e591f6e9c036275ea2e688ecb | """
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Tests a portion of the Component CRUD Python API while the Editor is running
import azlmbr.bus as bus
import azlmbr.entity as entity
import azlmbr.editor as editor
import azlmbr.object
import azlmbr.math
from azlmbr.entity import EntityId
def CompareComponentEntityIdPairs(component1, component2):
return component1.Equal(component2)
# Open a level (any level should work)
editor.EditorToolsApplicationRequestBus(bus.Broadcast, 'OpenLevelNoPrompt', 'WaterSample')
# Get Component Types for Mesh and Comment
typeIdsList = editor.EditorComponentAPIBus(bus.Broadcast, 'FindComponentTypeIdsByEntityType', ["Mesh", "Comment", "Mesh Collider"], entity.EntityType().Game)
if(len(typeIdsList) > 0):
print("Type Ids List returned correctly")
meshComponentTypeId = typeIdsList[0]
commentComponentTypeId = typeIdsList[1]
meshColliderComponentTypeId = typeIdsList[2]
# Get Component Ids from Component Types
typeNamesList = editor.EditorComponentAPIBus(bus.Broadcast, 'FindComponentTypeNames', typeIdsList)
if(typeNamesList[0] == "Mesh") and (typeNamesList[1] == "Comment") and (typeNamesList[2] == "Mesh Collider"):
print("Type Names List returned correctly")
# Test Component API
newEntityId = editor.ToolsApplicationRequestBus(bus.Broadcast, 'CreateNewEntity', EntityId())
if (newEntityId):
print("New entity with no parent created")
hadComponent = editor.EditorComponentAPIBus(bus.Broadcast, 'HasComponentOfType', newEntityId, meshComponentTypeId)
if not(hadComponent):
print("Entity does not have a Mesh component")
meshComponentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'AddComponentsOfType', newEntityId, [meshComponentTypeId])
if (meshComponentOutcome.IsSuccess()):
print("Mesh component added to entity")
meshComponents = meshComponentOutcome.GetValue()
meshComponent = meshComponents[0]
if(meshComponent.get_entity_id() == newEntityId):
print("EntityId on the meshComponent EntityComponentIdPair matches")
if not(meshComponent.to_string() == ""):
print("EntityComponentIdPair to_string works")
hasComponent = editor.EditorComponentAPIBus(bus.Broadcast, 'HasComponentOfType', newEntityId, meshComponentTypeId)
if(hasComponent):
print("Entity has a Mesh component")
isActive = editor.EditorComponentAPIBus(bus.Broadcast, 'IsComponentEnabled', meshComponent)
if(isActive):
print("Mesh component is active")
editor.EditorComponentAPIBus(bus.Broadcast, 'DisableComponents', [meshComponent])
isNotActive = editor.EditorComponentAPIBus(bus.Broadcast, 'IsComponentEnabled', meshComponent)
if not(isNotActive):
print("Mesh component is not active")
if(editor.EditorComponentAPIBus(bus.Broadcast, 'IsValid', meshComponent)):
print("Mesh component is valid")
CommentComponentsOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'AddComponentsOfType', newEntityId, [commentComponentTypeId, commentComponentTypeId])
if (CommentComponentsOutcome.IsSuccess()):
print("Comment components added to entity")
CommentComponents = CommentComponentsOutcome.GetValue()
GetCommentComponentsOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'GetComponentsOfType', newEntityId, commentComponentTypeId)
if(GetCommentComponentsOutcome.IsSuccess()):
GetCommentComponents = GetCommentComponentsOutcome.GetValue()
if(CompareComponentEntityIdPairs(CommentComponents[0], GetCommentComponents[0]) and CompareComponentEntityIdPairs(CommentComponents[1], GetCommentComponents[1])):
print("Got both Comment components")
if(CompareComponentEntityIdPairs(CommentComponents[0], GetCommentComponents[1]) and CompareComponentEntityIdPairs(CommentComponents[1], GetCommentComponents[0])):
print("Got both Comment components")
GetComponentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'GetComponentOfType', newEntityId, meshComponentTypeId)
if(GetComponentOutcome.IsSuccess() and CompareComponentEntityIdPairs(GetComponentOutcome.GetValue(), meshComponent)):
print("GetComponent works")
commentsCount = editor.EditorComponentAPIBus(bus.Broadcast, 'CountComponentsOfType', newEntityId, commentComponentTypeId)
if(commentsCount == 2):
print("Entity has two Comment components")
editor.EditorComponentAPIBus(bus.Broadcast, 'DisableComponents', CommentComponents)
isCActive = editor.EditorComponentAPIBus(bus.Broadcast, 'IsComponentEnabled', CommentComponents[0])
isC2Active = editor.EditorComponentAPIBus(bus.Broadcast, 'IsComponentEnabled', CommentComponents[1])
if not(isCActive) and not(isC2Active):
print("Disabled both Comment components")
editor.EditorComponentAPIBus(bus.Broadcast, 'EnableComponents', CommentComponents)
isCActive = editor.EditorComponentAPIBus(bus.Broadcast, 'IsComponentEnabled', CommentComponents[0])
isC2Active = editor.EditorComponentAPIBus(bus.Broadcast, 'IsComponentEnabled', CommentComponents[1])
if (isCActive) and (isC2Active):
print("Enabled both Comment components")
editor.EditorComponentAPIBus(bus.Broadcast, 'RemoveComponents', [meshComponent])
hasMesh = editor.EditorComponentAPIBus(bus.Broadcast, 'HasComponentOfType', newEntityId, meshComponentTypeId)
componentSingleOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'AddComponentOfType', newEntityId, commentComponentTypeId)
if (componentSingleOutcome.IsSuccess()):
print("Single comment component added to entity")
commentsCount = editor.EditorComponentAPIBus(bus.Broadcast, 'CountComponentsOfType', newEntityId, commentComponentTypeId)
if (commentsCount == 3):
print("Entity has three Comment components")
if not(hasMesh):
print("Mesh Component removed")
if not(editor.EditorComponentAPIBus(bus.Broadcast, 'IsValid', meshComponent)):
print("Mesh component is no longer valid")
# Test that it is possible to access Components with no Editor Component (for example, the legacy mesh collider)
meshColliderComponentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'AddComponentsOfType', newEntityId, [meshColliderComponentTypeId])
if (meshColliderComponentOutcome.IsSuccess()):
print("Mesh Collider component added to entity")
meshColliderComponent = meshColliderComponentOutcome.GetValue()
getMeshColliderComponentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'GetComponentOfType', newEntityId, meshComponentTypeId)
if(getMeshColliderComponentOutcome.IsSuccess() and CompareComponentEntityIdPairs(meshColliderComponent, getMeshColliderComponentOutcome.GetValue())):
print("Mesh Collider component retrieved from entity")
editor.EditorComponentAPIBus(bus.Broadcast, 'RemoveComponents', [meshColliderComponent])
hasMeshCollider = editor.EditorComponentAPIBus(bus.Broadcast, 'HasComponentOfType', newEntityId, meshColliderComponentTypeId)
if not(hasMeshCollider):
print("Mesh Collider Component removed")
# Test that it is possible to access Components with no Editor Component (for example, the legacy mesh collider) via GetComponentOfType
meshColliderComponentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'AddComponentsOfType', newEntityId, [meshColliderComponentTypeId])
if (meshColliderComponentOutcome.IsSuccess()):
print("Mesh Collider component added to entity")
meshColliderComponent = meshColliderComponentOutcome.GetValue()[0]
getMeshColliderComponentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'GetComponentOfType', newEntityId, meshColliderComponentTypeId)
if(getMeshColliderComponentOutcome.IsSuccess() and CompareComponentEntityIdPairs(meshColliderComponent, getMeshColliderComponentOutcome.GetValue())):
print("Mesh Collider component retrieved from entity")
meshColliderRemoved = False;
meshColliderRemoved = editor.EditorComponentAPIBus(bus.Broadcast, 'RemoveComponents', [meshColliderComponent])
if meshColliderRemoved:
print("Mesh Collider component removed from entity")
editor.EditorToolsApplicationRequestBus(bus.Broadcast, 'ExitNoPrompt')
|
py | 1a3c63395a9218faca1cb43c25ec9293846d1596 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright 2017-2020 Airinnova AB and the PyTornado authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------
# Authors:
# * Alessandro Gastaldi
# * Aaron Dettmann
"""
Visualisation of the VLM downwash matrix
Developed at Airinnova AB, Stockholm, Sweden.
"""
import logging
import numpy as np
import matplotlib.pyplot as plt
import pytornado.plot.plottools as pt
logger = logging.getLogger(__name__)
def view_downwash(vlmdata, plt_settings):
"""
Visualise matrix of downwash factors
Args:
:vlmdata: (object) data structure for VLM analysis data
:plt_settings: general plot settings
"""
logger.info("Generating downwash plot...")
if not isinstance(vlmdata.matrix_downwash, np.ndarray):
err_msg = "Downwash factor matrix is not a numpy array"
logger.error(err_msg)
raise TypeError(err_msg)
figure = plt.figure(figsize=(9, 9))
axes = figure.add_subplot(111)
axes.set_aspect('equal')
axes.matshow(vlmdata.matrix_downwash, cmap=pt.C.COLORMAP)
axes.set_xlabel('i')
axes.set_ylabel('j')
axes.set_title("Downwash factor matrix")
pt.show_and_save(plt_settings, (figure, 'downwash'))
plt.close('all')
|
py | 1a3c63c54956849688d9aa90d3d9edcce9fb0009 | # Originally contributed by Stefan Schukat as part of this arbitrary-sized
# arrays patch.
from win32com.client import gencache
from win32com.test import util
import unittest
ZeroD = 0
OneDEmpty = []
OneD = [1, 2, 3]
TwoD = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
TwoD1 = [[[1, 2, 3, 5], [1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3], [1, 2, 3]]]
OneD1 = [[[1, 2, 3], [1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]]
OneD2 = [
[1, 2, 3],
[1, 2, 3, 4, 5],
[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5]],
]
ThreeD = [[[1, 2, 3], [1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3], [1, 2, 3]]]
FourD = [
[
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
],
[
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
],
]
LargeD = [
[[list(range(10))] * 10],
] * 512
def _normalize_array(a):
if type(a) != type(()):
return a
ret = []
for i in a:
ret.append(_normalize_array(i))
return ret
class ArrayTest(util.TestCase):
def setUp(self):
self.arr = gencache.EnsureDispatch("PyCOMTest.ArrayTest")
def tearDown(self):
self.arr = None
def _doTest(self, array):
self.arr.Array = array
self.assertEqual(_normalize_array(self.arr.Array), array)
def testZeroD(self):
self._doTest(ZeroD)
def testOneDEmpty(self):
self._doTest(OneDEmpty)
def testOneD(self):
self._doTest(OneD)
def testTwoD(self):
self._doTest(TwoD)
def testThreeD(self):
self._doTest(ThreeD)
def testFourD(self):
self._doTest(FourD)
def testTwoD1(self):
self._doTest(TwoD1)
def testOneD1(self):
self._doTest(OneD1)
def testOneD2(self):
self._doTest(OneD2)
def testLargeD(self):
self._doTest(LargeD)
if __name__ == "__main__":
try:
util.testmain()
except SystemExit as rc:
if not rc:
raise
|
py | 1a3c64575b6b7172eebde29638d84b19be39d456 | from . import ImageCaptionsDataset, RSICD, Sydney, UCM |
bzl | 1a3c6517e69f96b930f3d55c86fad25fb6094ba0 | """Package variables module.
Package-scoped configuration variable definitions.
"""
PKG_DEBUG_OPT = select({":enable_debug": ["-g"], "//conditions:default": []})
PKG_VERBOSE_OPT = select({":enable_verbose": ["-verbose"], "//conditions:default": []})
PKG_OPTS = PKG_DEBUG_OPT + PKG_VERBOSE_OPT
PKG_PPX_EXECUTABLE_OPTS = PKG_OPTS
PKG_PPX_MODULE_OPTS = PKG_OPTS
PKG_PPX_ARCHIVE_OPTS = PKG_OPTS
PKG_NS_MODULE_OPTS = PKG_OPTS
|
py | 1a3c65a364f1794f66836001a164a2c47f9cc6b8 | # -*- coding: utf-8 -*-
# !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""sktime window forecaster base class."""
__author__ = ["@mloning", "@big-o"]
__all__ = ["_BaseWindowForecaster"]
import numpy as np
import pandas as pd
from sktime.forecasting.base._base import BaseForecaster
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.model_selection import CutoffSplitter
from sktime.forecasting.model_selection import SlidingWindowSplitter
from sktime.utils.datetime import _shift
from sktime.utils.validation.forecasting import check_cv
class _BaseWindowForecaster(BaseForecaster):
"""Base class for forecasters that use sliding windows."""
def __init__(self, window_length=None):
super(_BaseWindowForecaster, self).__init__()
self.window_length = window_length
self.window_length_ = None
def update_predict(
self,
y,
cv=None,
X=None,
update_params=True,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Make and update predictions iteratively over the test set.
Parameters
----------
y : pd.Series
cv : temporal cross-validation generator, optional (default=None)
X : pd.DataFrame, optional (default=None)
update_params : bool, optional (default=True)
return_pred_int : bool, optional (default=False)
alpha : int or list of ints, optional (default=None)
Returns
-------
y_pred : pd.Series or pd.DataFrame
"""
if cv is not None:
cv = check_cv(cv)
else:
cv = SlidingWindowSplitter(
self.fh.to_relative(self.cutoff),
window_length=self.window_length_,
start_with_window=False,
)
return self._predict_moving_cutoff(
y,
cv,
X,
update_params=update_params,
return_pred_int=return_pred_int,
alpha=alpha,
)
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""Predict core logic."""
if return_pred_int:
raise NotImplementedError()
kwargs = {"X": X, "return_pred_int": return_pred_int, "alpha": alpha}
# all values are out-of-sample
if fh.is_all_out_of_sample(self.cutoff):
return self._predict_fixed_cutoff(
fh.to_out_of_sample(self.cutoff), **kwargs
)
# all values are in-sample
elif fh.is_all_in_sample(self.cutoff):
return self._predict_in_sample(fh.to_in_sample(self.cutoff), **kwargs)
# both in-sample and out-of-sample values
else:
y_ins = self._predict_in_sample(fh.to_in_sample(self.cutoff), **kwargs)
y_oos = self._predict_fixed_cutoff(
fh.to_out_of_sample(self.cutoff), **kwargs
)
return y_ins.append(y_oos)
def _predict_fixed_cutoff(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Make single-step or multi-step fixed cutoff predictions.
Parameters
----------
fh : np.array
all positive (> 0)
X : pd.DataFrame
return_pred_int : bool
alpha : float or array-like
Returns
-------
y_pred = pd.Series
"""
# assert all(fh > 0)
y_pred = self._predict_last_window(
fh, X, return_pred_int=return_pred_int, alpha=alpha
)
index = fh.to_absolute(self.cutoff)
return pd.Series(y_pred, index=index)
def _predict_in_sample(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Make in-sample prediction using single-step moving-cutoff predictions.
Parameters
----------
fh : np.array
all non-positive (<= 0)
X : pd.DataFrame
return_pred_int : bool
alpha : float or array-like
Returns
-------
y_pred : pd.DataFrame or pd.Series
"""
y_train = self._y
# generate cutoffs from forecasting horizon, note that cutoffs are
# still based on integer indexes, so that they can be used with .iloc
cutoffs = fh.to_relative(self.cutoff) + len(y_train) - 2
cv = CutoffSplitter(cutoffs, fh=1, window_length=self.window_length_)
return self._predict_moving_cutoff(
y_train,
cv,
X,
update_params=False,
return_pred_int=return_pred_int,
alpha=alpha,
)
def _predict_last_window(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Predict core logic.
Parameters
----------
fh : np.array
X : pd.DataFrame
return_pred_int : bool
alpha : float or list of floats
Returns
-------
y_pred : np.array
"""
raise NotImplementedError("abstract method")
def _get_last_window(self):
"""Select last window."""
# Get the start and end points of the last window.
cutoff = self.cutoff
start = _shift(cutoff, by=-self.window_length_ + 1)
# Get the last window of the endogenous variable.
y = self._y.loc[start:cutoff].to_numpy()
# If X is given, also get the last window of the exogenous variables.
X = self._X.loc[start:cutoff].to_numpy() if self._X is not None else None
return y, X
@staticmethod
def _predict_nan(fh):
"""Predict nan if predictions are not possible."""
return np.full(len(fh), np.nan)
def _update_predict_single(
self,
y,
fh,
X=None,
update_params=True,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Update and make forecasts, core logic..
Implements default behaviour of calling update and predict
sequentially, but can be overwritten by subclasses
to implement more efficient updating algorithms when available.
Parameters
----------
y
fh
X
update_params
return_pred_int
alpha
Returns
-------
predictions
"""
if X is not None:
raise NotImplementedError()
self.update(y, X, update_params=update_params)
return self._predict(fh, X, return_pred_int=return_pred_int, alpha=alpha)
def _format_moving_cutoff_predictions(y_preds, cutoffs):
"""Format moving-cutoff predictions."""
if not isinstance(y_preds, list):
raise ValueError(f"`y_preds` must be a list, but found: {type(y_preds)}")
if len(y_preds[0]) == 1:
# return series for single step ahead predictions
return pd.concat(y_preds)
else:
# return data frame when we predict multiple steps ahead
y_pred = pd.DataFrame(y_preds).T
y_pred.columns = cutoffs
if y_pred.shape[1] == 1:
return y_pred.iloc[:, 0]
return y_pred
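# --- Illustrative sketch, not part of the original module ---------------------
# The abstract hooks above (_predict_last_window, _get_last_window) are meant to
# be filled in by concrete forecasters. The toy subclass below repeats the last
# observed value over the horizon; its name, the naive logic, and the exact _fit
# signature are assumptions for illustration only and may differ between sktime
# versions.
import numpy as np  # harmless if already imported earlier in this module
class _NaiveLastValueForecaster(_BaseWindowForecaster):
    """Toy forecaster: repeat the last value of the training window."""
    def _fit(self, y, X=None, fh=None):
        # fall back to a single-observation window if none was requested
        self.window_length_ = self.window_length or 1
        return self
    def _predict_last_window(
        self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
    ):
        y_last, _ = self._get_last_window()
        if len(y_last) == 0:
            # no usable window -> predict NaN for every horizon step
            return self._predict_nan(fh)
        # broadcast the most recent observation over the forecasting horizon
        return np.repeat(y_last[-1], len(fh))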
|
py | 1a3c65f41997d8cb9a050a4800b3ab9bfd2209fb | import json
import uuid
import factory
from django.conf import settings
from django.db import connection
from django.test import TestCase
from django.utils import timezone
from facility_profile.models import Facility
from morango.controller import MorangoProfileController
from morango.models import (Buffer, DatabaseIDModel, InstanceIDModel,
RecordMaxCounter, RecordMaxCounterBuffer, Store,
SyncSession, TransferSession)
from morango.syncsession import SyncClient
from morango.utils.sync_utils import _dequeue_into_store, _queue_into_buffer
from morango.utils.backends.utils import load_backend
from .helpers import (create_buffer_and_store_dummy_data,
create_dummy_store_data)
DBBackend = load_backend(connection).SQLWrapper()
class FacilityModelFactory(factory.DjangoModelFactory):
class Meta:
model = Facility
name = factory.Sequence(lambda n: "Fac %d" % n)
class QueueStoreIntoBufferTestCase(TestCase):
def setUp(self):
settings.MORANGO_SERIALIZE_BEFORE_QUEUING = False
self.data = create_dummy_store_data()
def assertRecordsBuffered(self, records):
buffer_ids = Buffer.objects.values_list('model_uuid', flat=True)
rmcb_ids = RecordMaxCounterBuffer.objects.values_list('model_uuid', flat=True)
# ensure all store and buffer records are buffered
for i in records:
self.assertIn(i.id, buffer_ids)
self.assertIn(i.id, rmcb_ids)
def assertRecordsNotBuffered(self, records):
buffer_ids = Buffer.objects.values_list('model_uuid', flat=True)
rmcb_ids = RecordMaxCounterBuffer.objects.values_list('model_uuid', flat=True)
        # ensure none of the given records are buffered
for i in records:
self.assertNotIn(i.id, buffer_ids)
self.assertNotIn(i.id, rmcb_ids)
def test_all_fsics(self):
fsics = {self.data['group1_id'].id: 1, self.data['group2_id'].id: 1}
self.data['sc'].current_transfer_session.client_fsic = json.dumps(fsics)
_queue_into_buffer(self.data['sc'].current_transfer_session)
# ensure all store and buffer records are buffered
self.assertRecordsBuffered(self.data['group1_c1'])
self.assertRecordsBuffered(self.data['group1_c2'])
self.assertRecordsBuffered(self.data['group2_c1'])
def test_fsic_specific_id(self):
fsics = {self.data['group2_id'].id: 1}
self.data['sc'].current_transfer_session.client_fsic = json.dumps(fsics)
_queue_into_buffer(self.data['sc'].current_transfer_session)
# ensure only records modified with 2nd instance id are buffered
self.assertRecordsNotBuffered(self.data['group1_c1'])
self.assertRecordsNotBuffered(self.data['group1_c2'])
self.assertRecordsBuffered(self.data['group2_c1'])
def test_fsic_counters(self):
counter = InstanceIDModel.objects.get(id=self.data['group1_id'].id).counter
fsics = {self.data['group1_id'].id: counter - 1}
self.data['sc'].current_transfer_session.client_fsic = json.dumps(fsics)
fsics[self.data['group1_id'].id] = 0
self.data['sc'].current_transfer_session.server_fsic = json.dumps(fsics)
_queue_into_buffer(self.data['sc'].current_transfer_session)
# ensure only records with updated 1st instance id are buffered
self.assertRecordsBuffered(self.data['group1_c1'])
self.assertRecordsBuffered(self.data['group1_c2'])
self.assertRecordsNotBuffered(self.data['group2_c1'])
def test_fsic_counters_too_high(self):
fsics = {self.data['group1_id'].id: 100, self.data['group2_id'].id: 100}
self.data['sc'].current_transfer_session.client_fsic = json.dumps(fsics)
self.data['sc'].current_transfer_session.server_fsic = json.dumps(fsics)
_queue_into_buffer(self.data['sc'].current_transfer_session)
# ensure no records are buffered
self.assertFalse(Buffer.objects.all())
self.assertFalse(RecordMaxCounterBuffer.objects.all())
def test_partition_filter_buffering(self):
fsics = {self.data['group2_id'].id: 1}
filter_prefixes = '{}:user:summary\n{}:user:interaction'.format(self.data['user3'].id, self.data['user3'].id)
self.data['sc'].current_transfer_session.filter = filter_prefixes
self.data['sc'].current_transfer_session.client_fsic = json.dumps(fsics)
_queue_into_buffer(self.data['sc'].current_transfer_session)
        # ensure only records matching the partition filter prefixes are buffered
self.assertRecordsNotBuffered([self.data['user2']])
self.assertRecordsBuffered(self.data['user3_sumlogs'])
self.assertRecordsBuffered(self.data['user3_interlogs'])
def test_partition_prefix_buffering(self):
fsics = {self.data['group2_id'].id: 1}
filter_prefixes = '{}'.format(self.data['user2'].id)
self.data['sc'].current_transfer_session.filter = filter_prefixes
self.data['sc'].current_transfer_session.client_fsic = json.dumps(fsics)
_queue_into_buffer(self.data['sc'].current_transfer_session)
# ensure only records with user2 partition are buffered
self.assertRecordsBuffered([self.data['user2']])
self.assertRecordsBuffered(self.data['user2_sumlogs'])
self.assertRecordsBuffered(self.data['user2_interlogs'])
self.assertRecordsNotBuffered([self.data['user3']])
def test_partition_and_fsic_buffering(self):
filter_prefixes = '{}:user:summary'.format(self.data['user1'].id)
fsics = {self.data['group1_id'].id: 1}
self.data['sc'].current_transfer_session.filter = filter_prefixes
self.data['sc'].current_transfer_session.client_fsic = json.dumps(fsics)
_queue_into_buffer(self.data['sc'].current_transfer_session)
# ensure records updated with 1st instance id and summarylog partition are buffered
self.assertRecordsBuffered(self.data['user1_sumlogs'])
self.assertRecordsNotBuffered(self.data['user2_sumlogs'])
self.assertRecordsNotBuffered(self.data['user3_sumlogs'])
def test_valid_fsic_but_invalid_partition(self):
filter_prefixes = '{}:user:summary'.format(self.data['user1'].id)
fsics = {self.data['group2_id'].id: 1}
self.data['sc'].current_transfer_session.filter = filter_prefixes
self.data['sc'].current_transfer_session.client_fsic = json.dumps(fsics)
_queue_into_buffer(self.data['sc'].current_transfer_session)
# ensure that record with valid fsic but invalid partition is not buffered
self.assertRecordsNotBuffered([self.data['user4']])
class BufferIntoStoreTestCase(TestCase):
def setUp(self):
settings.MORANGO_DESERIALIZE_AFTER_DEQUEUING = False
self.data = {}
DatabaseIDModel.objects.create()
(self.current_id, _) = InstanceIDModel.get_or_create_current_instance()
# create controllers for app/store/buffer operations
self.data['mc'] = MorangoProfileController('facilitydata')
self.data['sc'] = SyncClient(None, 'host')
session = SyncSession.objects.create(id=uuid.uuid4().hex, profile="", last_activity_timestamp=timezone.now())
self.data['sc'].current_transfer_session = TransferSession.objects.create(id=uuid.uuid4().hex, sync_session=session, push=True, last_activity_timestamp=timezone.now())
self.data.update(create_buffer_and_store_dummy_data(self.data['sc'].current_transfer_session.id))
def test_dequeuing_delete_rmcb_records(self):
for i in self.data['model1_rmcb_ids']:
self.assertTrue(RecordMaxCounterBuffer.objects.filter(instance_id=i, model_uuid=self.data['model1']).exists())
with connection.cursor() as cursor:
DBBackend._dequeuing_delete_rmcb_records(cursor, self.data['sc'].current_transfer_session.id)
for i in self.data['model1_rmcb_ids']:
self.assertFalse(RecordMaxCounterBuffer.objects.filter(instance_id=i, model_uuid=self.data['model1']).exists())
# ensure other records were not deleted
for i in self.data['model2_rmcb_ids']:
self.assertTrue(RecordMaxCounterBuffer.objects.filter(instance_id=i, model_uuid=self.data['model2']).exists())
def test_dequeuing_delete_buffered_records(self):
self.assertTrue(Buffer.objects.filter(model_uuid=self.data['model1']).exists())
with connection.cursor() as cursor:
DBBackend._dequeuing_delete_buffered_records(cursor, self.data['sc'].current_transfer_session.id)
self.assertFalse(Buffer.objects.filter(model_uuid=self.data['model1']).exists())
# ensure other records were not deleted
self.assertTrue(Buffer.objects.filter(model_uuid=self.data['model2']).exists())
def test_dequeuing_merge_conflict_rmcb_greater_than_rmc(self):
rmc = RecordMaxCounter.objects.get(instance_id=self.data['model2_rmc_ids'][0], store_model_id=self.data['model2'])
rmcb = RecordMaxCounterBuffer.objects.get(instance_id=self.data['model2_rmc_ids'][0], model_uuid=self.data['model2'])
self.assertNotEqual(rmc.counter, rmcb.counter)
self.assertGreaterEqual(rmcb.counter, rmc.counter)
with connection.cursor() as cursor:
DBBackend._dequeuing_merge_conflict_rmcb(cursor, self.data['sc'].current_transfer_session.id)
rmc = RecordMaxCounter.objects.get(instance_id=self.data['model2_rmc_ids'][0], store_model_id=self.data['model2'])
rmcb = RecordMaxCounterBuffer.objects.get(instance_id=self.data['model2_rmc_ids'][0], model_uuid=self.data['model2'])
self.assertEqual(rmc.counter, rmcb.counter)
def test_dequeuing_merge_conflict_rmcb_less_than_rmc(self):
rmc = RecordMaxCounter.objects.get(instance_id=self.data['model5_rmc_ids'][0], store_model_id=self.data['model5'])
rmcb = RecordMaxCounterBuffer.objects.get(instance_id=self.data['model5_rmc_ids'][0], model_uuid=self.data['model5'])
self.assertNotEqual(rmc.counter, rmcb.counter)
self.assertGreaterEqual(rmc.counter, rmcb.counter)
with connection.cursor() as cursor:
DBBackend._dequeuing_merge_conflict_rmcb(cursor, self.data['sc'].current_transfer_session.id)
rmc = RecordMaxCounter.objects.get(instance_id=self.data['model5_rmc_ids'][0], store_model_id=self.data['model5'])
rmcb = RecordMaxCounterBuffer.objects.get(instance_id=self.data['model5_rmc_ids'][0], model_uuid=self.data['model5'])
self.assertNotEqual(rmc.counter, rmcb.counter)
self.assertGreaterEqual(rmc.counter, rmcb.counter)
def test_dequeuing_merge_conflict_buffer_rmcb_greater_than_rmc(self):
store = Store.objects.get(id=self.data['model2'])
self.assertNotEqual(store.last_saved_instance, self.current_id.id)
self.assertEqual(store.conflicting_serialized_data, "store")
self.assertFalse(store.deleted)
with connection.cursor() as cursor:
current_id = InstanceIDModel.get_current_instance_and_increment_counter()
DBBackend._dequeuing_merge_conflict_buffer(cursor, current_id, self.data['sc'].current_transfer_session.id)
store = Store.objects.get(id=self.data['model2'])
self.assertEqual(store.last_saved_instance, current_id.id)
self.assertEqual(store.last_saved_counter, current_id.counter)
self.assertEqual(store.conflicting_serialized_data, "buffer\nstore")
self.assertTrue(store.deleted)
def test_dequeuing_merge_conflict_buffer_rmcb_less_rmc(self):
store = Store.objects.get(id=self.data['model5'])
self.assertNotEqual(store.last_saved_instance, self.current_id.id)
self.assertEqual(store.conflicting_serialized_data, "store")
with connection.cursor() as cursor:
current_id = InstanceIDModel.get_current_instance_and_increment_counter()
DBBackend._dequeuing_merge_conflict_buffer(cursor, current_id, self.data['sc'].current_transfer_session.id)
store = Store.objects.get(id=self.data['model5'])
self.assertEqual(store.last_saved_instance, current_id.id)
self.assertEqual(store.last_saved_counter, current_id.counter)
self.assertEqual(store.conflicting_serialized_data, "buffer\nstore")
def test_dequeuing_update_rmcs_last_saved_by(self):
self.assertFalse(RecordMaxCounter.objects.filter(instance_id=self.current_id.id).exists())
with connection.cursor() as cursor:
current_id = InstanceIDModel.get_current_instance_and_increment_counter()
DBBackend._dequeuing_update_rmcs_last_saved_by(cursor, current_id, self.data['sc'].current_transfer_session.id)
self.assertTrue(RecordMaxCounter.objects.filter(instance_id=current_id.id).exists())
def test_dequeuing_delete_mc_buffer(self):
self.assertTrue(Buffer.objects.filter(model_uuid=self.data['model2']).exists())
with connection.cursor() as cursor:
DBBackend._dequeuing_delete_mc_buffer(cursor, self.data['sc'].current_transfer_session.id)
self.assertFalse(Buffer.objects.filter(model_uuid=self.data['model2']).exists())
# ensure other records were not deleted
self.assertTrue(Buffer.objects.filter(model_uuid=self.data['model3']).exists())
def test_dequeuing_delete_mc_rmcb(self):
self.assertTrue(RecordMaxCounterBuffer.objects.filter(model_uuid=self.data['model2'], instance_id=self.data['model2_rmcb_ids'][0]).exists())
with connection.cursor() as cursor:
DBBackend._dequeuing_delete_mc_rmcb(cursor, self.data['sc'].current_transfer_session.id)
self.assertFalse(RecordMaxCounterBuffer.objects.filter(model_uuid=self.data['model2'], instance_id=self.data['model2_rmcb_ids'][0]).exists())
self.assertTrue(RecordMaxCounterBuffer.objects.filter(model_uuid=self.data['model2'], instance_id=self.data['model2_rmcb_ids'][1]).exists())
# ensure other records were not deleted
self.assertTrue(RecordMaxCounterBuffer.objects.filter(model_uuid=self.data['model3'], instance_id=self.data['model3_rmcb_ids'][0]).exists())
def test_dequeuing_insert_remaining_buffer(self):
self.assertNotEqual(Store.objects.get(id=self.data['model3']).serialized, "buffer")
self.assertFalse(Store.objects.filter(id=self.data['model4']).exists())
with connection.cursor() as cursor:
DBBackend._dequeuing_insert_remaining_buffer(cursor, self.data['sc'].current_transfer_session.id)
self.assertEqual(Store.objects.get(id=self.data['model3']).serialized, "buffer")
self.assertTrue(Store.objects.filter(id=self.data['model4']).exists())
def test_dequeuing_insert_remaining_rmcb(self):
for i in self.data['model4_rmcb_ids']:
self.assertFalse(RecordMaxCounter.objects.filter(instance_id=i, store_model_id=self.data['model4']).exists())
with connection.cursor() as cursor:
DBBackend._dequeuing_insert_remaining_buffer(cursor, self.data['sc'].current_transfer_session.id)
DBBackend._dequeuing_insert_remaining_rmcb(cursor, self.data['sc'].current_transfer_session.id)
for i in self.data['model4_rmcb_ids']:
self.assertTrue(RecordMaxCounter.objects.filter(instance_id=i, store_model_id=self.data['model4']).exists())
def test_dequeuing_delete_remaining_rmcb(self):
self.assertTrue(RecordMaxCounterBuffer.objects.filter(transfer_session_id=self.data['sc'].current_transfer_session.id).exists())
with connection.cursor() as cursor:
DBBackend._dequeuing_delete_remaining_rmcb(cursor, self.data['sc'].current_transfer_session.id)
self.assertFalse(RecordMaxCounterBuffer.objects.filter(transfer_session_id=self.data['sc'].current_transfer_session.id).exists())
def test_dequeuing_delete_remaining_buffer(self):
self.assertTrue(Buffer.objects.filter(transfer_session_id=self.data['sc'].current_transfer_session.id).exists())
with connection.cursor() as cursor:
DBBackend._dequeuing_delete_remaining_buffer(cursor, self.data['sc'].current_transfer_session.id)
self.assertFalse(Buffer.objects.filter(transfer_session_id=self.data['sc'].current_transfer_session.id).exists())
def test_dequeue_into_store(self):
_dequeue_into_store(self.data['sc'].current_transfer_session)
# ensure a record with different transfer session id is not affected
self.assertTrue(Buffer.objects.filter(transfer_session_id=self.data['tfs_id']).exists())
self.assertFalse(Store.objects.filter(id=self.data['model6']).exists())
self.assertFalse(RecordMaxCounter.objects.filter(store_model_id=self.data['model6'], instance_id__in=self.data['model6_rmcb_ids']).exists())
# ensure reverse fast forward records are not modified
self.assertNotEqual(Store.objects.get(id=self.data['model1']).serialized, "buffer")
self.assertFalse(RecordMaxCounter.objects.filter(instance_id=self.data['model1_rmcb_ids'][1]).exists())
# ensure records with merge conflicts are modified
self.assertEqual(Store.objects.get(id=self.data['model2']).conflicting_serialized_data, "buffer\nstore") # conflicting field is overwritten
self.assertEqual(Store.objects.get(id=self.data['model5']).conflicting_serialized_data, "buffer\nstore")
self.assertTrue(RecordMaxCounter.objects.filter(instance_id=self.data['model2_rmcb_ids'][1]).exists())
self.assertTrue(RecordMaxCounter.objects.filter(instance_id=self.data['model5_rmcb_ids'][1]).exists())
self.assertEqual(Store.objects.get(id=self.data['model2']).last_saved_instance, InstanceIDModel.get_or_create_current_instance()[0].id)
self.assertEqual(Store.objects.get(id=self.data['model5']).last_saved_instance, InstanceIDModel.get_or_create_current_instance()[0].id)
# ensure fast forward records are modified
self.assertEqual(Store.objects.get(id=self.data['model3']).serialized, "buffer") # serialized field is overwritten
self.assertTrue(RecordMaxCounter.objects.filter(instance_id=self.data['model3_rmcb_ids'][1]).exists())
self.assertEqual(Store.objects.get(id=self.data['model3']).last_saved_instance, self.data['model3_rmcb_ids'][1]) # last_saved_by is updated
self.assertEqual(RecordMaxCounter.objects.get(instance_id=self.data['model3_rmcb_ids'][0], store_model_id=self.data['model3']).counter, 3)
# ensure all buffer and rmcb records were deleted for this transfer session id
self.assertFalse(Buffer.objects.filter(transfer_session_id=self.data['sc'].current_transfer_session.id).exists())
self.assertFalse(RecordMaxCounterBuffer.objects.filter(transfer_session_id=self.data['sc'].current_transfer_session.id).exists())
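# --- Illustrative sketch, not part of the original test module ----------------
# The tests above drive _queue_into_buffer through two pieces of transfer-session
# state: `client_fsic`, a JSON-encoded dict mapping instance id -> counter, and
# `filter`, newline-separated partition prefixes. The helper below only shows
# that shape; its name and the literal values are assumptions for illustration.
def _example_transfer_session_state(instance_id, user_id):
    fsics = json.dumps({instance_id: 1})
    filter_prefixes = "\n".join(
        ["{}:user:summary".format(user_id), "{}:user:interaction".format(user_id)]
    )
    return fsics, filter_prefixes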
|
py | 1a3c6729638d1fdae2324149d11ba075c45770b5 | """
Unbound Zone Feeder feeds zone information to Unbound via the control interface
@copyright: (C) 2019 Gordon BLeux <>
@license: Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
|
py | 1a3c6962c94c76cf7968532083ef38f825b03529 | import logging
import yaml
from .dict_util import deep_dict_merge
from .loader import IncludeLoader
logger = logging.getLogger(__name__)
def load_global_config(global_cfg_paths):
"""Given a list of file paths to global config files, load each of them and
return the joined dictionary.
This does a deep dict merge.
Args:
global_cfg_paths (list(str)): List of filenames to load from
Returns:
dict: joined global configs
"""
global_cfg = {}
if global_cfg_paths:
logger.debug("Loading global config from %s", global_cfg_paths)
for filename in global_cfg_paths:
with open(filename, "r") as gfileobj:
contents = yaml.load(gfileobj, Loader=IncludeLoader)
global_cfg = deep_dict_merge(global_cfg, contents)
return global_cfg
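# --- Illustrative sketch, not part of the original module ---------------------
# Each later file is merged on top of the accumulated dict, so later files take
# precedence for conflicting keys (assuming deep_dict_merge favours its second
# argument). The file names and keys below are assumptions for illustration only.
def _example_global_config_merge():
    # base.yaml:     {"request": {"timeout": 5, "verify": True}}
    # override.yaml: {"request": {"timeout": 30}}
    # merged result: {"request": {"timeout": 30, "verify": True}}
    return load_global_config(["base.yaml", "override.yaml"])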
|
py | 1a3c6b52a0d9768ac63c46cc26962d332dd9a919 | """Media Player component to integrate TVs exposing the Joint Space API."""
from __future__ import annotations
from haphilipsjs import ConnectionFailure
from homeassistant.components.media_player import (
BrowseMedia,
MediaPlayerDeviceClass,
MediaPlayerEntity,
)
from homeassistant.components.media_player.const import (
MEDIA_CLASS_APP,
MEDIA_CLASS_CHANNEL,
MEDIA_CLASS_DIRECTORY,
MEDIA_TYPE_APP,
MEDIA_TYPE_APPS,
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_CHANNELS,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import LOGGER as _LOGGER, PhilipsTVDataUpdateCoordinator
from .const import DOMAIN
SUPPORT_PHILIPS_JS = (
SUPPORT_TURN_OFF
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_SELECT_SOURCE
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_BROWSE_MEDIA
| SUPPORT_PLAY
| SUPPORT_PAUSE
| SUPPORT_STOP
)
CONF_ON_ACTION = "turn_on_action"
def _inverted(data):
return {v: k for k, v in data.items()}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the configuration entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[
PhilipsTVMediaPlayer(
coordinator,
)
]
)
class PhilipsTVMediaPlayer(CoordinatorEntity, MediaPlayerEntity):
"""Representation of a Philips TV exposing the JointSpace API."""
_coordinator: PhilipsTVDataUpdateCoordinator
_attr_device_class = MediaPlayerDeviceClass.TV
def __init__(
self,
coordinator: PhilipsTVDataUpdateCoordinator,
) -> None:
"""Initialize the Philips TV."""
self._tv = coordinator.api
self._coordinator = coordinator
self._sources = {}
self._channels = {}
self._supports = SUPPORT_PHILIPS_JS
self._system = coordinator.system
self._attr_name = coordinator.system["name"]
self._attr_unique_id = coordinator.unique_id
self._attr_device_info = DeviceInfo(
identifiers={
(DOMAIN, coordinator.unique_id),
},
manufacturer="Philips",
model=coordinator.system.get("model"),
sw_version=coordinator.system.get("softwareversion"),
name=coordinator.system["name"],
)
self._state = STATE_OFF
self._media_content_type: str | None = None
self._media_content_id: str | None = None
self._media_title: str | None = None
self._media_channel: str | None = None
super().__init__(coordinator)
self._update_from_coordinator()
async def _async_update_soon(self):
"""Reschedule update task."""
self.async_write_ha_state()
await self.coordinator.async_request_refresh()
@property
def supported_features(self):
"""Flag media player features that are supported."""
supports = self._supports
if self._coordinator.turn_on or (
self._tv.on and self._tv.powerstate is not None
):
supports |= SUPPORT_TURN_ON
return supports
@property
def state(self):
"""Get the device state. An exception means OFF state."""
if self._tv.on and (self._tv.powerstate == "On" or self._tv.powerstate is None):
return STATE_ON
return STATE_OFF
@property
def source(self):
"""Return the current input source."""
return self._sources.get(self._tv.source_id)
@property
def source_list(self):
"""List of available input sources."""
return list(self._sources.values())
async def async_select_source(self, source):
"""Set the input source."""
if source_id := _inverted(self._sources).get(source):
await self._tv.setSource(source_id)
await self._async_update_soon()
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._tv.volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._tv.muted
async def async_turn_on(self):
"""Turn on the device."""
if self._tv.on and self._tv.powerstate:
await self._tv.setPowerState("On")
self._state = STATE_ON
else:
await self._coordinator.turn_on.async_run(self.hass, self._context)
await self._async_update_soon()
async def async_turn_off(self):
"""Turn off the device."""
if self._state == STATE_ON:
await self._tv.sendKey("Standby")
self._state = STATE_OFF
await self._async_update_soon()
else:
_LOGGER.debug("Ignoring turn off when already in expected state")
async def async_volume_up(self):
"""Send volume up command."""
await self._tv.sendKey("VolumeUp")
await self._async_update_soon()
async def async_volume_down(self):
"""Send volume down command."""
await self._tv.sendKey("VolumeDown")
await self._async_update_soon()
async def async_mute_volume(self, mute):
"""Send mute command."""
if self._tv.muted != mute:
await self._tv.sendKey("Mute")
await self._async_update_soon()
else:
_LOGGER.debug("Ignoring request when already in expected state")
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self._tv.setVolume(volume, self._tv.muted)
await self._async_update_soon()
async def async_media_previous_track(self):
"""Send rewind command."""
await self._tv.sendKey("Previous")
await self._async_update_soon()
async def async_media_next_track(self):
"""Send fast forward command."""
await self._tv.sendKey("Next")
await self._async_update_soon()
async def async_media_play_pause(self):
"""Send pause command to media player."""
if self._tv.quirk_playpause_spacebar:
await self._tv.sendUnicode(" ")
else:
await self._tv.sendKey("PlayPause")
await self._async_update_soon()
async def async_media_play(self):
"""Send pause command to media player."""
await self._tv.sendKey("Play")
await self._async_update_soon()
async def async_media_pause(self):
"""Send play command to media player."""
await self._tv.sendKey("Pause")
await self._async_update_soon()
async def async_media_stop(self):
"""Send play command to media player."""
await self._tv.sendKey("Stop")
await self._async_update_soon()
@property
def media_channel(self):
"""Get current channel if it's a channel."""
return self._media_channel
@property
def media_title(self):
"""Title of current playing media."""
return self._media_title
@property
def media_content_type(self):
"""Return content type of playing media."""
return self._media_content_type
@property
def media_content_id(self):
"""Content type of current playing media."""
return self._media_content_id
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._media_content_id and self._media_content_type in (
MEDIA_TYPE_APP,
MEDIA_TYPE_CHANNEL,
):
return self.get_browse_image_url(
self._media_content_type, self._media_content_id, media_image_id=None
)
return None
@property
def app_id(self):
"""ID of the current running app."""
return self._tv.application_id
@property
def app_name(self):
"""Name of the current running app."""
if app := self._tv.applications.get(self._tv.application_id):
return app.get("label")
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
_LOGGER.debug("Call play media type <%s>, Id <%s>", media_type, media_id)
if media_type == MEDIA_TYPE_CHANNEL:
list_id, _, channel_id = media_id.partition("/")
if channel_id:
await self._tv.setChannel(channel_id, list_id)
await self._async_update_soon()
else:
_LOGGER.error("Unable to find channel <%s>", media_id)
elif media_type == MEDIA_TYPE_APP:
if app := self._tv.applications.get(media_id):
await self._tv.setApplication(app["intent"])
await self._async_update_soon()
else:
_LOGGER.error("Unable to find application <%s>", media_id)
else:
_LOGGER.error("Unsupported media type <%s>", media_type)
async def async_browse_media_channels(self, expanded):
"""Return channel media objects."""
if expanded:
children = [
BrowseMedia(
title=channel.get("name", f"Channel: {channel_id}"),
media_class=MEDIA_CLASS_CHANNEL,
media_content_id=f"alltv/{channel_id}",
media_content_type=MEDIA_TYPE_CHANNEL,
can_play=True,
can_expand=False,
)
for channel_id, channel in self._tv.channels.items()
]
else:
children = None
return BrowseMedia(
title="Channels",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="channels",
media_content_type=MEDIA_TYPE_CHANNELS,
children_media_class=MEDIA_CLASS_CHANNEL,
can_play=False,
can_expand=True,
children=children,
)
async def async_browse_media_favorites(self, list_id, expanded):
"""Return channel media objects."""
if expanded:
favorites = await self._tv.getFavoriteList(list_id)
if favorites:
def get_name(channel):
channel_data = self._tv.channels.get(str(channel["ccid"]))
if channel_data:
return channel_data["name"]
return f"Channel: {channel['ccid']}"
children = [
BrowseMedia(
title=get_name(channel),
media_class=MEDIA_CLASS_CHANNEL,
media_content_id=f"{list_id}/{channel['ccid']}",
media_content_type=MEDIA_TYPE_CHANNEL,
can_play=True,
can_expand=False,
)
for channel in favorites
]
else:
children = None
else:
children = None
favorite = self._tv.favorite_lists[list_id]
return BrowseMedia(
title=favorite.get("name", f"Favorites {list_id}"),
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id=f"favorites/{list_id}",
media_content_type=MEDIA_TYPE_CHANNELS,
children_media_class=MEDIA_CLASS_CHANNEL,
can_play=False,
can_expand=True,
children=children,
)
async def async_browse_media_applications(self, expanded):
"""Return application media objects."""
if expanded:
children = [
BrowseMedia(
title=application["label"],
media_class=MEDIA_CLASS_APP,
media_content_id=application_id,
media_content_type=MEDIA_TYPE_APP,
can_play=True,
can_expand=False,
thumbnail=self.get_browse_image_url(
MEDIA_TYPE_APP, application_id, media_image_id=None
),
)
for application_id, application in self._tv.applications.items()
]
else:
children = None
return BrowseMedia(
title="Applications",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="applications",
media_content_type=MEDIA_TYPE_APPS,
children_media_class=MEDIA_CLASS_APP,
can_play=False,
can_expand=True,
children=children,
)
async def async_browse_media_favorite_lists(self, expanded):
"""Return favorite media objects."""
if self._tv.favorite_lists and expanded:
children = [
await self.async_browse_media_favorites(list_id, False)
for list_id in self._tv.favorite_lists
]
else:
children = None
return BrowseMedia(
title="Favorites",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="favorite_lists",
media_content_type=MEDIA_TYPE_CHANNELS,
children_media_class=MEDIA_CLASS_CHANNEL,
can_play=False,
can_expand=True,
children=children,
)
async def async_browse_media_root(self):
"""Return root media objects."""
return BrowseMedia(
title="Library",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="",
media_content_type="",
can_play=False,
can_expand=True,
children=[
await self.async_browse_media_channels(False),
await self.async_browse_media_applications(False),
await self.async_browse_media_favorite_lists(False),
],
)
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
if not self._tv.on:
raise BrowseError("Can't browse when tv is turned off")
if media_content_id in (None, ""):
return await self.async_browse_media_root()
path = media_content_id.partition("/")
if path[0] == "channels":
return await self.async_browse_media_channels(True)
if path[0] == "applications":
return await self.async_browse_media_applications(True)
if path[0] == "favorite_lists":
return await self.async_browse_media_favorite_lists(True)
if path[0] == "favorites":
return await self.async_browse_media_favorites(path[2], True)
raise BrowseError(f"Media not found: {media_content_type} / {media_content_id}")
async def async_get_browse_image(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> tuple[bytes | None, str | None]:
"""Serve album art. Returns (content, content_type)."""
try:
if media_content_type == MEDIA_TYPE_APP and media_content_id:
return await self._tv.getApplicationIcon(media_content_id)
if media_content_type == MEDIA_TYPE_CHANNEL and media_content_id:
return await self._tv.getChannelLogo(media_content_id)
except ConnectionFailure:
_LOGGER.warning("Failed to fetch image")
return None, None
async def async_get_media_image(self):
"""Serve album art. Returns (content, content_type)."""
return await self.async_get_browse_image(
self.media_content_type, self.media_content_id, None
)
@callback
def _update_from_coordinator(self):
if self._tv.on:
if self._tv.powerstate in ("Standby", "StandbyKeep"):
self._state = STATE_OFF
else:
self._state = STATE_ON
else:
self._state = STATE_OFF
self._sources = {
srcid: source.get("name") or f"Source {srcid}"
for srcid, source in (self._tv.sources or {}).items()
}
if self._tv.channel_active:
self._media_content_type = MEDIA_TYPE_CHANNEL
self._media_content_id = f"all/{self._tv.channel_id}"
self._media_title = self._tv.channels.get(self._tv.channel_id, {}).get(
"name"
)
self._media_channel = self._media_title
elif self._tv.application_id:
self._media_content_type = MEDIA_TYPE_APP
self._media_content_id = self._tv.application_id
self._media_title = self._tv.applications.get(
self._tv.application_id, {}
).get("label")
self._media_channel = None
else:
self._media_content_type = None
self._media_content_id = None
self._media_title = self._sources.get(self._tv.source_id)
self._media_channel = None
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self._update_from_coordinator()
super()._handle_coordinator_update()
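# --- Illustrative sketch, not part of the original integration ----------------
# Channel media ids used above are "<list_id>/<ccid>" strings, split with
# str.partition in async_play_media and async_browse_media. The standalone helper
# below just demonstrates that parsing; its name is an assumption for
# illustration only.
def _split_channel_media_id(media_id: str) -> tuple[str, str]:
    """Split a channel media id into (favorite list id, channel ccid)."""
    list_id, _, channel_id = media_id.partition("/")
    return list_id, channel_id
# e.g. _split_channel_media_id("alltv/123") -> ("alltv", "123"); a bare id such
# as "channels" yields ("channels", "") because it contains no "/".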
|
py | 1a3c6bab90f510cb58a029c0a984bcf205726e7c | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""doc
"""
import sys
import time
import datetime
import os
import yaml
import random
import shutil
import six
import warnings
import glob
import numpy as np
def get_last_dir(path):
"""Get the last directory of a path.
"""
if os.path.isfile(path):
# e.g: "../checkpoints/task_name/epoch0_step300/predict.txt"
# return "epoch0_step300"
last_dir = path.split("/")[-2]
elif os.path.isdir(path):
if path[-1] == '/':
# e.g: "../checkpoints/task_name/epoch0_step300/"
last_dir = path.split('/')[-2]
else:
# e.g: "../checkpoints/task_name/epoch0_step300"
last_dir = path.split('/')[-1]
else:
# path or file is not existed
warnings.warn('%s is not a existed file or path' % path)
last_dir = ""
return last_dir
class AttrDict(dict):
def __init__(self, d={}, **kwargs):
if kwargs:
d.update(**kwargs)
for k, v in d.items():
setattr(self, k, v)
# Class attributes
# for k in self.__class__.__dict__.keys():
# if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
# setattr(self, k, getattr(self, k))
def __setattr__(self, name, value):
if isinstance(value, (list, tuple)):
value = [
self.__class__(x) if isinstance(x, dict) else x for x in value
]
elif isinstance(value, dict) and not isinstance(value, self.__class__):
value = self.__class__(value)
super(AttrDict, self).__setattr__(name, value)
super(AttrDict, self).__setitem__(name, value)
__setitem__ = __setattr__
def __getattr__(self, attr):
try:
value = super(AttrDict, self).__getitem__(attr)
except KeyError:
# log.warn("%s attribute is not existed, return None" % attr)
warnings.warn("%s attribute is not existed, return None" % attr)
value = None
return value
def update(self, e=None, **f):
d = e or dict()
d.update(f)
for k in d:
setattr(self, k, d[k])
def pop(self, k, d=None):
delattr(self, k)
return super(AttrDict, self).pop(k, d)
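# --- Illustrative sketch, not part of the original module ---------------------
# AttrDict mirrors every attribute assignment into the underlying dict and
# recursively wraps nested dicts, so YAML loaded by load_config can be accessed
# with dot notation. The keys below are assumptions for illustration only.
def _example_attrdict_usage():
    cfg = AttrDict({"model": {"hidden_size": 128}, "lr": 0.01})
    assert cfg.lr == 0.01
    assert cfg.model.hidden_size == 128   # nested dicts become AttrDicts
    cfg.batch_size = 32                   # attribute writes update the dict too
    assert cfg["batch_size"] == 32
    assert cfg.missing_key is None        # unknown keys warn and return None
    return cfg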
def make_dir(path):
"""Build directory"""
if not os.path.exists(path):
os.makedirs(path)
def load_config(config_file):
"""Load config file"""
with open(config_file) as f:
if hasattr(yaml, 'FullLoader'):
config = yaml.load(f, Loader=yaml.FullLoader)
else:
config = yaml.load(f)
return config
def create_necessary_dirs(config, worker_index=None):
"""Create some necessary directories to save some important files.
"""
config.log_dir = os.path.join(config.log_dir, config.task_name)
config.save_dir = os.path.join(config.save_dir, config.task_name)
config.output_dir = os.path.join(config.output_dir, config.task_name)
# if worker_index is None or worker_index == 0:
np.random.seed(worker_index)
time.sleep(np.random.uniform() * 2)
make_dir(config.log_dir)
make_dir(config.save_dir)
make_dir(config.output_dir)
def save_files(config):
"""Save config file so that we can know the config when we look back
"""
filelist = config.files2saved
targetpath = config.log_dir
if filelist is not None:
for file_or_dir in filelist:
if os.path.isdir(file_or_dir):
last_name = get_last_dir(file_or_dir)
dst = os.path.join(targetpath, last_name)
try:
copy_and_overwrite(file_or_dir, dst)
except Exception as e:
print(e)
print("backup %s to %s" % (file_or_dir, targetpath))
else:
for filename in files(files=file_or_dir):
if os.path.isfile(filename):
print("backup %s to %s" % (filename, targetpath))
shutil.copy2(filename, targetpath)
else:
print("%s is not existed." % filename)
def copy_and_overwrite(from_path, to_path):
if os.path.exists(to_path):
shutil.rmtree(to_path)
shutil.copytree(from_path, to_path)
def files(curr_dir='./', files='*.py'):
for i in glob.glob(os.path.join(curr_dir, files)):
yield i
def prepare_config(config_file,
isCreate=False,
isSave=False,
worker_index=None):
if os.path.isfile(config_file):
config = load_config(config_file)
config = AttrDict(config)
else:
raise TypeError("%s is not a yaml file" % config_file)
if isCreate:
create_necessary_dirs(config, worker_index)
if isSave:
if worker_index is None or worker_index == 0:
save_files(config)
return config
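# --- Illustrative sketch, not part of the original module ---------------------
# Typical call site: parse a YAML config into an AttrDict, create the
# task-specific log/save/output directories, and back up the config files listed
# under `files2saved`. The file name and worker index are assumptions for
# illustration only.
def _example_prepare_config():
    config = prepare_config(
        "config.yaml",
        isCreate=True,    # build <log|save|output>_dir/<task_name> directories
        isSave=True,      # copy files2saved into the log directory
        worker_index=0,
    )
    return config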
|
py | 1a3c6d692f67566aabae3804a2338392e9684072 | """*********************************************************************
* *
* Description: A simple asynchronous http library *
* Date: 12/02/2021 *
* Author: Marcos Vinicios da Silveira *
* *
* *
************************************************************************
"""
import os
import sys
from codecs import open
from setuptools import setup
BASE = os.path.abspath(os.path.dirname(__file__))
# 'setup.py publish' shortcut.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
sys.exit()
packages = ['fasthttp', 'tests']
requires = [
"requests==2.22.0",
"colorama==0.4.3",
"aiohttp==3.7.4.post0",
"urllib3==1.25.11",
"dataclasses==0.8" ,
]
test_requirements = []
about = {}
with open(os.path.join(BASE, 'fasthttp', '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
with open('README.md', 'r', 'utf-8') as f:
readme = f.read()
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=readme,
long_description_content_type='text/markdown',
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
packages=packages,
include_package_data=True,
python_requires=">=3.6",
install_requires=requires,
license=about['__license__'],
tests_require=test_requirements,
)
# end-of-file
|
py | 1a3c6e400d2e3106c93dafae00260fd2f4f5cb92 | # Usage: python demo_receiver.py [dummy|ss|gbn|sr]
import config
import sys
import time
import util
def msg_handler(msg):
print(repr(msg))
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: python demo_receiver.py [dummy|ss|gbn|sr]")
sys.exit(1)
transport_layer = None
name = sys.argv[1]
try:
transport_layer = util.get_transport_layer_by_name(
name, config.RECEIVER_LISTEN_PORT, config.SENDER_LISTEN_PORT, msg_handler
)
while True:
time.sleep(1)
finally:
if transport_layer:
transport_layer.shutdown()
|
py | 1a3c6e527408f3e23adcdd9725a9ea417956f50c | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class CredentialTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.sip \
.credential_lists(sid="CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.credentials.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"credentials": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"credential_list_sid": "CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Wed, 19 Aug 2015 19:48:45 +0000",
"date_updated": "Wed, 19 Aug 2015 19:48:45 +0000",
"sid": "CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
"username": "1440013725.28"
}
],
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json?PageSize=50&Page=0",
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json?PageSize=50&Page=0",
"next_page_uri": null,
"num_pages": 1,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"start": 0,
"total": 1,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json?PageSize=50&Page=0"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.sip \
.credential_lists(sid="CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.credentials.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"credentials": [],
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json?PageSize=50&Page=0",
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json?PageSize=50&Page=0",
"next_page_uri": null,
"num_pages": 1,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"start": 0,
"total": 1,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json?PageSize=50&Page=0"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.sip \
.credential_lists(sid="CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.credentials.list()
self.assertIsNotNone(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.sip \
.credential_lists(sid="CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.credentials.create(username="username", password="password")
values = {
'Username': "username",
'Password': "password",
}
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"credential_list_sid": "CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Wed, 19 Aug 2015 19:48:45 +0000",
"date_updated": "Wed, 19 Aug 2015 19:48:45 +0000",
"sid": "CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
"username": "1440013725.28"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.sip \
.credential_lists(sid="CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.credentials.create(username="username", password="password")
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.sip \
.credential_lists(sid="CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"credential_list_sid": "CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Wed, 19 Aug 2015 19:48:45 +0000",
"date_updated": "Wed, 19 Aug 2015 19:48:45 +0000",
"sid": "CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
"username": "1440013725.28"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.sip \
.credential_lists(sid="CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.sip \
.credential_lists(sid="CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").update()
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"credential_list_sid": "CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Wed, 19 Aug 2015 19:48:45 +0000",
"date_updated": "Wed, 19 Aug 2015 19:48:45 +0000",
"sid": "CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
"username": "1440013725.28"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.sip \
.credential_lists(sid="CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").update()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.sip \
.credential_lists(sid="CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.sip \
.credential_lists(sid="CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
self.assertTrue(actual)
|
py | 1a3c6e996172cc3e308e26b5daf22cc6a16628b6 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
from six.moves.urllib import parse as urlparse
from webtest.app import AppError
from wsme import types as wtypes
from magnum.api import attr_validator
from magnum.api.controllers.v1 import cluster_template as api_cluster_template
from magnum.common import exception
from magnum.common import policy as magnum_policy
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils
class TestClusterTemplateObject(base.TestCase):
def test_cluster_template_init(self):
cluster_template_dict = apiutils.cluster_template_post_data()
del cluster_template_dict['image_id']
del cluster_template_dict['registry_enabled']
del cluster_template_dict['tls_disabled']
del cluster_template_dict['public']
del cluster_template_dict['server_type']
del cluster_template_dict['master_lb_enabled']
del cluster_template_dict['floating_ip_enabled']
cluster_template = api_cluster_template.ClusterTemplate(
**cluster_template_dict)
self.assertEqual(wtypes.Unset, cluster_template.image_id)
self.assertFalse(cluster_template.registry_enabled)
self.assertFalse(cluster_template.tls_disabled)
self.assertFalse(cluster_template.public)
self.assertEqual('vm', cluster_template.server_type)
self.assertFalse(cluster_template.master_lb_enabled)
self.assertTrue(cluster_template.floating_ip_enabled)
class TestListClusterTemplate(api_base.FunctionalTest):
_cluster_template_attrs = ('name', 'apiserver_port', 'network_driver',
'coe', 'flavor_id', 'fixed_network',
'dns_nameserver', 'http_proxy',
'docker_volume_size', 'server_type',
'cluster_distro', 'external_network_id',
'image_id', 'registry_enabled', 'no_proxy',
'keypair_id', 'https_proxy', 'tls_disabled',
'public', 'labels', 'master_flavor_id',
'volume_driver', 'insecure_registry')
def test_empty(self):
response = self.get_json('/clustertemplates')
self.assertEqual([], response['clustertemplates'])
def test_one(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
response = self.get_json('/clustertemplates')
self.assertEqual(cluster_template.uuid,
response['clustertemplates'][0]["uuid"])
self._verify_attrs(self._cluster_template_attrs,
response['clustertemplates'][0])
def test_get_one(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
response = self.get_json('/clustertemplates/%s' %
cluster_template['uuid'])
self.assertEqual(cluster_template.uuid, response['uuid'])
self._verify_attrs(self._cluster_template_attrs, response)
def test_get_one_by_name(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
response = self.get_json('/clustertemplates/%s' %
cluster_template['name'])
self.assertEqual(cluster_template.uuid, response['uuid'])
self._verify_attrs(self._cluster_template_attrs, response)
def test_get_one_by_name_not_found(self):
response = self.get_json(
'/clustertemplates/not_found',
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_get_one_by_uuid(self):
temp_uuid = uuidutils.generate_uuid()
obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid)
response = self.get_json(
'/clustertemplates/%s' % temp_uuid)
self.assertEqual(temp_uuid, response['uuid'])
def test_get_one_by_uuid_not_found(self):
temp_uuid = uuidutils.generate_uuid()
response = self.get_json(
'/clustertemplates/%s' % temp_uuid,
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_get_one_by_name_multiple_cluster_template(self):
obj_utils.create_test_cluster_template(
self.context, name='test_clustertemplate',
uuid=uuidutils.generate_uuid())
obj_utils.create_test_cluster_template(
self.context, name='test_clustertemplate',
uuid=uuidutils.generate_uuid())
response = self.get_json(
'/clustertemplates/test_clustertemplate',
expect_errors=True)
self.assertEqual(409, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_get_all_with_pagination_marker(self):
bm_list = []
for id_ in range(4):
cluster_template = obj_utils.create_test_cluster_template(
self.context, id=id_,
uuid=uuidutils.generate_uuid())
bm_list.append(cluster_template)
response = self.get_json('/clustertemplates?limit=3&marker=%s'
% bm_list[2].uuid)
self.assertEqual(1, len(response['clustertemplates']))
self.assertEqual(bm_list[-1].uuid,
response['clustertemplates'][0]['uuid'])
def test_detail(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
response = self.get_json('/clustertemplates/detail')
self.assertEqual(cluster_template.uuid,
response['clustertemplates'][0]["uuid"])
self._verify_attrs(self._cluster_template_attrs,
response['clustertemplates'][0])
def test_detail_with_pagination_marker(self):
bm_list = []
for id_ in range(4):
cluster_template = obj_utils.create_test_cluster_template(
self.context, id=id_,
uuid=uuidutils.generate_uuid())
bm_list.append(cluster_template)
response = self.get_json('/clustertemplates/detail?limit=3&marker=%s'
% bm_list[2].uuid)
self.assertEqual(1, len(response['clustertemplates']))
self.assertEqual(bm_list[-1].uuid,
response['clustertemplates'][0]['uuid'])
self._verify_attrs(self._cluster_template_attrs,
response['clustertemplates'][0])
def test_detail_against_single(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
response = self.get_json('/clustertemplates/%s/detail' %
cluster_template['uuid'],
expect_errors=True)
self.assertEqual(404, response.status_int)
def test_many(self):
bm_list = []
for id_ in range(5):
cluster_template = obj_utils.create_test_cluster_template(
self.context, id=id_,
uuid=uuidutils.generate_uuid())
bm_list.append(cluster_template.uuid)
response = self.get_json('/clustertemplates')
self.assertEqual(len(bm_list), len(response['clustertemplates']))
uuids = [bm['uuid'] for bm in response['clustertemplates']]
self.assertEqual(sorted(bm_list), sorted(uuids))
def test_links(self):
uuid = uuidutils.generate_uuid()
obj_utils.create_test_cluster_template(self.context, id=1, uuid=uuid)
response = self.get_json('/clustertemplates/%s' % uuid)
self.assertIn('links', response.keys())
self.assertEqual(2, len(response['links']))
self.assertIn(uuid, response['links'][0]['href'])
for l in response['links']:
bookmark = l['rel'] == 'bookmark'
self.assertTrue(self.validate_link(l['href'],
bookmark=bookmark))
def test_collection_links(self):
for id_ in range(5):
obj_utils.create_test_cluster_template(
self.context, id=id_, uuid=uuidutils.generate_uuid())
response = self.get_json('/clustertemplates/?limit=3')
self.assertEqual(3, len(response['clustertemplates']))
next_marker = response['clustertemplates'][-1]['uuid']
self.assertIn(next_marker, response['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
for id_ in range(5):
obj_utils.create_test_cluster_template(
self.context, id=id_, uuid=uuidutils.generate_uuid())
response = self.get_json('/clustertemplates')
self.assertEqual(3, len(response['clustertemplates']))
next_marker = response['clustertemplates'][-1]['uuid']
self.assertIn(next_marker, response['next'])
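# --- Illustrative sketch, not part of the original test module ----------------
# The pagination tests above rely on limit/marker paging: each page's `next` URL
# embeds the uuid of the last returned template as the marker. A client following
# those links would loop roughly as below; `get_json` stands in for any HTTP
# helper and is an assumption for illustration only.
def _example_follow_pagination(get_json):
    templates = []
    url = '/clustertemplates?limit=3'
    while url:
        page = get_json(url)
        templates.extend(page['clustertemplates'])
        url = page.get('next')
    return templates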
class TestPatch(api_base.FunctionalTest):
def setUp(self):
super(TestPatch, self).setUp()
p = mock.patch.object(attr_validator, 'validate_os_resources')
self.mock_valid_os_res = p.start()
self.addCleanup(p.stop)
self.cluster_template = obj_utils.create_test_cluster_template(
self.context,
name='cluster_model_example_A',
image_id='nerdherd',
apiserver_port=8080,
fixed_network='private',
flavor_id='m1.magnum',
master_flavor_id='m1.magnum',
external_network_id='public',
keypair_id='test',
volume_driver='rexray',
public=False,
docker_volume_size=20,
coe='swarm',
labels={'key1': 'val1', 'key2': 'val2'}
)
def test_update_not_found(self):
uuid = uuidutils.generate_uuid()
response = self.patch_json('/clustertemplates/%s' % uuid,
[{'path': '/name',
'value': 'cluster_model_example_B',
'op': 'add'}],
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_update_cluster_template_with_cluster(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
obj_utils.create_test_cluster(
self.context, cluster_template_id=cluster_template.uuid)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/name',
'value': 'cluster_model_example_B',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
self.assertIn(cluster_template.uuid,
response.json['errors'][0]['detail'])
@mock.patch.object(magnum_policy, 'enforce')
def test_update_public_cluster_template_success(self, mock_policy):
mock_policy.return_value = True
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/public', 'value': True,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertTrue(response['public'])
@mock.patch.object(magnum_policy, 'enforce')
def test_update_public_cluster_template_fail(self, mock_policy):
mock_policy.return_value = False
self.assertRaises(AppError, self.patch_json,
'/clustertemplates/%s' % self.cluster_template.uuid,
[{'path': '/public', 'value': True,
'op': 'replace'}])
def test_update_cluster_template_with_cluster_allow_update(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
obj_utils.create_test_cluster(
self.context, cluster_template_id=cluster_template.uuid)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/public',
'value': True,
'op': 'replace'}],
expect_errors=True)
self.assertEqual(200, response.status_int)
response = self.get_json('/clustertemplates/%s' %
                                 cluster_template.uuid)
self.assertEqual(response['public'], True)
def test_update_cluster_template_with_cluster_not_allow_update(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
obj_utils.create_test_cluster(
self.context, cluster_template_id=cluster_template.uuid)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/name',
'value': 'new_name',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(400, response.status_code)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_singular(self, mock_utcnow):
name = 'cluster_model_example_B'
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/name', 'value': name,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertEqual(name, response['name'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
# Assert nothing else was changed
self.assertEqual(self.cluster_template.uuid, response['uuid'])
self.assertEqual(self.cluster_template.image_id, response['image_id'])
self.assertEqual(self.cluster_template.apiserver_port,
response['apiserver_port'])
self.assertEqual(self.cluster_template.fixed_network,
response['fixed_network'])
self.assertEqual(self.cluster_template.network_driver,
response['network_driver'])
self.assertEqual(self.cluster_template.volume_driver,
response['volume_driver'])
self.assertEqual(self.cluster_template.docker_volume_size,
response['docker_volume_size'])
self.assertEqual(self.cluster_template.coe,
response['coe'])
self.assertEqual(self.cluster_template.http_proxy,
response['http_proxy'])
self.assertEqual(self.cluster_template.https_proxy,
response['https_proxy'])
self.assertEqual(self.cluster_template.no_proxy,
response['no_proxy'])
self.assertEqual(self.cluster_template.labels,
response['labels'])
def test_replace_cluster_template_with_no_exist_flavor_id(self):
self.mock_valid_os_res.side_effect = exception.FlavorNotFound("aaa")
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/flavor_id', 'value': 'aaa',
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
def test_replace_cluster_template_with_no_exist_keypair_id(self):
self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("aaa")
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/keypair_id', 'value': 'aaa',
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(404, response.status_code)
self.assertTrue(response.json['errors'])
def test_replace_cluster_template_with_no_exist_external_network_id(self):
self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound(
"aaa")
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/external_network_id',
'value': 'aaa',
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
def test_replace_cluster_template_with_no_exist_image_id(self):
self.mock_valid_os_res.side_effect = exception.ImageNotFound("aaa")
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/image_id', 'value': 'aaa',
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
def test_create_cluster_template_with_no_os_distro_image(self):
image_exce = exception.OSDistroFieldNotFound('img')
self.mock_valid_os_res.side_effect = image_exce
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/image_id', 'value': 'img',
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
def test_remove_singular(self):
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertIsNotNone(response['dns_nameserver'])
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/dns_nameserver',
'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertIsNone(response['dns_nameserver'])
# Assert nothing else was changed
self.assertEqual(self.cluster_template.uuid, response['uuid'])
self.assertEqual(self.cluster_template.name, response['name'])
self.assertEqual(self.cluster_template.apiserver_port,
response['apiserver_port'])
self.assertEqual(self.cluster_template.image_id,
response['image_id'])
self.assertEqual(self.cluster_template.fixed_network,
response['fixed_network'])
self.assertEqual(self.cluster_template.network_driver,
response['network_driver'])
self.assertEqual(self.cluster_template.volume_driver,
response['volume_driver'])
self.assertEqual(self.cluster_template.docker_volume_size,
response['docker_volume_size'])
self.assertEqual(self.cluster_template.coe, response['coe'])
self.assertEqual(self.cluster_template.http_proxy,
response['http_proxy'])
self.assertEqual(self.cluster_template.https_proxy,
response['https_proxy'])
self.assertEqual(self.cluster_template.no_proxy, response['no_proxy'])
self.assertEqual(self.cluster_template.labels, response['labels'])
def test_remove_non_existent_property_fail(self):
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/non-existent',
'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
def test_remove_mandatory_property_fail(self):
mandatory_properties = ('/image_id', '/coe',
'/external_network_id', '/server_type',
'/tls_disabled', '/public',
'/registry_enabled',
'/cluster_distro', '/network_driver')
for p in mandatory_properties:
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': p, 'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
def test_add_root_non_existent(self):
response = self.patch_json(
'/clustertemplates/%s' % self.cluster_template.uuid,
[{'path': '/foo', 'value': 'bar', 'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_remove_uuid(self):
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/uuid', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
class TestPost(api_base.FunctionalTest):
def setUp(self):
super(TestPost, self).setUp()
p = mock.patch.object(attr_validator, 'validate_os_resources')
self.mock_valid_os_res = p.start()
self.addCleanup(p.stop)
@mock.patch('magnum.api.attr_validator.validate_image')
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_cluster_template(self, mock_utcnow,
mock_image_data):
bdict = apiutils.cluster_template_post_data()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
response = self.post_json('/clustertemplates', bdict)
self.assertEqual(201, response.status_int)
# Check location header
self.assertIsNotNone(response.location)
expected_location = '/v1/clustertemplates/%s' % bdict['uuid']
self.assertEqual(expected_location,
urlparse.urlparse(response.location).path)
self.assertEqual(bdict['uuid'], response.json['uuid'])
        self.assertNotIn('updated_at', response.json.keys())
return_created_at = timeutils.parse_isotime(
response.json['created_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_created_at)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_set_project_id_and_user_id(
self, mock_image_data):
with mock.patch.object(
self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template) as cc_mock:
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data()
self.post_json('/clustertemplates', bdict)
cc_mock.assert_called_once_with(mock.ANY)
self.assertEqual(self.context.project_id,
cc_mock.call_args[0][0]['project_id'])
self.assertEqual(self.context.user_id,
cc_mock.call_args[0][0]['user_id'])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_doesnt_contain_id(self,
mock_image_data):
with mock.patch.object(
self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template) as cc_mock:
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data(image_id='my-image')
response = self.post_json('/clustertemplates', bdict)
self.assertEqual(bdict['image_id'], response.json['image_id'])
cc_mock.assert_called_once_with(mock.ANY)
# Check that 'id' is not in first arg of positional args
self.assertNotIn('id', cc_mock.call_args[0][0])
def _create_model_raises_app_error(self, **kwargs):
# Create mock for db and image data
with mock.patch.object(
self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template) as cc_mock,\
mock.patch('magnum.api.attr_validator.validate_image')\
as mock_image_data:
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data(**kwargs)
self.assertRaises(AppError, self.post_json, '/clustertemplates',
bdict)
self.assertFalse(cc_mock.called)
def test_create_cluster_template_with_invalid_long_string(self):
fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id",
"dns_nameserver", "keypair_id", "external_network_id",
"cluster_distro", "fixed_network", "apiserver_port",
"docker_volume_size", "http_proxy", "https_proxy",
"no_proxy", "network_driver", "labels", "volume_driver"]
for field in fields:
self._create_model_raises_app_error(**{field: 'i' * 256})
def test_create_cluster_template_with_invalid_empty_string(self):
fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id",
"dns_nameserver", "keypair_id", "external_network_id",
"cluster_distro", "fixed_network", "apiserver_port",
"docker_volume_size", "labels", "http_proxy", "https_proxy",
"no_proxy", "network_driver", "volume_driver", "coe"]
for field in fields:
self._create_model_raises_app_error(**{field: ''})
def test_create_cluster_template_with_invalid_coe(self):
self._create_model_raises_app_error(coe='k8s')
self._create_model_raises_app_error(coe='storm')
self._create_model_raises_app_error(coe='meson')
self._create_model_raises_app_error(coe='osomatsu')
def test_create_cluster_template_with_invalid_docker_volume_size(self):
self._create_model_raises_app_error(docker_volume_size=-1)
self._create_model_raises_app_error(
docker_volume_size=1,
docker_storage_driver="devicemapper")
self._create_model_raises_app_error(
docker_volume_size=2,
docker_storage_driver="devicemapper")
self._create_model_raises_app_error(docker_volume_size='notanint')
def test_create_cluster_template_with_invalid_dns_nameserver(self):
self._create_model_raises_app_error(dns_nameserver='1.1.2')
self._create_model_raises_app_error(dns_nameserver='1.1..1')
self._create_model_raises_app_error(dns_nameserver='openstack.org')
def test_create_cluster_template_with_invalid_apiserver_port(self):
self._create_model_raises_app_error(apiserver_port=-12)
self._create_model_raises_app_error(apiserver_port=65536)
self._create_model_raises_app_error(apiserver_port=0)
self._create_model_raises_app_error(apiserver_port=1023)
self._create_model_raises_app_error(apiserver_port='not an int')
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_labels(self, mock_image_data):
with mock.patch.object(
self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template) as cc_mock:
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data(
labels={'key1': 'val1', 'key2': 'val2'})
response = self.post_json('/clustertemplates', bdict)
self.assertEqual(bdict['labels'],
response.json['labels'])
cc_mock.assert_called_once_with(mock.ANY)
self.assertNotIn('id', cc_mock.call_args[0][0])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_docker_volume_size(self,
mock_image_data):
with mock.patch.object(
self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template) as cc_mock:
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data(docker_volume_size=99)
response = self.post_json('/clustertemplates', bdict)
self.assertEqual(bdict['docker_volume_size'],
response.json['docker_volume_size'])
cc_mock.assert_called_once_with(mock.ANY)
self.assertNotIn('id', cc_mock.call_args[0][0])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_overlay(self, mock_image_data):
with mock.patch.object(
self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template) as cc_mock:
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data(
docker_volume_size=1, docker_storage_driver="overlay")
response = self.post_json('/clustertemplates', bdict)
self.assertEqual(bdict['docker_volume_size'],
response.json['docker_volume_size'])
cc_mock.assert_called_once_with(mock.ANY)
self.assertNotIn('id', cc_mock.call_args[0][0])
@mock.patch('magnum.api.attr_validator.validate_image')
def _test_create_cluster_template_network_driver_attr(
self,
cluster_template_dict,
cluster_template_config_dict,
expect_errors,
mock_image_data):
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
for k, v in cluster_template_config_dict.items():
cfg.CONF.set_override(k, v, 'cluster_template')
with mock.patch.object(
self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template) as cc_mock:
bdict = apiutils.cluster_template_post_data(
**cluster_template_dict)
response = self.post_json('/clustertemplates', bdict,
expect_errors=expect_errors)
if expect_errors:
self.assertEqual(400, response.status_int)
else:
expected_driver = bdict.get('network_driver')
if not expected_driver:
expected_driver = (
cfg.CONF.cluster_template.swarm_default_network_driver)
self.assertEqual(expected_driver,
response.json['network_driver'])
self.assertEqual(bdict['image_id'],
response.json['image_id'])
cc_mock.assert_called_once_with(mock.ANY)
self.assertNotIn('id', cc_mock.call_args[0][0])
self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))
def test_create_cluster_template_with_network_driver(self):
cluster_template_dict = {'coe': 'kubernetes',
'network_driver': 'flannel'}
config_dict = {} # Default config
expect_errors_flag = False
self._test_create_cluster_template_network_driver_attr(
cluster_template_dict,
config_dict,
expect_errors_flag)
def test_create_cluster_template_with_no_network_driver(self):
cluster_template_dict = {}
config_dict = {}
expect_errors_flag = False
self._test_create_cluster_template_network_driver_attr(
cluster_template_dict,
config_dict,
expect_errors_flag)
def test_create_cluster_template_with_network_driver_non_def_config(self):
cluster_template_dict = {'coe': 'kubernetes',
'network_driver': 'flannel'}
config_dict = {
'kubernetes_allowed_network_drivers': ['flannel', 'foo']}
expect_errors_flag = False
self._test_create_cluster_template_network_driver_attr(
cluster_template_dict,
config_dict,
expect_errors_flag)
def test_create_cluster_template_with_invalid_network_driver(self):
cluster_template_dict = {'coe': 'kubernetes',
'network_driver': 'bad_driver'}
config_dict = {
'kubernetes_allowed_network_drivers': ['flannel', 'good_driver']}
expect_errors_flag = True
self._test_create_cluster_template_network_driver_attr(
cluster_template_dict,
config_dict,
expect_errors_flag)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_volume_driver(self,
mock_image_data):
with mock.patch.object(
self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template) as cc_mock:
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data(volume_driver='rexray')
response = self.post_json('/clustertemplates', bdict)
self.assertEqual(bdict['volume_driver'],
response.json['volume_driver'])
cc_mock.assert_called_once_with(mock.ANY)
self.assertNotIn('id', cc_mock.call_args[0][0])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_no_volume_driver(self,
mock_image_data):
with mock.patch.object(
self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template) as cc_mock:
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data()
response = self.post_json('/clustertemplates', bdict)
self.assertEqual(bdict['volume_driver'],
response.json['volume_driver'])
cc_mock.assert_called_once_with(mock.ANY)
self.assertNotIn('id', cc_mock.call_args[0][0])
@mock.patch('magnum.api.attr_validator.validate_image')
@mock.patch.object(magnum_policy, 'enforce')
def test_create_cluster_template_public_success(self, mock_policy,
mock_image_data):
with mock.patch.object(
self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template) as cc_mock:
mock_policy.return_value = True
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data(public=True)
response = self.post_json('/clustertemplates', bdict)
self.assertTrue(response.json['public'])
mock_policy.assert_called_with(mock.ANY,
"clustertemplate:publish",
None, do_raise=False)
cc_mock.assert_called_once_with(mock.ANY)
self.assertNotIn('id', cc_mock.call_args[0][0])
self.assertTrue(cc_mock.call_args[0][0]['public'])
@mock.patch('magnum.api.attr_validator.validate_image')
@mock.patch.object(magnum_policy, 'enforce')
def test_create_cluster_template_public_fail(self, mock_policy,
mock_image_data):
with mock.patch.object(self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template):
# make policy enforcement fail
mock_policy.return_value = False
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data(public=True)
self.assertRaises(AppError, self.post_json, '/clustertemplates',
bdict)
@mock.patch('magnum.api.attr_validator.validate_image')
@mock.patch.object(magnum_policy, 'enforce')
def test_create_cluster_template_public_not_set(self, mock_policy,
mock_image_data):
with mock.patch.object(
self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template) as cc_mock:
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data(public=False)
response = self.post_json('/clustertemplates', bdict)
self.assertFalse(response.json['public'])
# policy enforcement is called only once for enforce_wsgi
self.assertEqual(1, mock_policy.call_count)
cc_mock.assert_called_once_with(mock.ANY)
self.assertNotIn('id', cc_mock.call_args[0][0])
self.assertFalse(cc_mock.call_args[0][0]['public'])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_no_os_distro_image(self,
mock_image_data):
mock_image_data.side_effect = exception.OSDistroFieldNotFound('img')
bdict = apiutils.cluster_template_post_data()
del bdict['uuid']
response = self.post_json('/clustertemplates', bdict,
expect_errors=True)
self.assertEqual(400, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_os_distro_image(self,
mock_image_data):
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data()
del bdict['uuid']
response = self.post_json('/clustertemplates', bdict,
expect_errors=True)
self.assertEqual(201, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_image_name(self,
mock_image_data):
mock_image = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
mock_image_data.return_value = mock_image
bdict = apiutils.cluster_template_post_data()
del bdict['uuid']
response = self.post_json('/clustertemplates', bdict,
expect_errors=True)
self.assertEqual(201, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_no_exist_image_name(self,
mock_image_data):
mock_image_data.side_effect = exception.ResourceNotFound('test-img')
bdict = apiutils.cluster_template_post_data()
del bdict['uuid']
response = self.post_json('/clustertemplates', bdict,
expect_errors=True)
self.assertEqual(404, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_multi_image_name(self,
mock_image_data):
mock_image_data.side_effect = exception.Conflict('Multiple images')
bdict = apiutils.cluster_template_post_data()
del bdict['uuid']
response = self.post_json('/clustertemplates', bdict,
expect_errors=True)
self.assertEqual(409, response.status_int)
def test_create_cluster_template_without_image_id(self):
bdict = apiutils.cluster_template_post_data()
del bdict['image_id']
response = self.post_json('/clustertemplates', bdict,
expect_errors=True)
self.assertEqual(400, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_without_keypair_id(self,
mock_image_data):
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data()
del bdict['keypair_id']
response = self.post_json('/clustertemplates', bdict)
self.assertEqual(201, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_dns(self,
mock_image_data):
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data()
response = self.post_json('/clustertemplates', bdict)
self.assertEqual(201, response.status_int)
self.assertEqual(bdict['dns_nameserver'],
response.json['dns_nameserver'])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_no_exist_keypair(self,
mock_image_data):
self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("Test")
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data()
response = self.post_json('/clustertemplates', bdict,
expect_errors=True)
self.assertEqual(404, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_flavor(self,
mock_image_data):
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data()
response = self.post_json('/clustertemplates', bdict)
self.assertEqual(201, response.status_int)
self.assertEqual(bdict['flavor_id'],
response.json['flavor_id'])
self.assertEqual(bdict['master_flavor_id'],
response.json['master_flavor_id'])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_no_exist_flavor(self,
mock_image_data):
self.mock_valid_os_res.side_effect = exception.FlavorNotFound("flavor")
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data()
response = self.post_json('/clustertemplates', bdict,
expect_errors=True)
self.assertEqual(400, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_external_network(self,
mock_image_data):
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data()
response = self.post_json('/clustertemplates', bdict)
self.assertEqual(201, response.status_int)
self.assertEqual(bdict['external_network_id'],
response.json['external_network_id'])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_no_exist_external_network(
self, mock_image_data):
self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound(
"test")
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data()
response = self.post_json('/clustertemplates', bdict,
expect_errors=True)
self.assertEqual(400, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_without_name(self, mock_image_data):
with mock.patch.object(self.dbapi, 'create_cluster_template',
wraps=self.dbapi.create_cluster_template):
mock_image_data.return_value = {'name': 'mock_name',
'os_distro': 'fedora-atomic'}
bdict = apiutils.cluster_template_post_data()
bdict.pop('name')
resp = self.post_json('/clustertemplates', bdict)
self.assertEqual(201, resp.status_int)
self.assertIsNotNone(resp.json['name'])
class TestDelete(api_base.FunctionalTest):
def test_delete_cluster_template(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
self.delete('/clustertemplates/%s' % cluster_template.uuid)
response = self.get_json('/clustertemplates/%s' %
cluster_template.uuid,
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_delete_cluster_template_with_cluster(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
obj_utils.create_test_cluster(
self.context, cluster_template_id=cluster_template.uuid)
response = self.delete('/clustertemplates/%s' % cluster_template.uuid,
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
self.assertIn(cluster_template.uuid,
response.json['errors'][0]['detail'])
def test_delete_cluster_template_not_found(self):
uuid = uuidutils.generate_uuid()
response = self.delete('/clustertemplates/%s' % uuid,
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_delete_cluster_template_with_name(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
response = self.delete('/clustertemplates/%s' %
cluster_template['name'],
expect_errors=True)
self.assertEqual(204, response.status_int)
def test_delete_cluster_template_with_name_not_found(self):
response = self.delete('/clustertemplates/not_found',
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_delete_multiple_cluster_template_by_name(self):
obj_utils.create_test_cluster_template(self.context,
name='test_cluster_template',
uuid=uuidutils.generate_uuid())
obj_utils.create_test_cluster_template(self.context,
name='test_cluster_template',
uuid=uuidutils.generate_uuid())
response = self.delete('/clustertemplates/test_cluster_template',
expect_errors=True)
self.assertEqual(409, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
class TestClusterTemplatePolicyEnforcement(api_base.FunctionalTest):
def _common_policy_check(self, rule, func, *arg, **kwarg):
self.policy.set_rules({rule: "project:non_fake"})
response = func(*arg, **kwarg)
self.assertEqual(403, response.status_int)
self.assertEqual('application/json', response.content_type)
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            response.json['errors'][0]['detail'])
def test_policy_disallow_get_all(self):
self._common_policy_check(
"cluster_template:get_all", self.get_json, '/clustertemplates',
expect_errors=True)
def test_policy_disallow_get_one(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
self._common_policy_check(
"cluster_template:get", self.get_json,
'/clustertemplates/%s' % cluster_template.uuid,
expect_errors=True)
def test_policy_disallow_detail(self):
self._common_policy_check(
"cluster_template:detail", self.get_json,
'/clustertemplates/%s/detail' % uuidutils.generate_uuid(),
expect_errors=True)
def test_policy_disallow_update(self):
cluster_template = obj_utils.create_test_cluster_template(
self.context,
name='example_A',
uuid=uuidutils.generate_uuid())
self._common_policy_check(
"cluster_template:update", self.patch_json,
'/clustertemplates/%s' % cluster_template.name,
[{'path': '/name', 'value': "new_name", 'op': 'replace'}],
expect_errors=True)
def test_policy_disallow_create(self):
bdict = apiutils.cluster_template_post_data(
name='cluster_model_example_A')
self._common_policy_check(
"cluster_template:create", self.post_json, '/clustertemplates',
bdict, expect_errors=True)
def test_policy_disallow_delete(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
self._common_policy_check(
"cluster_template:delete", self.delete,
'/clustertemplates/%s' % cluster_template.uuid, expect_errors=True)
def _owner_check(self, rule, func, *args, **kwargs):
self.policy.set_rules({rule: "user_id:%(user_id)s"})
response = func(*args, **kwargs)
self.assertEqual(403, response.status_int)
self.assertEqual('application/json', response.content_type)
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            response.json['errors'][0]['detail'])
def test_policy_only_owner_get_one(self):
cluster_template = obj_utils.create_test_cluster_template(
self.context,
user_id='another')
self._owner_check("cluster_template:get", self.get_json,
'/clustertemplates/%s' % cluster_template.uuid,
expect_errors=True)
def test_policy_only_owner_update(self):
cluster_template = obj_utils.create_test_cluster_template(
self.context,
user_id='another')
self._owner_check(
"cluster_template:update", self.patch_json,
'/clustertemplates/%s' % cluster_template.uuid,
[{'path': '/name', 'value': "new_name", 'op': 'replace'}],
expect_errors=True)
def test_policy_only_owner_delete(self):
cluster_template = obj_utils.create_test_cluster_template(
self.context,
user_id='another')
self._owner_check(
"cluster_template:delete", self.delete,
'/clustertemplates/%s' % cluster_template.uuid,
expect_errors=True)
|
py | 1a3c6ee76d66c8ea7168216cf2a9b046f453729f | # -*- coding: utf-8 -*-
import datetime
from client_1c_timesheet.api import APIServer, APIServer404
from client_1c_timesheet.decorators import request_rate_watchdog
from client_1c_timesheet.models import TimeGroup, Organization, Employee, TimeSheetLine, TimeSheetRecord, TimeSheet
from functools import lru_cache
from typing import List, Dict
class APISession:
"""Models the interaction of one user with one workspace. Caches current user, workspace and projects.
To make basic interactions quicker this class makes two simplifying assumptions:
* All actions pertain to one user, the owner of the api_key
* All actions pertain to only one workspace, the users default workspace
"""
def __init__(self, api_server: APIServer, auth: (str, str) ):
"""
Parameters
----------
api_server: APIServer
Server to use for communication
auth: (str, str)
basic auth: user and pass for 1C server
"""
self.auth = auth
self.api = API1C(api_server=api_server)
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_time_groups(self) -> List[TimeGroup]:
return self.api.get_time_groups(auth=self.auth)
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_organizations(self) -> List[Organization]:
return self.api.get_organizations(auth=self.auth)
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_employees(self) -> List[Employee]:
return self.api.get_employees(auth=self.auth)
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_time_sheet_lines(self) -> List[TimeSheetLine]:
return self.api.get_time_sheet_lines(auth=self.auth)
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_time_sheets(self) -> List[TimeSheet]:
return self.api.get_time_sheets(auth=self.auth)
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def add_time_sheet(self, time_sheet: TimeSheet) -> TimeSheet:
"""Add the given time sheet document to 1C
Parameters
----------
time_sheet: TimeSheet
The time sheet to add
Returns
-------
        bool
            True on success (the underlying API call does not yet return the created TimeSheet)
"""
return self.api.add_time_sheet(auth=self.auth, time_sheet=time_sheet)
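    # A minimal usage sketch (hedged: the constructor argument of APIServer below is an
    # assumption -- whatever endpoint client_1c_timesheet.api.APIServer expects -- and the
    # credentials are placeholders, not real values):
    #
    #   server = APIServer(...)                      # point at the 1C OData service
    #   session = APISession(api_server=server, auth=("user", "password"))
    #   employees = session.get_employees()          # cached by lru_cache after the first call
    #   sheets = session.get_time_sheets()           # throttled by request_rate_watchdog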
'''@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def make_workspace(self, workspace_name: str) -> Workspace:
return self.api.make_workspace(api_key=self.api_key, workspace_name=workspace_name)
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_user(self):
return self.api.get_user(api_key=self.api_key)
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_users(self, workspace, page_size=200) -> List[User]:
return self.api.get_users(api_key=self.api_key, workspace=workspace, page_size=page_size)
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_projects(self, workspace, page_size=200) -> Project:
return self.api.get_projects(api_key=self.api_key, workspace=workspace, page_size=page_size)
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_clients(self, workspace, page_size=200):
return self.api.get_clients(api_key=self.api_key, workspace=workspace, page_size=page_size)
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_tasks(self, workspace, project, page_size=200):
return self.api.get_tasks(api_key=self.api_key, workspace=workspace,
project=project, page_size=page_size)
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_tags(self, workspace, page_size=200) -> List[Tag]:
return self.api.get_tags(api_key=self.api_key, workspace=workspace, page_size=page_size)
@lru_cache()
def get_projects_with_tasks(self, workspace, page_size=200) -> Dict[Project, List[Task]]:
"""Get all Projects and Tasks for the given workspace, include None if Projects
are not obligatory when entering time entry in Clockify, the same for Tasks. It is
regulated by forceProjects and forceTasks in Workspace respectively
Parameters
----------
workspace: Workspace
Returns
-------
Dict with Projects and Tasks in the workspace
"""
projects = self.get_projects(workspace=workspace, page_size=page_size)
projects_with_tasks = {} if workspace.forceProjects else {None: [None]}
for project in projects:
if workspace.forceTasks:
projects_with_tasks[project] = self.get_tasks(workspace=workspace,
project=project, page_size=page_size)
else:
projects_with_tasks[project] = [None] + self.get_tasks(workspace=workspace,
project=project, page_size=page_size)
return projects_with_tasks
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_time_entries(self, workspace, user, start_datetime, end_datetime, page_size=200):
return self.api.get_time_entries(api_key=self.api_key,
workspace=workspace,
user=user,
start_datetime=start_datetime,
end_datetime=end_datetime,
page_size=page_size)
#ToDo for Local TimeSheet...
@lru_cache()
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def get_time_entries_local(self, workspace, user, start_datetime, end_datetime, page_size=200):
return self.api.get_time_entries(api_key=self.api_key,
workspace=workspace,
user=user,
start_datetime=start_datetime,
end_datetime=end_datetime,
page_size=page_size)
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def add_time_entry_object(self, time_entry: TimeEntry):
"""Add the given time entry to the default workspace
Parameters
----------
time_entry: TimeEntry
The time entry to add
Returns
-------
TimeEntry
The created time entry
"""
return self.api.add_time_entry(api_key=self.api_key,
workspace=self.get_default_workspace(),
time_entry=time_entry)
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def add_time_entry(self, start_time, user=None, end_time=None, description=None, project=None):
"""Add a time entry to default workspace. If no end time is given stopwatch mode is activated.
This will stop any previously running stopwatch
Parameters
----------
start_time: datetime, UTC
Set start of time entry to this
user: User
current user is supposed
end_time: datetime, UTC, optional
Set end of time entry to this. If not given, activate stopwatch mode. Defaults to None
description: str, optional
Description of this time entry. Defaults to None
project: Project, optional
Set the project that this time entry belongs to. Defaults to None
Returns
-------
TimeEntry
The created time entry
"""
time_entry = TimeEntry(obj_id=None,
start=start_time,
description=description,
user=user,
project=project,
end=end_time)
return self.add_time_entry_object(time_entry=time_entry)
@request_rate_watchdog(APIServer.RATE_LIMIT_REQUESTS_PER_SECOND)
def stop_timer(self, stop_time=None):
"""Halt the current timer
Parameters
----------
stop_time: datetime, UTC, optional
Set the end date of the timed entry to this. Defaults to None, meaning time will be set to utcnow()
Returns
-------
TimeEntry:
The entry that was stopped
None:
When there was no timer running
"""
if not stop_time:
stop_time = self.now()
return self.api.set_active_time_entry_end(
api_key=self.api_key,
workspace=self.get_default_workspace(),
user=self.get_user(),
end_time=stop_time
)
@staticmethod
def now():
"""
Returns
-------
datetime.datetime
"""
return datetime.datetime.utcnow()
'''
class API1C:
"""A 1C API in the python world. Returns python objects. Does not know about http requests
Notes
-----
For lower level (http) interactions, see api.APIServer
"""
def __init__(self, api_server: APIServer):
"""
Parameters
----------
api_server: APIServer
Server to use for communication"""
self.api_server = api_server
def get_time_groups(self, auth) -> List[TimeGroup]:
"""Get all time groups for the given account
Parameters
----------
auth: (str, str)
1C basic auth
Returns
-------
List[TimeGroup]"""
response = self.api_server.get(path="Catalog_ВидыИспользованияРабочегоВремени", auth=auth)
return [TimeGroup.init_from_dict(x) for x in response]
def get_organizations(self, auth) -> List[Organization]:
"""Get all organizations for the given account
Parameters
----------
auth: (str, str)
1C basic auth
Returns
-------
List[Organization]"""
response = self.api_server.get(path="Catalog_Организации", auth=auth)
return [Organization.init_from_dict(x) for x in response]
def get_employees(self, auth) -> List[Employee]:
"""Get all employees for the given account
Parameters
----------
auth: (str, str)
1C basic auth
Returns
-------
List[Employee]"""
response = self.api_server.get(path="Catalog_Сотрудники", auth=auth)
return [Employee.init_from_dict(x) for x in response]
def get_time_sheet_lines(self, auth) -> List[TimeSheetLine]:
"""Get all time sheet lines from documents time sheet for the given account
Parameters
----------
auth: (str, str)
1C basic auth
Returns
-------
List[TimeSheetLine]"""
response = self.api_server.get(path="Document_ТабельУчетаРабочегоВремени", auth=auth)
return [TimeSheetLine.init_from_dict(x) for y in response for x in y["ДанныеОВремени"]]
def get_time_sheets(self, auth) -> List[TimeSheet]:
"""Get all time sheet documents for the given account
Parameters
----------
auth: (str, str)
1C basic auth
Returns
-------
List[TimeSheet]"""
response = self.api_server.get(path="Document_ТабельУчетаРабочегоВремени", auth=auth)
return [TimeSheet.init_from_dict(x) for x in response]
def add_time_sheet(self, auth, time_sheet: TimeSheet):
        # TODO: apply a single GET for TimeSheet using the newly generated TimeSheet Ref_Key (obj_id)
"""
Parameters
----------
auth: (str, str)
1C basic auth
time_sheet: TimeSheet
the document Time Sheet to add to 1C
Returns
-------
        bool
            True on success; returning the created TimeSheet is pending (see the TODO above)
"""
result = self.api_server.post(
path="Document_ТабельУчетаРабочегоВремени",
auth=auth,
data={k: v for k, v in time_sheet.to_dict().items() if k != "Ref_Key"},
)
        return True  # TimeSheet.init_from_dict(result) is not returned yet: the POST result carries no Ref_Key for its TimeSheetLines.
''' def get_user(self, api_key):
"""Get the user for the given api key
Parameters
----------
api_key: str
Clockify Api key
Returns
-------
User
"""
response = self.api_server.get(path="/user", api_key=api_key)
return User.init_from_dict(response)
def get_users(self, api_key, workspace, page_size) -> List[User]:
"""Get users for the given workspace
Parameters
----------
api_key: str
Clockify Api key
workspace: Workspace
Get users in this workspace
page_size: int
Number of records in one response
Returns
-------
List[User]
"""
params = {'page-size': page_size}
response = self.api_server.get(path=f"/workspaces/{workspace.obj_id}/users", api_key=api_key, params=params)
return [User.init_from_dict(x) for x in response]
def make_project(self, api_key: str, project_name: str, client: Client = None,
additional_data: {str:str}=None)-> Project:
"""Post and create in Clockify project using project name with the given api key,
for the given workspace
Parameters
----------
api_key: str
Clockify Api key
project_name: str
The name of the workspace to be created
Returns
-------
Project
"""
response = self.api_server.post(path="/workspaces", api_key=api_key, data={"name": workspace_name})
return Workspace.init_from_dict(response)
def get_projects(self, api_key, workspace, page_size) -> List[Project]:
"""Get all projects for given workspace
Parameters
----------
api_key: str
Clockify Api key
workspace: Workspace
Get projects in this workspace
page_size: int
Number of records in one response
Returns
-------
List[Project]
"""
params = {'page-size': page_size}
response = self.api_server.get(
path=f"/workspaces/{workspace.obj_id}/projects", api_key=api_key, params=params
)
return [Project.init_from_dict(x) for x in response]
def get_clients(self, api_key, workspace, page_size) -> List[Client]:
"""Get all clients for given workspace
Parameters
----------
api_key: str
Clockify Api key
workspace: Workspace
Get clients in this workspace
page_size: int
Number of records in one response
Returns
-------
List[Client]
"""
params = {'page-size': page_size}
response = self.api_server.get(
path=f"/workspaces/{workspace.obj_id}/clients",
api_key=api_key,
params=params
)
return [Client.init_from_dict(x) for x in response]
def get_tasks(self, api_key, workspace, project, page_size) -> List[Task]:
"""Get all tasks for given project
Parameters
----------
api_key: str
Clockify Api key
workspace: Workspace
project: Project
Get tasks in this project
page_size: int
Number of records in one response
Returns
-------
List[Task]
"""
params = {'page-size': page_size}
response = self.api_server.get(
path=f"/workspaces/{workspace.obj_id}/projects/{project.obj_id}/tasks",
api_key=api_key,
params=params
)
return [Task.init_from_dict(x) for x in response]
def get_tags(self, api_key, workspace, page_size) -> List[Tag]:
"""Get all tags for given workspace
Parameters
----------
api_key: str
Clockify Api key
workspace: Workspace
Get tags in this workspace
page_size: int
Number of records in one response
Returns
-------
List[Tag]
"""
params = {'page-size': page_size}
response = self.api_server.get(
path=f"/workspaces/{workspace.obj_id}/tags",
api_key=api_key,
params=params
)
return [Tag.init_from_dict(x) for x in response]
def substitute_api_id_entities(self, time_entries, users=None, projects_with_tasks: {Project: [Task]}= None,
tags=None) -> List[TimeEntry]:
"""Fill time entries with links to users, projects with tasks and tags instead of simple API_ID entities
Parameters
----------
time_entries : List[TimeEntry]
a list of time entries to work on
users: List[User]
a list of users to set a link to
projects_with_tasks : Dict[Project, List [Task]]
a dict of projects and lists of tasks to set a link to
tags : List[Tag]
a list of tags to set a link to
page_size: int
Number of records in one response
Returns
-------
List[TimeEntry]
"""
if users:
users_dict = {user: user for user in users}
if projects_with_tasks:
projects_dict = {project: project for project in projects_with_tasks.keys()}
tasks_dict = {}
for project in projects_with_tasks.keys():
tasks_dict.update({task: task for task in projects_with_tasks[project]})
if tags:
tags_dict = {tag: tag for tag in tags}
modified_time_entries = []
for time_entry in time_entries:
if users and time_entry.user in users_dict.keys():
time_entry.user = users_dict[time_entry.user]
if projects_with_tasks and time_entry.project in projects_dict.keys():
time_entry.project = projects_dict[time_entry.project]
if projects_with_tasks and time_entry.task in tasks_dict.keys():
time_entry.task = tasks_dict[time_entry.task]
if tags and time_entry.tags:
t_e_tags = []
for tag in time_entry.tags:
if tag.__hash__() in [t_e_t.__hash__() for t_e_t in time_entry.tags]:
t_e_tags.append(tags_dict[tag])
time_entry.tags = t_e_tags
modified_time_entries.append(time_entry)
return modified_time_entries
def get_time_entries(self, api_key: str, workspace: Workspace, user: User,
start_datetime, end_datetime, page_size) -> List[TimeEntry]:
"""Get all time entries for given workspace, user within datetime UTC interval
Parameters
----------
api_key: str
Clockify Api key
workspace: Workspace
Get time entries in this workspace
user : User
Get time entries for this user
start_datetime : datetime, UTC
start datetime for query
end_datetime : datetime, UTC
end datetime for query
page_size: int
Number of records in one response
Returns
-------
List[TimeEntry]
"""
params = {'start': ClockifyDatetime(start_datetime).clockify_datetime,
'end': ClockifyDatetime(end_datetime).clockify_datetime,
'page-size': page_size}
response = self.api_server.get(
path=f"/workspaces/{workspace.obj_id}/user/{user.obj_id}/time-entries",
api_key=api_key,
params=params
)
return [TimeEntry.init_from_dict(te) for te in response]
def add_time_entry(self, api_key: str, workspace: Workspace, time_entry: TimeEntry):
"""
Parameters
----------
api_key: str
Clockify Api key
workspace: Workspace
Get projects in this workspace
time_entry: TimeEntry
The time entry to add
Returns
-------
TimeEntry
The created time entry
"""
result = self.api_server.post(
path=f"/workspaces/{workspace.obj_id}/time-entries",
api_key=api_key,
data=time_entry.to_dict(),
)
return TimeEntry.init_from_dict(result)
def set_active_time_entry_end(
self, api_key: str, workspace: Workspace, user: User, end_time: datetime
):
"""Set the end time for the currently active entry
Parameters
----------
api_key: str
Clockify Api key
workspace: Workspace
Get projects in this workspace
user: User
The use for which to end the active time entry
end_time: datetime
Set the end time to this
Returns
-------
TimeEntry
The updated time entry, if an active one was found
None
If there was no active time entry (if a stopwatch was not running)
"""
try:
result = self.api_server.patch(
path=f"/workspaces/{workspace.obj_id}/user/{user.obj_id}/time-entries/",
api_key=api_key,
data={"end": str(ClockifyDatetime(end_time))},
)
except APIServer404:
return None
return TimeEntry.init_from_dict(result)'''
|
py | 1a3c6fb3080be9ce6e28e9357981995f57fef5ba | import json
import os
import requests # Install with easy_install or pip install
def get_release(version_tag):
print('Getting release metadata for {version_tag}...'.format(
version_tag=version_tag))
releases = requests.get(
'https://api.github.com/repos/facebook/buck/releases').json()
for data in releases:
if 'tag_name' in data and data['tag_name'] == version_tag:
return data
raise RuntimeError(
'Unable to find release for version {version_tag}!'.format(
version_tag=version_tag))
def upload_release(bottle_file, upload_url, github_token, content_type):
fname = os.path.basename(bottle_file)
upload_url = upload_url.replace('{?name,label}', '?name=') + fname
print('Uploading release to {url}...'.format(url=upload_url))
with open(bottle_file, 'rb') as bottle_bin:
r = requests.post(
upload_url,
auth=('token', github_token),
headers=content_type,
data=bottle_bin)
print(json.dumps(r.json(), indent=2))
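# A hedged sketch of how these helpers might be combined; the tag, token source and
# bottle filename are placeholders, and 'upload_url' is assumed to be present in the
# GitHub release metadata returned by get_release():
#
#   release = get_release('v2022.05.05.01')
#   upload_release(
#       bottle_file='buck.mojave.bottle.tar.gz',
#       upload_url=release['upload_url'],
#       github_token=os.environ['GITHUB_TOKEN'],
#       content_type={'Content-Type': 'application/gzip'})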
|
py | 1a3c70013ce9424f6ce3a3744fc57aec41811e6b | #!/usr/bin/env -S python3 -B
# Copyright (c) 2022 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import xmlrpc.client
IP = '127.0.0.1'
PORT = 9000
if sys.platform == 'linux':
IP = '10.10.10.5'
# Passing in sys.argv[2:] gets rid of the script name and key to the apps register. The remaining
# values in the list are key-value pairs, e.g. [option1, value1, option2, value2, ...]
with xmlrpc.client.ServerProxy('http://' + IP + ':' + str(PORT) + '/', allow_none=True) as proxy:
proxy.start(sys.argv[1], sys.argv[2:])
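# Example invocation (a sketch; the app key and the option/value pairs are placeholders
# that must match whatever the XML-RPC apps register on the target host exposes):
#
#   python3 <this_script>.py <app-key> option1 value1 option2 value2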
|
py | 1a3c70ddd0ed125366b0b6ecac088d8c91ffda2f | from django import forms
from common.forms import C2Form
from utilities.forms import ConnectionInfoForm
from xui.cloudendure.cloudendure_admin import CloudEndureManager
class CloudEndureConnectionForm(ConnectionInfoForm):
protocol = forms.ChoiceField(
choices=[('http', 'HTTP'), ('https', 'HTTPS')], label='Protocol')
def __init__(self, *args, **kwargs):
super(CloudEndureConnectionForm, self).__init__(*args, **kwargs)
self.fields["name"].widget = forms.HiddenInput()
self.fields["name"].initial = "CloudEndure Endpoint"
if not self.initial_instance:
self.fields["protocol"].initial = "https"
self.fields["port"].initial = "443"
# Remove connection info fields we do not need in CloudEndure credentials
del self.fields['ssh_key']
del self.fields["use_auth_headers"]
del self.fields["headers"]
# mark all fields as required
for field in list(self.fields.values()):
field.required = True
def save(self, *args, **kwargs):
credentials = super(CloudEndureConnectionForm,
self).save(*args, **kwargs)
return credentials
def clean(self):
ce = CloudEndureManager()
if not ce.verify_connection(
self.cleaned_data.get('protocol'),
self.cleaned_data.get('ip'),
self.cleaned_data.get('username'),
self.cleaned_data.get('port'),
self.cleaned_data.get('password'),
):
raise forms.ValidationError(
"Unable to connect to CloudEndure Management Endpoint using the parameters provided ")
return self.cleaned_data
class CloudEndureProjectSelectForm(C2Form):
cemanager = CloudEndureManager()
my_projects = cemanager.get_all_projects()
    # build a list of (name, name) choice tuples from all projects
projects_tuple = [(name, name) for name in iter(my_projects)]
project = forms.ChoiceField(choices=projects_tuple, label='Select Project')
class CloudEndureProjectNameForm(C2Form):
cemanager = CloudEndureManager()
all_clouds = cemanager.get_all_clouds()
cloud = forms.ChoiceField(choices=all_clouds, label='Target Cloud')
project_name = forms.CharField(label="Project Name")
public_key = forms.CharField(label="AWS Access Key")
private_key = forms.CharField(label="AWS Access Key Secret")
class CloudEndureLaunchTypeForm(C2Form):
launch_types = ["TEST", "RECOVERY", "CUTOVER", "DEBUG"]
launch_type = forms.ChoiceField(
choices=[(launch, launch) for launch in launch_types], label='Launch Type')
|
py | 1a3c70ffbb695089d15538f55653b1a2b913ad43 | import math
from datetime import datetime, timedelta
from .data_types import (
Header, FileControl, BatchHeader,
BatchControl, EntryDetail, AddendaRecord
)
class AchFile(object):
"""
This class is what stores the ach data. Its main external methods
are `add_batch` and `render_to_string`.
"""
def __init__(self, file_id_mod, settings):
"""
The file_id_mod should be 'A' for the first of the day, 'B'
for the second and so on.
"""
self.settings = settings
try:
self.header = Header(
settings['immediate_dest'],
settings['immediate_org'], file_id_mod,
settings['immediate_dest_name'], settings['immediate_org_name']
)
except KeyError:
raise Exception(
'Settings require: "immediate_dest", "immediate_org", \
immediate_dest_name", and "immediate_org_name"'
)
self.batches = list()
def add_batch(self, std_ent_cls_code, batch_entries=None,
credits=True, debits=False, eff_ent_date=None,
company_id=None):
"""
Use this to add batches to the file. For valid std_ent_cls_codes see:
http://en.wikipedia.org/wiki/Automated_Clearing_House#SEC_codes
"""
if batch_entries is None:
batch_entries = list()
entry_desc = self.get_entry_desc(std_ent_cls_code)
batch_count = len(self.batches) + 1
if not eff_ent_date:
eff_ent_date = datetime.today() + timedelta(days=1)
if credits and debits:
serv_cls_code = '200'
elif credits:
serv_cls_code = '220'
elif debits:
serv_cls_code = '225'
batch_header = BatchHeader(
serv_cls_code=serv_cls_code,
batch_id=batch_count,
company_id=company_id or self.settings['company_id'],
std_ent_cls_code=std_ent_cls_code,
entry_desc=entry_desc,
desc_date='',
eff_ent_date=eff_ent_date.strftime('%y%m%d'), # YYMMDD
orig_stat_code='1',
orig_dfi_id=self.settings['immediate_dest'][:8],
company_name=self.settings['immediate_org_name']
)
entries = list()
entry_counter = 1
for record in batch_entries:
entry = EntryDetail(std_ent_cls_code)
entry.transaction_code = record.get('type')
entry.recv_dfi_id = record.get('routing_number')
if len(record['routing_number']) < 9:
entry.calc_check_digit()
else:
entry.check_digit = record['routing_number'][8]
entry.dfi_acnt_num = record['account_number']
entry.amount = int(round(float(record['amount']) * 100))
entry.ind_name = record['name'].upper()[:22]
entry.trace_num = self.settings['immediate_dest'][:8] \
+ entry.validate_numeric_field(entry_counter, 7)
entries.append((entry, record.get('addenda', [])))
entry_counter += 1
self.batches.append(FileBatch(batch_header, entries))
self.set_control()
def set_control(self):
batch_count = len(self.batches)
block_count = self.get_block_count(self.batches)
entry_hash = self.get_entry_hash(self.batches)
entadd_count = self.get_entadd_count(self.batches)
debit_amount = self.get_debit_amount(self.batches)
credit_amount = self.get_credit_amount(self.batches)
self.control = FileControl(
batch_count, block_count, entadd_count,
entry_hash, debit_amount, credit_amount
)
def get_block_count(self, batches):
return int(math.ceil(self.get_lines(batches) / 10.0))
def get_lines(self, batches):
header_count = 1
control_count = 1
batch_header_count = len(batches)
batch_footer_count = batch_header_count
entadd_count = self.get_entadd_count(batches)
lines = header_count + control_count + batch_header_count \
+ batch_footer_count + entadd_count
return lines
def get_entadd_count(self, batches):
entadd_count = 0
for batch in batches:
entadd_count = entadd_count + int(batch.batch_control.entadd_count)
return entadd_count
def get_entry_hash(self, batches):
entry_hash = 0
for batch in batches:
entry_hash = entry_hash + int(batch.batch_control.entry_hash)
if len(str(entry_hash)) > 10:
pos = len(str(entry_hash)) - 10
entry_hash = str(entry_hash)[pos:]
else:
entry_hash = str(entry_hash)
return entry_hash
def get_debit_amount(self, batches):
debit_amount = 0
for batch in batches:
debit_amount = debit_amount + int(batch.batch_control.debit_amount)
return debit_amount
def get_credit_amount(self, batches):
credit_amount = 0
for batch in batches:
credit_amount = credit_amount + \
int(batch.batch_control.credit_amount)
return credit_amount
def get_nines(self, rows, line_ending):
nines = ''
for i in range(rows):
nines += '9'*94
if i == rows - 1:
continue
nines += line_ending
return nines
def get_entry_desc(self, std_ent_cls_code):
if std_ent_cls_code == 'PPD':
entry_desc = 'PAYROLL'
elif std_ent_cls_code == 'CCD':
entry_desc = 'DUES'
else:
entry_desc = 'OTHER'
return entry_desc
def render_to_string(self, force_crlf=False):
"""
Renders a nacha file as a string
"""
line_ending = "\n"
if force_crlf:
line_ending = "\r\n"
ret_string = self.header.get_row() + line_ending
for batch in self.batches:
ret_string += batch.render_to_string(force_crlf=force_crlf)
ret_string += self.control.get_row() + line_ending
lines = self.get_lines(self.batches)
nine_lines = int(round(10 * (math.ceil(lines / 10.0) - (lines / 10.0))))
ret_string += self.get_nines(nine_lines, line_ending)
return ret_string
class FileBatch(object):
"""
Holds:
BatchHeader (1)
Entry (n) <-- multiple
BatchControl (1)
"""
def __init__(self, batch_header, entries):
"""
args: batch_header (BatchHeader), entries (List[FileEntry])
"""
entadd_count = 0
self.batch_header = batch_header
self.entries = []
for entry, addenda in entries:
entadd_count += 1
entadd_count += len(addenda)
self.entries.append(FileEntry(entry, addenda))
        # set up the batch control record
batch_control = BatchControl(self.batch_header.serv_cls_code)
batch_control.entadd_count = entadd_count
batch_control.entry_hash = self.get_entry_hash(self.entries)
batch_control.debit_amount = self.get_debit_amount(self.entries)
batch_control.credit_amount = self.get_credit_amount(self.entries)
batch_control.company_id = self.batch_header.company_id
batch_control.orig_dfi_id = self.batch_header.orig_dfi_id
batch_control.batch_id = self.batch_header.batch_id
self.batch_control = batch_control
def get_entry_hash(self, entries):
entry_hash = 0
for entry in entries:
entry_hash += int(entry.entry_detail.recv_dfi_id[:8])
if len(str(entry_hash)) > 10:
pos = len(str(entry_hash)) - 10
entry_hash = str(entry_hash)[pos:]
else:
entry_hash = str(entry_hash)
return entry_hash
def get_debit_amount(self, entries):
debit_amount = 0
for entry in entries:
if str(entry.entry_detail.transaction_code) in \
['27', '37', '28', '38']:
debit_amount = debit_amount + int(entry.entry_detail.amount)
return debit_amount
def get_credit_amount(self, entries):
credit_amount = 0
for entry in entries:
if str(entry.entry_detail.transaction_code) in \
['22', '32', '23', '33']:
credit_amount += int(entry.entry_detail.amount)
return credit_amount
def render_to_string(self, force_crlf=False):
"""
Renders a nacha file batch to string
"""
line_ending = "\n"
if force_crlf:
line_ending = "\r\n"
ret_string = self.batch_header.get_row() + line_ending
for entry in self.entries:
ret_string += entry.render_to_string(force_crlf=force_crlf)
ret_string += self.batch_control.get_row() + line_ending
return ret_string
class FileEntry(object):
"""
Holds:
EntryDetail (1)
AddendaRecord (n) <-- for some types of entries there can be more than one
"""
def __init__(self, entry_detail, addenda_record=[]):
"""
args: entry_detail( EntryDetail), addenda_record (List[AddendaRecord])
"""
self.entry_detail = entry_detail
self.addenda_record = []
for index, addenda in enumerate(addenda_record):
self.addenda_record.append(
AddendaRecord(
self.entry_detail.std_ent_cls_code,
pmt_rel_info=addenda.get('payment_related_info').upper(),
add_seq_num=index + 1,
ent_det_seq_num=entry_detail.trace_num[-7:]
)
)
if self.addenda_record:
self.entry_detail.add_rec_ind = 1
def render_to_string(self, force_crlf=False):
"""
Renders a nacha batch entry and addenda to string
"""
line_ending = "\n"
if force_crlf:
line_ending = "\r\n"
ret_string = self.entry_detail.get_row() + line_ending
for addenda in self.addenda_record:
ret_string += addenda.get_row() + line_ending
return ret_string
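# Minimal usage sketch. Assumptions: the settings values and the single
# credit entry below are illustrative placeholders, and the row formatting
# itself is delegated to this package's ``data_types`` records.
if __name__ == '__main__':  # pragma: no cover
    _settings = {
        'immediate_dest': '123456789',
        'immediate_org': '987654321',
        'immediate_dest_name': 'YOUR BANK',
        'immediate_org_name': 'YOUR COMPANY',
        'company_id': '1234567890',
    }
    _ach = AchFile('A', _settings)
    _ach.add_batch('PPD', [{
        'type': '22',                   # checking credit
        'routing_number': '123456789',
        'account_number': '111222333',
        'amount': '12.34',              # dollars; converted to cents internally
        'name': 'Alice Example',
    }])
    print(_ach.render_to_string())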
|
py | 1a3c71747f619990caf7df2ec9f61c2174ceb969 | # Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum, IntEnum
from typing import TYPE_CHECKING, overload
from ...exceptions import EmbedOverflow
from ...utils.api_object import APIObject
from ...utils.conversion import construct_client_dict
from ...utils.types import MISSING
if TYPE_CHECKING:
from typing import List, Optional
from ..message.attachment import Attachment
from ..message.component import MessageComponent
from ..message.embed import Embed
from ..message.user_message import UserMessage
from ..message.user_message import AllowedMentions
from ..user.user import User
from ..guild.guild import Guild
from ..guild.channel import Channel
from ...utils.types import APINullable
from ...utils.snowflake import Snowflake
from ...client import Client
class WebhookCompatibility(Enum):
GitHub = "github"
Slack = "slack"
Default = ""
class WebhookType(IntEnum):
"""Represents the type of webhook.
Attributes
----------
INCOMING:
Incoming Webhooks can post messages to channel with a
generated token.
CHANNEL_FOLLOWER:
Channel Follower Webhooks are internal webhooks used with
Channel Following to post new messages into channels.
APPLICATION:
Application webhooks are webhooks used with Interactions
"""
INCOMING = 1
CHANNEL_FOLLOWER = 2
APPLICATION = 3
@dataclass(repr=False)
class Webhook(APIObject):
"""Represents a Discord channel webhook.
Attributes
----------
id: :class:`~pincer.utils.snowflake.Snowflake`
The id of the webhook
type: :class:`~pincer.objects.guild.webhook.WebhookType`
The type of the webhook
channel_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
The channel id this webhook is for, if any
name: Optional[:class:`str`]
The default name of the webhook
avatar: Optional[:class:`str`]
The default user avatar hash of the webhook
application_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
The bot/OAuth2 application that created this webhook
user: APINullable[:class:`~pincer.objects.user.user.User`]
The user this webhook was created by
(not returned when getting a webhook with its token)
token: APINullable[:class:`str`]
The secure token of the webhook
(returned for Incoming Webhooks)
source_guild: APINullable[:class:`~pincer.objects.guild.guild.Guild`]
The guild of the channel that this webhook is following
(returned for Channel Follower Webhooks)
source_channel: APINullable[:class:`~pincer.objects.guild.channel.Channel`]
The channel that this webhook is following
(returned for Channel Follower Webhooks)
url: APINullable[:class:`str`]
The url used for executing the webhook
(returned by the webhooks OAuth2 flow)
guild_id: APINullable[Optional[:class:`~pincer.objects.guild.guild.Guild`]]
The guild id this webhook is for, if any
"""
id: Snowflake
type: WebhookType
channel_id: Optional[Snowflake] = None
name: Optional[str] = None
avatar: Optional[str] = None
application_id: Optional[Snowflake] = None
user: APINullable[User] = MISSING
token: APINullable[str] = MISSING
source_guild: APINullable[Guild] = MISSING
source_channel: APINullable[Channel] = MISSING
url: APINullable[str] = MISSING
guild_id: APINullable[Optional[Snowflake]] = MISSING
async def edit(
self,
*,
name: Optional[str] = None,
avatar: Optional[str] = None,
channel_id: Optional[Snowflake] = None,
token: Optional[str] = None
) -> Webhook:
"""
Modifies a webhook and returns it.
Requires the ``MANAGE_WEBHOOKS`` permission.
Parameters
----------
name: Optional[:class:`str`]
The new name of the webhook
avatar: Optional[:class:`str`]
The new avatar hash of the webhook
channel_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
The new channel id this webhook is for
token: Optional[:class:`str`]
The new token of the webhook
"""
request_route = (
f"webhooks/{self.id}"
+ (f"/{token}" if token else "")
)
request_data = {
"name": name,
"avatar": avatar,
"channel_id": channel_id
}
if token:
del request_data["channel_id"]
data = await self._http.patch(
request_route,
data=request_data
)
return Webhook.from_dict(
construct_client_dict(self._client, data)
)
async def delete(self, token: Optional[str] = None):
"""
Deletes a webhook.
Requires the ``MANAGE_WEBHOOKS`` permission.
Parameters
----------
token: Optional[:class:`str`]
The token of the webhook
"""
await self._http.delete(
f"webhooks/{self.id}"
+ (f"/{token}" if token else "")
)
@overload
async def execute(
self,
webhook_compatibility: WebhookCompatibility = WebhookCompatibility.Default, # noqa: E501
*,
thread_id: Optional[Snowflake] = None,
wait: Optional[bool] = None,
content: Optional[str] = None,
username: Optional[str] = None,
avatar_url: Optional[str] = None,
tts: Optional[bool] = None,
embeds: Optional[List[Embed]] = None,
allowed_mentions: Optional[AllowedMentions] = None,
components: Optional[List[MessageComponent]] = None,
files: Optional[str] = None, # TODO: Add support for files
payload_json: Optional[str] = None,
attachments: Optional[List[Attachment]] = None
):
"""|coro|
Executes a webhook.
Note that when sending a message, you must provide a value
for at least one of ``content``, ``embeds``, or ``file``.
Parameters
----------
webhook_compatibility: :class:`~pincer.objects.guild.webhook.WebhookCompatibility`
The compatibility of the webhook
thread_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
ID of the thread to send message in
wait: Optional[:class:`bool`]
Waits for server confirmation of message send before
response (defaults to ``true``, when ``false`` a message
that is not saved does not return an error)
content: Optional[:class:`str`]
The message contents (up to 2000 characters)
username: Optional[:class:`str`]
Override the default username of the webhook
avatar_url: Optional[:class:`str`]
Override the default avatar of the webhook
tts: Optional[:class:`bool`]
True if this is a TTS message
embeds: Optional[List[:class:`~pincer.objects.message.embed.Embed`]]
Embedded ``rich`` content, up to 10 embeds
allowed_mentions: Optional[:class:`~pincer.objects.message.user_message.AllowedMentions`]
Allowed mentions for the message
components: Optional[List[:class:`~pincer.objects.message.component.MessageComponent`]]
The components to include in the message
files: Optional[:class:`str`]
The contents of the file being sent
payload_json: Optional[:class:`str`]
JSON encoded body of non-file params
attachments: Optional[List[:class:`~pincer.objects.message.attachment.Attachment`]]
Attachment objects with filename and description
"""
...
async def execute(
self,
webhook_compatibility: WebhookCompatibility = WebhookCompatibility.Default, # noqa: E501
*,
thread_id: Optional[Snowflake] = None,
wait: Optional[bool] = None,
**kwargs
):
if len(kwargs.get("embeds", [])) > 10:
raise EmbedOverflow("You can only include up to 10 embeds")
request_route = f"webhooks/{self.id}/{self.token}"
# Adding the subdirectory
if webhook_compatibility.value:
request_route += f"/{webhook_compatibility.value}"
# Adding query params
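        # Below, "&?"[wait is None] picks '?' when this is the first query
        # parameter (no 'wait' was appended) and '&' otherwise.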
if wait is not None:
request_route += f"?{wait=}"
if thread_id is not None:
request_route += "&?"[wait is None] + f"{thread_id=}"
if webhook_compatibility == WebhookCompatibility.Default:
request_data = kwargs
else:
request_data = None
await self._http.post(request_route, data=request_data)
async def execute_github(
self,
*,
thread_id: Optional[Snowflake] = None,
wait: Optional[bool] = None
):
"""|coro|
Executes a GitHub compatible webhook.
Parameters
----------
thread_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
ID of the thread to send message in
wait: Optional[:class:`bool`]
Waits for server confirmation of message send before
response (defaults to ``true``, when ``false`` a message
that is not saved does not return an error)
"""
await self.execute(
WebhookCompatibility.GitHub,
thread_id=thread_id,
wait=wait
)
async def execute_slack(
self,
*,
thread_id: Optional[Snowflake] = None,
wait: Optional[bool] = None
):
"""|coro|
Executes a Slack compatible webhook.
Parameters
----------
thread_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
ID of the thread to send message in
wait: Optional[:class:`bool`]
Waits for server confirmation of message send before
response (defaults to ``true``, when ``false`` a message
that is not saved does not return an error)
"""
await self.execute(
WebhookCompatibility.Slack,
thread_id=thread_id,
wait=wait
)
async def get_message(
self,
message_id: Snowflake,
thread_id: Snowflake
) -> UserMessage:
"""|coro|
Returns a previously-sent webhook message from the same token.
Parameters
----------
message_id: :class:`~pincer.utils.snowflake.Snowflake`
The ID of the message to get
thread_id: :class:`~pincer.utils.snowflake.Snowflake`
The ID of the thread to get the message from
Returns
-------
        :class:`~pincer.objects.message.user_message.UserMessage`
            The requested message
"""
return UserMessage.from_dict(
construct_client_dict(
self._client,
await self._http.get(
f"webhooks/{self.id}/{self.token}/messages/{message_id}",
params={"thread_id": thread_id}
)
)
)
async def delete_message(
self,
message_id: Snowflake,
thread_id: Snowflake
):
"""|coro|
Deletes a message created by a webhook.
Parameters
----------
message_id: :class:`~pincer.utils.snowflake.Snowflake`
The ID of the message to delete
thread_id: :class:`~pincer.utils.snowflake.Snowflake`
The ID of the thread to delete the message from
"""
await self._http.delete(
f"webhooks/{self.id}/{self.token}/messages/{message_id}"
+ (f"?{thread_id=}" if thread_id else "")
)
@overload
async def edit_message(
self,
message_id: Snowflake,
*,
thread_id: Optional[Snowflake] = None,
content: Optional[str] = None,
embeds: Optional[List[Embed]] = None,
allowed_mentions: Optional[AllowedMentions] = None,
components: Optional[List[MessageComponent]] = None,
files: Optional[str] = None, # TODO: Add support for files
payload_json: Optional[str] = None,
attachments: Optional[List[Attachment]] = None
) -> UserMessage:
"""|coro|
Edits a previously-sent webhook message from the same token.
Parameters
----------
message_id: :class:`~pincer.utils.snowflake.Snowflake`
The ID of the message to edit
thread_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
ID of the thread the message is in
content: Optional[:class:`str`]
The new content of the message (up to 2000 characters)
embeds: Optional[List[:class:`~pincer.objects.message.embed.Embed`]]
Embedded ``rich`` content, up to 10 embeds
allowed_mentions: Optional[:class:`~pincer.objects.message.user_message.AllowedMentions`]
Allowed mentions for the message
components: Optional[List[:class:`~pincer.objects.message.component.MessageComponent`]]
The components to include in the message
files: Optional[:class:`str`]
The contents of the file being sent/edited
payload_json: Optional[:class:`str`]
JSON encoded body of non-file params
(multipart/form-data only)
attachments: Optional[List[:class:`~pincer.objects.message.attachment.Attachment`]]
Attached files to keep and
possible descriptions for new files
"""
...
async def edit_message(
self,
message_id: Snowflake,
*,
thread_id: Optional[Snowflake] = None,
**kwargs
) -> UserMessage:
if len(kwargs.get("embeds", [])) > 10:
raise EmbedOverflow("You can only include up to 10 embeds")
data = await self._http.patch(
f"webhooks/{self.id}/{self.token}/messages/{message_id}"
+ (f"?{thread_id=}" if thread_id else ""),
data=kwargs
)
return UserMessage.from_dict(
construct_client_dict(self._client, data)
)
@classmethod
async def from_id(
cls,
client: Client,
id: Snowflake,
token: Optional[str] = None
) -> Webhook:
"""|coro|
Gets a webhook by its ID.
Parameters
----------
        client: :class:`~pincer.client.Client`
            The client to use to make the request.
        id: :class:`~pincer.utils.snowflake.Snowflake`
            The ID of the webhook to get.
        token: Optional[:class:`str`]
            The token of the webhook to get.
        Returns
        -------
        :class:`~pincer.objects.guild.webhook.Webhook`
The webhook with the given ID.
"""
return cls.from_dict(
construct_client_dict(
client,
await client.http.get(
f"webhooks/{id}"
+ (f"/{token}" if token else "")
)
)
)
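# Usage sketch (assumptions: an already-connected ``Client`` instance and a
# real webhook id/token; the literal values below are placeholders):
#
#   webhook = await Webhook.from_id(client, webhook_id, token="hook-token")
#   await webhook.execute(content="Hello from a webhook!")
#   await webhook.edit(name="renamed-hook", token="hook-token")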
|
py | 1a3c7193f84bbfa2c8101683deb6ea2daf0d452c | # -*- coding: utf-8 -*-
""" pykwalify """
# python stdlib
import logging
import logging.config
import os
__author__ = 'Grokzen <[email protected]>'
__version_info__ = (1, 8, 0)
__version__ = '.'.join(map(str, __version_info__))
log_level_to_string_map = {
5: "DEBUG",
4: "INFO",
3: "WARNING",
2: "ERROR",
1: "CRITICAL",
0: "INFO"
}
def init_logging(log_level):
"""
Init logging settings with default set to INFO
"""
log_level = log_level_to_string_map[min(log_level, 5)]
msg = "%(levelname)s - %(name)s:%(lineno)s - %(message)s" if log_level in os.environ else "%(levelname)s - %(message)s"
logging_conf = {
"version": 1,
"root": {
"level": log_level,
"handlers": ["console"]
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": log_level,
"formatter": "simple",
"stream": "ext://sys.stdout"
}
},
"formatters": {
"simple": {
"format": " {0}".format(msg)
}
}
}
logging.config.dictConfig(logging_conf)
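# Example (sketch): ``init_logging(3)`` maps to WARNING via
# ``log_level_to_string_map`` and installs a simple stdout handler
# through ``logging.config.dictConfig``.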
partial_schemas = {}
|
py | 1a3c71a0d2c22df011f63a7cd4570060dc6f38d7 | # -*- coding:utf-8 -*-
import logging
def en_logging(log_file, log_level):
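    """Configure root logging to write to ``log_file`` at ``log_level``
    ('debug', 'info', 'warn', 'error' or 'fatal'; anything else falls back
    to INFO). The file is opened with filemode='w', so it is truncated on
    every call.
    """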
level = 0
if log_level == "debug":
level = logging.DEBUG
elif log_level == "info":
level = logging.INFO
elif log_level == "warn":
level = logging.WARN
elif log_level == "error":
level = logging.ERROR
elif log_level == "fatal":
level = logging.FATAL
else:
level = logging.INFO
logging.basicConfig(filename=log_file, format='%(asctime)s:%(filename)s:%(lineno)d:%(levelname)s: %(message)s',
filemode='w', level=level)
|
py | 1a3c71dd290d3184be803de1baf601842c0766b5 | # -*- coding: utf-8 -*-
import pytest
import gevent
from raiden.utils import sha3
from raiden.api.python import RaidenAPI
from raiden.messages import (
decode,
Ack,
Ping,
)
from raiden.tests.utils.transport import UnreliableTransport
from raiden.tests.utils.messages import setup_messages_cb
from raiden.tests.utils.transfer import channel
from raiden.tests.fixtures.raiden_network import CHAIN
@pytest.mark.parametrize('number_of_nodes', [2])
def test_ping(raiden_network):
app0, app1 = raiden_network # pylint: disable=unbalanced-tuple-unpacking
messages = setup_messages_cb()
ping_message = Ping(nonce=0)
app0.raiden.sign(ping_message)
ping_encoded = ping_message.encode()
async_result = app0.raiden.protocol.send_raw_with_result(
ping_encoded,
app1.raiden.address,
)
assert async_result.wait(2), 'The message was not acknowledged'
expected_echohash = sha3(ping_encoded + app1.raiden.address)
messages_decoded = [decode(m) for m in messages]
ack_message = next(
decoded
for decoded in messages_decoded
if isinstance(decoded, Ack) and decoded.echo == expected_echohash
)
# the ping message was sent and acknowledged
assert ping_encoded in messages
assert ack_message
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('transport_class', [UnreliableTransport])
def test_ping_unreachable(raiden_network):
app0, app1 = raiden_network # pylint: disable=unbalanced-tuple-unpacking
# drop everything to force disabling of re-sends
app0.raiden.protocol.transport.droprate = 1
app1.raiden.protocol.transport.droprate = 1
app0.raiden.protocol.retry_interval = 0.1 # for fast tests
messages = setup_messages_cb()
ping_message = Ping(nonce=0)
app0.raiden.sign(ping_message)
ping_encoded = ping_message.encode()
async_result = app0.raiden.protocol.send_raw_with_result(
ping_encoded,
app1.raiden.address,
)
assert async_result.wait(2) is None, "The message was dropped, it can't be acknowledged"
# Raiden node will start pinging as soon as a new channel
# is established. We need to test if
# a) there is our original message in the queue
    # b) there are only Ping message types in the queue
messages_decoded = [decode(m) for m in messages]
assert ping_message in messages_decoded
for message in messages_decoded:
assert isinstance(message, Ping)
@pytest.mark.parametrize('deposit', [0])
def test_receive_direct_before_deposit(raiden_network):
"""Regression test that ensures we accept incoming direct transfers, even if we don't have
any back channel balance. """
app0, app1, _ = raiden_network
token_address = app0.raiden.default_registry.token_addresses()[0]
channel_0_1 = channel(app0, app1, token_address)
back_channel = channel(app1, app0, token_address)
assert not channel_0_1.can_transfer
assert not back_channel.can_transfer
deposit_amount = 2
transfer_amount = 1
api0 = RaidenAPI(app0.raiden)
api0.deposit(token_address, app1.raiden.address, deposit_amount)
app0.raiden.chain.next_block()
gevent.sleep(app0.raiden.alarm.wait_time)
assert channel_0_1.can_transfer
assert not back_channel.can_transfer
assert back_channel.distributable == 0
api0.transfer_and_wait(token_address, transfer_amount, app1.raiden.address)
gevent.sleep(app1.raiden.alarm.wait_time)
assert back_channel.can_transfer
assert back_channel.distributable == transfer_amount
@pytest.mark.parametrize('deposit', [0])
@pytest.mark.parametrize('channels_per_node', [CHAIN])
def test_receive_mediated_before_deposit(raiden_network, token_addresses):
"""Regression test that ensures we accept incoming mediated transfers, even if we don't have
any back channel balance. """
app_alice, app_bob, app_charlie = raiden_network
token_address = token_addresses[0]
# path alice -> bob -> charlie
alice_bob = channel(app_alice, app_bob, token_address)
bob_alice = channel(app_bob, app_alice, token_address)
bob_charlie = channel(app_bob, app_charlie, token_address)
charlie_bob = channel(app_charlie, app_bob, token_address)
# ensure alice charlie is mediated
with pytest.raises(KeyError):
channel(app_alice, app_charlie, token_address)
assert not alice_bob.can_transfer
assert not bob_charlie.can_transfer
assert not bob_alice.can_transfer
deposit_amount = 3
RaidenAPI(app_alice.raiden).deposit(
token_address,
app_bob.raiden.address,
deposit_amount,
)
RaidenAPI(app_bob.raiden).deposit(
token_address,
app_charlie.raiden.address,
deposit_amount,
)
# catch up with the Balance events
for app in raiden_network:
app.raiden.poll_blockchain_events()
assert alice_bob.can_transfer
assert bob_charlie.can_transfer
assert not bob_alice.can_transfer
assert alice_bob.distributable == deposit_amount
assert bob_charlie.distributable == deposit_amount
transfer_amount = 1
async_result = app_alice.raiden.mediated_transfer_async(
token_address,
transfer_amount,
app_charlie.raiden.address,
1,
)
assert async_result.wait(10)
# give extra time for the intermediaries to process the secret messages and
# withdraw the tokens
gevent.sleep(1)
assert alice_bob.distributable == deposit_amount - transfer_amount
assert bob_charlie.distributable == deposit_amount - transfer_amount
assert bob_alice.distributable == transfer_amount
assert charlie_bob.distributable == transfer_amount
assert alice_bob.can_transfer
assert bob_alice.can_transfer
assert charlie_bob.can_transfer
|
py | 1a3c73567fbd3780b8d79dd08e6037c852b12992 | # coding=utf-8
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cached tf.Transform analysis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import os
# GOOGLE-INITIALIZATION
import apache_beam as beam
from apache_beam.testing import util as beam_test_util
import numpy as np
import six
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_transform import analyzer_nodes
from tensorflow_transform import impl_helper
from tensorflow_transform import nodes
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.beam import analysis_graph_builder
from tensorflow_transform.beam import analyzer_cache
from tensorflow_transform import test_case
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import schema_utils
def _get_counter_value(metrics, name):
metric = metrics.query(
beam.metrics.metric.MetricsFilter().with_name(name))['counters']
committed = sum([r.committed for r in metric])
attempted = sum([r.attempted for r in metric])
assert committed == attempted, '{} != {}'.format(committed, attempted)
return committed
class _TestPipeline(beam.Pipeline):
@property
def has_ran(self):
return hasattr(self, '_run_result')
@property
def metrics(self):
if not self.has_ran:
raise RuntimeError('Pipeline has to run before accessing its metrics')
return self._run_result.metrics()
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_type:
assert not self.has_ran
self._run_result = self.run()
self._run_result.wait_until_finish()
def _preprocessing_fn_for_common_optimize_traversal(inputs):
_ = tft.vocabulary(inputs['s'])
x = inputs['x']
x_mean = tft.mean(x, name='x')
x_square_deviations = tf.square(x - x_mean)
# 2nd analysis phase defined here.
x_var = tft.mean(x_square_deviations, name='x_square_deviations')
x_normalized = (x - x_mean) / tf.sqrt(x_var)
return {'x_normalized': x_normalized}
_OPTIMIZE_TRAVERSAL_COMMON_CASE = dict(
testcase_name='common',
feature_spec={
'x': tf.io.FixedLenFeature([], tf.float32),
's': tf.io.FixedLenFeature([], tf.string)
},
preprocessing_fn=_preprocessing_fn_for_common_optimize_traversal,
dataset_input_cache_dict={
b'__v0__CacheableCombineAccumulate[x/mean_and_var]-/Y\xe8\xd6\x1a\xb8OxZ_\xb4\xbes\x17AK&mXg':
'cache hit',
},
expected_dot_graph_str=r"""digraph G {
directed=True;
node [shape=Mrecord];
"CreateSavedModelForAnalyzerInputs[0]" [label="{CreateSavedModel|table_initializers: 0|output_signature: OrderedDict([('vocabulary/Reshape', \"Tensor\<shape: [None], \<dtype: 'string'\>\>\"), ('x/mean_and_var/Cast', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x/mean_and_var/truediv', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x/mean_and_var/truediv_1', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x/mean_and_var/zeros', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\")])|label: CreateSavedModelForAnalyzerInputs[0]}"];
"ApplySavedModel[0][span-0]" [label="{ApplySavedModel|dataset_key: span-0|phase: 0|label: ApplySavedModel[0][span-0]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[0]" -> "ApplySavedModel[0][span-0]";
"TensorSource[vocabulary][span-0]" [label="{ExtractFromDict|keys: ('vocabulary/Reshape',)|label: TensorSource[vocabulary][span-0]|partitionable: True}"];
"ApplySavedModel[0][span-0]" -> "TensorSource[vocabulary][span-0]";
"VocabularyAccumulate[vocabulary][span-0]" [label="{VocabularyAccumulate|vocab_ordering_type: 1|input_dtype: string|label: VocabularyAccumulate[vocabulary][span-0]|partitionable: True}"];
"TensorSource[vocabulary][span-0]" -> "VocabularyAccumulate[vocabulary][span-0]";
"ApplySavedModel[0][span-1]" [label="{ApplySavedModel|dataset_key: span-1|phase: 0|label: ApplySavedModel[0][span-1]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[0]" -> "ApplySavedModel[0][span-1]";
"TensorSource[vocabulary][span-1]" [label="{ExtractFromDict|keys: ('vocabulary/Reshape',)|label: TensorSource[vocabulary][span-1]|partitionable: True}"];
"ApplySavedModel[0][span-1]" -> "TensorSource[vocabulary][span-1]";
"VocabularyAccumulate[vocabulary][span-1]" [label="{VocabularyAccumulate|vocab_ordering_type: 1|input_dtype: string|label: VocabularyAccumulate[vocabulary][span-1]|partitionable: True}"];
"TensorSource[vocabulary][span-1]" -> "VocabularyAccumulate[vocabulary][span-1]";
"FlattenCache[VocabularyMerge[vocabulary]]" [label="{Flatten|label: FlattenCache[VocabularyMerge[vocabulary]]|partitionable: True}"];
"VocabularyAccumulate[vocabulary][span-0]" -> "FlattenCache[VocabularyMerge[vocabulary]]";
"VocabularyAccumulate[vocabulary][span-1]" -> "FlattenCache[VocabularyMerge[vocabulary]]";
"VocabularyMerge[vocabulary]" [label="{VocabularyMerge|vocab_ordering_type: 1|use_adjusted_mutual_info: False|min_diff_from_avg: None|label: VocabularyMerge[vocabulary]}"];
"FlattenCache[VocabularyMerge[vocabulary]]" -> "VocabularyMerge[vocabulary]";
"VocabularyOrderAndFilter[vocabulary]" [label="{VocabularyOrderAndFilter|top_k: None|frequency_threshold: None|coverage_top_k: None|coverage_frequency_threshold: None|key_fn: None|label: VocabularyOrderAndFilter[vocabulary]}"];
"VocabularyMerge[vocabulary]" -> "VocabularyOrderAndFilter[vocabulary]";
"VocabularyWrite[vocabulary]" [label="{VocabularyWrite|vocab_filename: vocab_vocabulary|store_frequency: False|input_dtype: string|label: VocabularyWrite[vocabulary]|fingerprint_shuffle: False}"];
"VocabularyOrderAndFilter[vocabulary]" -> "VocabularyWrite[vocabulary]";
"CreateTensorBinding[vocabulary/Placeholder]" [label="{CreateTensorBinding|tensor: vocabulary/Placeholder:0|is_asset_filepath: True|label: CreateTensorBinding[vocabulary/Placeholder]}"];
"VocabularyWrite[vocabulary]" -> "CreateTensorBinding[vocabulary/Placeholder]";
"DecodeCache[span-0][CacheableCombineAccumulate[x/mean_and_var]]" [label="{DecodeCache|dataset_key: span-0|cache_key: \<bytes\>|cache_entry_identifier: CacheableCombineAccumulate[x/mean_and_var]|coder: \<JsonNumpyCacheCoder\>|label: DecodeCache[span-0][CacheableCombineAccumulate[x/mean_and_var]]|partitionable: True}"];
"TensorSource[x/mean_and_var][span-1]" [label="{ExtractFromDict|keys: ('x/mean_and_var/Cast', 'x/mean_and_var/truediv', 'x/mean_and_var/truediv_1', 'x/mean_and_var/zeros')|label: TensorSource[x/mean_and_var][span-1]|partitionable: True}"];
"ApplySavedModel[0][span-1]" -> "TensorSource[x/mean_and_var][span-1]";
"CacheableCombineAccumulate[x/mean_and_var][span-1]" [label="{CacheableCombineAccumulate|combiner: \<WeightedMeanAndVarCombiner\>|label: CacheableCombineAccumulate[x/mean_and_var][span-1]|partitionable: True}"];
"TensorSource[x/mean_and_var][span-1]" -> "CacheableCombineAccumulate[x/mean_and_var][span-1]";
"FlattenCache[CacheableCombineMerge[x/mean_and_var]]" [label="{Flatten|label: FlattenCache[CacheableCombineMerge[x/mean_and_var]]|partitionable: True}"];
"DecodeCache[span-0][CacheableCombineAccumulate[x/mean_and_var]]" -> "FlattenCache[CacheableCombineMerge[x/mean_and_var]]";
"CacheableCombineAccumulate[x/mean_and_var][span-1]" -> "FlattenCache[CacheableCombineMerge[x/mean_and_var]]";
"CacheableCombineMerge[x/mean_and_var]" [label="{CacheableCombineMerge|combiner: \<WeightedMeanAndVarCombiner\>|label: CacheableCombineMerge[x/mean_and_var]|{<0>0|<1>1}}"];
"FlattenCache[CacheableCombineMerge[x/mean_and_var]]" -> "CacheableCombineMerge[x/mean_and_var]";
"CreateTensorBinding[x/mean_and_var/Placeholder]" [label="{CreateTensorBinding|tensor: x/mean_and_var/Placeholder:0|is_asset_filepath: False|label: CreateTensorBinding[x/mean_and_var/Placeholder]}"];
"CacheableCombineMerge[x/mean_and_var]":0 -> "CreateTensorBinding[x/mean_and_var/Placeholder]";
"CreateTensorBinding[x/mean_and_var/Placeholder_1]" [label="{CreateTensorBinding|tensor: x/mean_and_var/Placeholder_1:0|is_asset_filepath: False|label: CreateTensorBinding[x/mean_and_var/Placeholder_1]}"];
"CacheableCombineMerge[x/mean_and_var]":1 -> "CreateTensorBinding[x/mean_and_var/Placeholder_1]";
"CreateSavedModelForAnalyzerInputs[1]" [label="{CreateSavedModel|table_initializers: 0|output_signature: OrderedDict([('x_square_deviations/mean_and_var/Cast', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x_square_deviations/mean_and_var/truediv', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x_square_deviations/mean_and_var/truediv_1', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x_square_deviations/mean_and_var/zeros', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\")])|label: CreateSavedModelForAnalyzerInputs[1]}"];
"CreateTensorBinding[vocabulary/Placeholder]" -> "CreateSavedModelForAnalyzerInputs[1]";
"CreateTensorBinding[x/mean_and_var/Placeholder]" -> "CreateSavedModelForAnalyzerInputs[1]";
"CreateTensorBinding[x/mean_and_var/Placeholder_1]" -> "CreateSavedModelForAnalyzerInputs[1]";
"ApplySavedModel[1]" [label="{ApplySavedModel|dataset_key: None|phase: 1|label: ApplySavedModel[1]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[1]" -> "ApplySavedModel[1]";
"TensorSource[x_square_deviations/mean_and_var]" [label="{ExtractFromDict|keys: ('x_square_deviations/mean_and_var/Cast', 'x_square_deviations/mean_and_var/truediv', 'x_square_deviations/mean_and_var/truediv_1', 'x_square_deviations/mean_and_var/zeros')|label: TensorSource[x_square_deviations/mean_and_var]|partitionable: True}"];
"ApplySavedModel[1]" -> "TensorSource[x_square_deviations/mean_and_var]";
"CacheableCombineAccumulate[x_square_deviations/mean_and_var]" [label="{CacheableCombineAccumulate|combiner: \<WeightedMeanAndVarCombiner\>|label: CacheableCombineAccumulate[x_square_deviations/mean_and_var]|partitionable: True}"];
"TensorSource[x_square_deviations/mean_and_var]" -> "CacheableCombineAccumulate[x_square_deviations/mean_and_var]";
"CacheableCombineMerge[x_square_deviations/mean_and_var]" [label="{CacheableCombineMerge|combiner: \<WeightedMeanAndVarCombiner\>|label: CacheableCombineMerge[x_square_deviations/mean_and_var]|{<0>0|<1>1}}"];
"CacheableCombineAccumulate[x_square_deviations/mean_and_var]" -> "CacheableCombineMerge[x_square_deviations/mean_and_var]";
"CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder]" [label="{CreateTensorBinding|tensor: x_square_deviations/mean_and_var/Placeholder:0|is_asset_filepath: False|label: CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder]}"];
"CacheableCombineMerge[x_square_deviations/mean_and_var]":0 -> "CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder]";
"CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder_1]" [label="{CreateTensorBinding|tensor: x_square_deviations/mean_and_var/Placeholder_1:0|is_asset_filepath: False|label: CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder_1]}"];
"CacheableCombineMerge[x_square_deviations/mean_and_var]":1 -> "CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder_1]";
CreateSavedModel [label="{CreateSavedModel|table_initializers: 0|output_signature: OrderedDict([('x_normalized', \"Tensor\<shape: [None], \<dtype: 'float32'\>\>\")])|label: CreateSavedModel}"];
"CreateTensorBinding[vocabulary/Placeholder]" -> CreateSavedModel;
"CreateTensorBinding[x/mean_and_var/Placeholder]" -> CreateSavedModel;
"CreateTensorBinding[x/mean_and_var/Placeholder_1]" -> CreateSavedModel;
"CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder]" -> CreateSavedModel;
"CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder_1]" -> CreateSavedModel;
"EncodeCache[CacheableCombineAccumulate[x/mean_and_var]][span-1]" [label="{EncodeCache|coder: \<JsonNumpyCacheCoder\>|label: EncodeCache[CacheableCombineAccumulate[x/mean_and_var]][span-1]|partitionable: True}"];
"CacheableCombineAccumulate[x/mean_and_var][span-1]" -> "EncodeCache[CacheableCombineAccumulate[x/mean_and_var]][span-1]";
"EncodeCache[VocabularyAccumulate[vocabulary]][span-0]" [label="{EncodeCache|coder: \<_VocabularyAccumulatorCoder\>|label: EncodeCache[VocabularyAccumulate[vocabulary]][span-0]|partitionable: True}"];
"VocabularyAccumulate[vocabulary][span-0]" -> "EncodeCache[VocabularyAccumulate[vocabulary]][span-0]";
"EncodeCache[VocabularyAccumulate[vocabulary]][span-1]" [label="{EncodeCache|coder: \<_VocabularyAccumulatorCoder\>|label: EncodeCache[VocabularyAccumulate[vocabulary]][span-1]|partitionable: True}"];
"VocabularyAccumulate[vocabulary][span-1]" -> "EncodeCache[VocabularyAccumulate[vocabulary]][span-1]";
}
""")
def _preprocessing_fn_for_generalized_chained_ptransforms(inputs):
class FakeChainablePartitionable(
collections.namedtuple('FakeChainablePartitionable', ['label']),
nodes.OperationDef):
def __new__(cls, label=None):
if label is None:
scope = tf.compat.v1.get_default_graph().get_name_scope()
label = '{}[{}]'.format(cls.__name__, scope)
return super(FakeChainablePartitionable, cls).__new__(cls, label=label)
@property
def num_outputs(self):
return 1
@property
def is_partitionable(self):
return True
class FakeChainableCacheable(
collections.namedtuple('FakeChainableCacheable', ['label']),
nodes.OperationDef):
def __new__(cls, label=None):
if label is None:
scope = tf.compat.v1.get_default_graph().get_name_scope()
label = '{}[{}]'.format(cls.__name__, scope)
return super(FakeChainableCacheable, cls).__new__(cls, label=label)
@property
def num_outputs(self):
return 1
@property
def is_partitionable(self):
return True
@property
def cache_coder(self):
return 'Not-a-coder-but-thats-ok!'
class FakeChainable(
collections.namedtuple('FakeChainable', ['label']), nodes.OperationDef):
def __new__(cls, label=None):
if label is None:
scope = tf.compat.v1.get_default_graph().get_name_scope()
label = '{}[{}]'.format(cls.__name__, scope)
return super(FakeChainable, cls).__new__(cls, label=label)
@property
def num_outputs(self):
return 1
@property
def is_partitionable(self):
return False
with tf.compat.v1.name_scope('x'):
input_values_node = nodes.apply_operation(
analyzer_nodes.TensorSource, tensors=[inputs['x']])
with tf.compat.v1.name_scope('partitionable1'):
partitionable_outputs = nodes.apply_multi_output_operation(
FakeChainablePartitionable, input_values_node)
with tf.compat.v1.name_scope('cacheable1'):
intermediate_cached_value_node = nodes.apply_multi_output_operation(
FakeChainableCacheable, *partitionable_outputs)
with tf.compat.v1.name_scope('partitionable2'):
partitionable_outputs = nodes.apply_multi_output_operation(
FakeChainablePartitionable, *intermediate_cached_value_node)
with tf.compat.v1.name_scope('cacheable2'):
cached_value_node = nodes.apply_multi_output_operation(
FakeChainableCacheable, *partitionable_outputs)
with tf.compat.v1.name_scope('partitionable3'):
output_value_node = nodes.apply_multi_output_operation(
FakeChainablePartitionable, *cached_value_node)
with tf.compat.v1.name_scope('merge'):
output_value_node = nodes.apply_operation(FakeChainable,
*output_value_node)
with tf.compat.v1.name_scope('not-cacheable'):
non_cached_output = nodes.apply_operation(FakeChainable,
input_values_node)
x_chained = analyzer_nodes.bind_future_as_tensor(
output_value_node, analyzer_nodes.TensorInfo(tf.float32, (17, 27),
False))
x_plain = analyzer_nodes.bind_future_as_tensor(
non_cached_output, analyzer_nodes.TensorInfo(tf.int64, (7, 13), False))
return {'x_chained': x_chained, 'x_plain': x_plain}
_OPTIMIZE_TRAVERSAL_GENERALIZED_CHAINED_PTRANSFORMS_CASE = dict(
testcase_name='generalized_chained_ptransforms',
feature_spec={'x': tf.io.FixedLenFeature([], tf.float32)},
preprocessing_fn=_preprocessing_fn_for_generalized_chained_ptransforms,
dataset_input_cache_dict=None,
expected_dot_graph_str=r"""digraph G {
directed=True;
node [shape=Mrecord];
"CreateSavedModelForAnalyzerInputs[0]" [label="{CreateSavedModel|table_initializers: 0|output_signature: OrderedDict([('inputs/x', \"Tensor\<shape: [None], \<dtype: 'float32'\>\>\")])|label: CreateSavedModelForAnalyzerInputs[0]}"];
"ApplySavedModel[0][span-0]" [label="{ApplySavedModel|dataset_key: span-0|phase: 0|label: ApplySavedModel[0][span-0]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[0]" -> "ApplySavedModel[0][span-0]";
"TensorSource[x][span-0]" [label="{ExtractFromDict|keys: ('inputs/x',)|label: TensorSource[x][span-0]|partitionable: True}"];
"ApplySavedModel[0][span-0]" -> "TensorSource[x][span-0]";
"FakeChainablePartitionable[x/partitionable1][span-0]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable1][span-0]|partitionable: True}"];
"TensorSource[x][span-0]" -> "FakeChainablePartitionable[x/partitionable1][span-0]";
"FakeChainableCacheable[x/cacheable1][span-0]" [label="{FakeChainableCacheable|label: FakeChainableCacheable[x/cacheable1][span-0]|partitionable: True}"];
"FakeChainablePartitionable[x/partitionable1][span-0]" -> "FakeChainableCacheable[x/cacheable1][span-0]";
"FakeChainablePartitionable[x/partitionable2][span-0]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable2][span-0]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable1][span-0]" -> "FakeChainablePartitionable[x/partitionable2][span-0]";
"FakeChainableCacheable[x/cacheable2][span-0]" [label="{FakeChainableCacheable|label: FakeChainableCacheable[x/cacheable2][span-0]|partitionable: True}"];
"FakeChainablePartitionable[x/partitionable2][span-0]" -> "FakeChainableCacheable[x/cacheable2][span-0]";
"FakeChainablePartitionable[x/partitionable3][span-0]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable3][span-0]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable2][span-0]" -> "FakeChainablePartitionable[x/partitionable3][span-0]";
"ApplySavedModel[0][span-1]" [label="{ApplySavedModel|dataset_key: span-1|phase: 0|label: ApplySavedModel[0][span-1]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[0]" -> "ApplySavedModel[0][span-1]";
"TensorSource[x][span-1]" [label="{ExtractFromDict|keys: ('inputs/x',)|label: TensorSource[x][span-1]|partitionable: True}"];
"ApplySavedModel[0][span-1]" -> "TensorSource[x][span-1]";
"FakeChainablePartitionable[x/partitionable1][span-1]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable1][span-1]|partitionable: True}"];
"TensorSource[x][span-1]" -> "FakeChainablePartitionable[x/partitionable1][span-1]";
"FakeChainableCacheable[x/cacheable1][span-1]" [label="{FakeChainableCacheable|label: FakeChainableCacheable[x/cacheable1][span-1]|partitionable: True}"];
"FakeChainablePartitionable[x/partitionable1][span-1]" -> "FakeChainableCacheable[x/cacheable1][span-1]";
"FakeChainablePartitionable[x/partitionable2][span-1]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable2][span-1]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable1][span-1]" -> "FakeChainablePartitionable[x/partitionable2][span-1]";
"FakeChainableCacheable[x/cacheable2][span-1]" [label="{FakeChainableCacheable|label: FakeChainableCacheable[x/cacheable2][span-1]|partitionable: True}"];
"FakeChainablePartitionable[x/partitionable2][span-1]" -> "FakeChainableCacheable[x/cacheable2][span-1]";
"FakeChainablePartitionable[x/partitionable3][span-1]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable3][span-1]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable2][span-1]" -> "FakeChainablePartitionable[x/partitionable3][span-1]";
"FlattenCache[FakeChainable[x/merge]]" [label="{Flatten|label: FlattenCache[FakeChainable[x/merge]]|partitionable: True}"];
"FakeChainablePartitionable[x/partitionable3][span-0]" -> "FlattenCache[FakeChainable[x/merge]]";
"FakeChainablePartitionable[x/partitionable3][span-1]" -> "FlattenCache[FakeChainable[x/merge]]";
"FakeChainable[x/merge]" [label="{FakeChainable|label: FakeChainable[x/merge]}"];
"FlattenCache[FakeChainable[x/merge]]" -> "FakeChainable[x/merge]";
"CreateTensorBinding[x/Placeholder]" [label="{CreateTensorBinding|tensor: x/Placeholder:0|is_asset_filepath: False|label: CreateTensorBinding[x/Placeholder]}"];
"FakeChainable[x/merge]" -> "CreateTensorBinding[x/Placeholder]";
"ApplySavedModel[0]" [label="{ApplySavedModel|dataset_key: None|phase: 0|label: ApplySavedModel[0]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[0]" -> "ApplySavedModel[0]";
"TensorSource[x]" [label="{ExtractFromDict|keys: ('inputs/x',)|label: TensorSource[x]|partitionable: True}"];
"ApplySavedModel[0]" -> "TensorSource[x]";
"FakeChainable[x/not-cacheable]" [label="{FakeChainable|label: FakeChainable[x/not-cacheable]}"];
"TensorSource[x]" -> "FakeChainable[x/not-cacheable]";
"CreateTensorBinding[x/Placeholder_1]" [label="{CreateTensorBinding|tensor: x/Placeholder_1:0|is_asset_filepath: False|label: CreateTensorBinding[x/Placeholder_1]}"];
"FakeChainable[x/not-cacheable]" -> "CreateTensorBinding[x/Placeholder_1]";
CreateSavedModel [label="{CreateSavedModel|table_initializers: 0|output_signature: OrderedDict([('x_chained', \"Tensor\<shape: [17, 27], \<dtype: 'float32'\>\>\"), ('x_plain', \"Tensor\<shape: [7, 13], \<dtype: 'int64'\>\>\")])|label: CreateSavedModel}"];
"CreateTensorBinding[x/Placeholder]" -> CreateSavedModel;
"CreateTensorBinding[x/Placeholder_1]" -> CreateSavedModel;
"EncodeCache[FakeChainableCacheable[x/cacheable1]][span-0]" [label="{EncodeCache|coder: Not-a-coder-but-thats-ok!|label: EncodeCache[FakeChainableCacheable[x/cacheable1]][span-0]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable1][span-0]" -> "EncodeCache[FakeChainableCacheable[x/cacheable1]][span-0]";
"EncodeCache[FakeChainableCacheable[x/cacheable1]][span-1]" [label="{EncodeCache|coder: Not-a-coder-but-thats-ok!|label: EncodeCache[FakeChainableCacheable[x/cacheable1]][span-1]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable1][span-1]" -> "EncodeCache[FakeChainableCacheable[x/cacheable1]][span-1]";
"EncodeCache[FakeChainableCacheable[x/cacheable2]][span-0]" [label="{EncodeCache|coder: Not-a-coder-but-thats-ok!|label: EncodeCache[FakeChainableCacheable[x/cacheable2]][span-0]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable2][span-0]" -> "EncodeCache[FakeChainableCacheable[x/cacheable2]][span-0]";
"EncodeCache[FakeChainableCacheable[x/cacheable2]][span-1]" [label="{EncodeCache|coder: Not-a-coder-but-thats-ok!|label: EncodeCache[FakeChainableCacheable[x/cacheable2]][span-1]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable2][span-1]" -> "EncodeCache[FakeChainableCacheable[x/cacheable2]][span-1]";
}
""")
_OPTIMIZE_TRAVERSAL_TEST_CASES = [
_OPTIMIZE_TRAVERSAL_COMMON_CASE,
_OPTIMIZE_TRAVERSAL_GENERALIZED_CHAINED_PTRANSFORMS_CASE,
]
class CachedImplTest(test_case.TransformTestCase):
def setUp(self):
super(CachedImplTest, self).setUp()
self.base_test_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
self._cache_dir = os.path.join(self.base_test_dir, 'cache')
self._context = tft_beam.Context(temp_dir=self.get_temp_dir())
self._context.__enter__()
def tearDown(self):
self._context.__exit__()
def test_single_phase_mixed_analyzer_run_once(self):
span_0_key = 'span-0'
span_1_key = 'span-1'
def preprocessing_fn(inputs):
integerized_s = tft.compute_and_apply_vocabulary(inputs['s'])
_ = tft.bucketize(inputs['x'], 2, name='bucketize')
return {
'integerized_s':
integerized_s,
'x_min':
tft.min(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
'x_mean':
tft.mean(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
'y_min':
tft.min(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
'y_mean':
tft.mean(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
}
# Run AnalyzeAndTransform on some input data and compare with expected
# output.
input_data = [{'x': 12, 'y': 1, 's': 'd'}, {'x': 10, 'y': 1, 's': 'c'}]
input_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
'y': tf.io.FixedLenFeature([], tf.float32),
's': tf.io.FixedLenFeature([], tf.string),
}))
input_data_dict = {
span_0_key: [{
'x': -2,
'y': 1,
's': 'b',
}, {
'x': 4,
'y': -4,
's': 'b',
}],
span_1_key: input_data,
}
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(
list(itertools.chain(*input_data_dict.values())))
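      # Precomputed analyzer cache for span-0 only: the byte-string keys are
      # versioned cache-entry identifiers (analyzer label plus a hash), so
      # span-0's accumulators are read from cache while span-1 is analyzed
      # from the raw data.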
cache_dict = {
span_0_key: {
b'__v0__CacheableCombineAccumulate[x_1/mean_and_var]-.\xc4t>ZBv\xea\xa5SU\xf4\x065\xc6\x1c\x81W\xf9\x1b':
p | 'CreateA' >> beam.Create([b'[2.0, 1.0, 9.0, 0.0]']),
b'__v0__CacheableCombineAccumulate[x/x]-\x95\xc5w\x88\x85\x8b5V\xc9\x00\xe0\x0f\x03\x1a\xdaL\x9d\xd5\xb3\xe3':
p | 'CreateB' >> beam.Create([b'[2.0, 4.0]']),
b'__v0__CacheableCombineAccumulate[y_1/mean_and_var]-E^\xb7VZ\xeew4rm\xab\xa3\xa4k|J\x80ck\x16':
p | 'CreateC' >> beam.Create([b'[2.0, -1.5, 6.25, 0.0]']),
b'__v0__CacheableCombineAccumulate[y/y]-\xdf\x1ey\x03\x1c\x96\xd5'
b' e\x9bJ\xa1\xd2\xfc\x9c\x03\x0fM \xdb':
p | 'CreateD' >> beam.Create([b'[4.0, 1.0]']),
},
span_1_key: {},
}
transform_fn, cache_output = (
(flat_data, input_data_dict, cache_dict, input_metadata)
| 'Analyze' >> tft_beam.AnalyzeDatasetWithCache(preprocessing_fn))
_ = (cache_output | 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
p, self._cache_dir))
transformed_dataset = ((
(input_data_dict[span_1_key], input_metadata), transform_fn)
| 'Transform' >> tft_beam.TransformDataset())
dot_string = nodes.get_dot_graph([analysis_graph_builder._ANALYSIS_GRAPH
]).to_string()
self.WriteRenderedDotFile(dot_string)
      # The output cache should not contain entries that were already
      # present in the input cache.
self.assertEqual(
len(cache_output[span_0_key]),
len(cache_output[span_1_key]) - 4)
transformed_data, unused_transformed_metadata = transformed_dataset
expected_transformed = [
{
'x_mean': 6.0,
'x_min': -2.0,
'y_mean': -0.25,
'y_min': -4.0,
'integerized_s': 1,
},
{
'x_mean': 6.0,
'x_min': -2.0,
'y_mean': -0.25,
'y_min': -4.0,
'integerized_s': 2,
},
]
beam_test_util.assert_that(transformed_data,
beam_test_util.equal_to(expected_transformed))
transform_fn_dir = os.path.join(self.base_test_dir, 'transform_fn')
_ = transform_fn | tft_beam.WriteTransformFn(transform_fn_dir)
# 4 from analyzing 2 spans, and 2 from transform.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 6)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 4)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 8)
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 2)
def test_single_phase_run_twice(self):
span_0_key = 'span-0'
span_1_key = 'span-1'
def preprocessing_fn(inputs):
_ = tft.vocabulary(inputs['s'], vocab_filename='vocab1')
_ = tft.bucketize(inputs['x'], 2, name='bucketize')
return {
'x_min':
tft.min(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
'x_mean':
tft.mean(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
'y_min':
tft.min(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
'y_mean':
tft.mean(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
's_integerized':
tft.compute_and_apply_vocabulary(
inputs['s'],
labels=inputs['label'],
use_adjusted_mutual_info=True),
}
input_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
'y': tf.io.FixedLenFeature([], tf.float32),
's': tf.io.FixedLenFeature([], tf.string),
'label': tf.io.FixedLenFeature([], tf.int64),
}))
input_data_dict = {
span_0_key: [{
'x': -2,
'y': 1,
's': 'a',
'label': 0,
}, {
'x': 4,
'y': -4,
's': 'a',
'label': 1,
}, {
'x': 5,
'y': 11,
's': 'a',
'label': 1,
}, {
'x': 1,
'y': -4,
's': u'ȟᎥ𝒋ǩľḿꞑȯ𝘱𝑞𝗋𝘴'.encode('utf-8'),
'label': 1,
}],
span_1_key: [{
'x': 12,
'y': 1,
's': u'ȟᎥ𝒋ǩľḿꞑȯ𝘱𝑞𝗋𝘴'.encode('utf-8'),
'label': 0
}, {
'x': 10,
'y': 1,
's': 'c',
'label': 1
}],
}
expected_vocabulary_contents = np.array(
[b'a', u'ȟᎥ𝒋ǩľḿꞑȯ𝘱𝑞𝗋𝘴'.encode('utf-8'), b'c'],
dtype=object)
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(
list(itertools.chain(*input_data_dict.values())))
# wrap each value in input_data_dict as a pcoll.
input_data_pcoll_dict = {}
for a, b in six.iteritems(input_data_dict):
input_data_pcoll_dict[a] = p | a >> beam.Create(b)
transform_fn_1, cache_output = (
(flat_data, input_data_pcoll_dict, {}, input_metadata)
| 'Analyze' >> tft_beam.AnalyzeDatasetWithCache(preprocessing_fn))
_ = (
cache_output
| 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
p, self._cache_dir))
transformed_dataset = ((
(input_data_pcoll_dict[span_1_key], input_metadata), transform_fn_1)
| 'Transform' >> tft_beam.TransformDataset())
del input_data_pcoll_dict
transformed_data, unused_transformed_metadata = transformed_dataset
expected_transformed_data = [
{
'x_mean': 5.0,
'x_min': -2.0,
'y_mean': 1.0,
'y_min': -4.0,
's_integerized': 0,
},
{
'x_mean': 5.0,
'x_min': -2.0,
'y_mean': 1.0,
'y_min': -4.0,
's_integerized': 2,
},
]
beam_test_util.assert_that(
transformed_data,
beam_test_util.equal_to(expected_transformed_data),
label='first')
transform_fn_dir = os.path.join(self.base_test_dir, 'transform_fn_1')
_ = transform_fn_1 | tft_beam.WriteTransformFn(transform_fn_dir)
for key in input_data_dict:
self.assertIn(key, cache_output)
self.assertEqual(7, len(cache_output[key]))
tf_transform_output = tft.TFTransformOutput(transform_fn_dir)
vocab1_path = tf_transform_output.vocabulary_file_by_name('vocab1')
self.AssertVocabularyContents(vocab1_path, expected_vocabulary_contents)
    # 6 from analyzing 2 spans, and 2 from transform.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 8)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 0)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 14)
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 2)
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(
list(itertools.chain(*input_data_dict.values())))
# wrap each value in input_data_dict as a pcoll.
input_data_pcoll_dict = {}
for a, b in six.iteritems(input_data_dict):
input_data_pcoll_dict[a] = p | a >> beam.Create(b)
input_cache = p | analyzer_cache.ReadAnalysisCacheFromFS(
self._cache_dir, list(input_data_dict.keys()))
transform_fn_2, second_output_cache = (
(flat_data, input_data_pcoll_dict, input_cache, input_metadata)
| 'AnalyzeAgain' >>
(tft_beam.AnalyzeDatasetWithCache(preprocessing_fn)))
_ = (
second_output_cache
| 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
p, self._cache_dir))
dot_string = nodes.get_dot_graph([analysis_graph_builder._ANALYSIS_GRAPH
]).to_string()
self.WriteRenderedDotFile(dot_string)
transformed_dataset = ((
(input_data_dict[span_1_key], input_metadata), transform_fn_2)
| 'TransformAgain' >> tft_beam.TransformDataset())
transformed_data, unused_transformed_metadata = transformed_dataset
beam_test_util.assert_that(
transformed_data,
beam_test_util.equal_to(expected_transformed_data),
label='second')
transform_fn_dir = os.path.join(self.base_test_dir, 'transform_fn_2')
_ = transform_fn_2 | tft_beam.WriteTransformFn(transform_fn_dir)
tf_transform_output = tft.TFTransformOutput(transform_fn_dir)
vocab1_path = tf_transform_output.vocabulary_file_by_name('vocab1')
self.AssertVocabularyContents(vocab1_path, expected_vocabulary_contents)
self.assertFalse(second_output_cache)
# Only 2 from transform.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 2)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 14)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 0)
# The root CreateSavedModel is optimized away because the data doesn't get
# processed at all (only cache).
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 1)
def test_caching_vocab_for_integer_categorical(self):
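# Verifies that compute_and_apply_vocabulary over integer inputs can reuse a
# pre-encoded vocabulary-accumulator cache for span 0, so only span 1 needs to
# be analyzed from raw data.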
span_0_key = 'span-0'
span_1_key = 'span-1'
def preprocessing_fn(inputs):
return {
'x_vocab':
tft.compute_and_apply_vocabulary(
inputs['x'], frequency_threshold=2)
}
input_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
'x': tf.FixedLenFeature([], tf.int64),
}))
input_data_dict = {
span_0_key: [{
'x': -2,
}, {
'x': -4,
}, {
'x': -1,
}, {
'x': 4,
}],
span_1_key: [{
'x': -2,
}, {
'x': -1,
}, {
'x': 6,
}, {
'x': 7,
}],
}
expected_transformed_data = [{
'x_vocab': 0,
}, {
'x_vocab': 1,
}, {
'x_vocab': -1,
}, {
'x_vocab': -1,
}]
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(
list(itertools.chain(*input_data_dict.values())))
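# Pre-computed cache for span 0: keys are versioned analyzer cache identifiers
# ('__v0__VocabularyAccumulate[...]' plus a hash suffix), values are
# PCollections of encoded accumulator entries.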
cache_dict = {
span_0_key: {
b'__v0__VocabularyAccumulate[compute_and_apply_vocabulary/vocabulary]-\x05e\xfe4\x03H.P\xb5\xcb\xd22\xe3\x16\x15\xf8\xf5\xe38\xd9':
p | 'CreateB' >> beam.Create(
[b'[-2, 2]', b'[-4, 1]', b'[-1, 1]', b'[4, 1]']),
},
span_1_key: {},
}
transform_fn, cache_output = (
(flat_data, input_data_dict, cache_dict, input_metadata)
| 'Analyze' >> tft_beam.AnalyzeDatasetWithCache(preprocessing_fn))
dot_string = nodes.get_dot_graph(
[analysis_graph_builder._ANALYSIS_GRAPH]).to_string()
self.WriteRenderedDotFile(dot_string)
self.assertNotIn(span_0_key, cache_output)
_ = cache_output | 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
p, self._cache_dir)
transformed_dataset = ((
(input_data_dict[span_1_key], input_metadata), transform_fn)
| 'Transform' >> tft_beam.TransformDataset())
transformed_data, _ = transformed_dataset
beam_test_util.assert_that(
transformed_data,
beam_test_util.equal_to(expected_transformed_data),
label='first')
# 4 from analysis since 1 span was completely cached, and 4 from transform.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 8)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 1)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 1)
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 2)
def test_non_frequency_vocabulary_merge(self):
"""This test compares vocabularies produced with and without cache."""
mi_vocab_name = 'mutual_information_vocab'
adjusted_mi_vocab_name = 'adjusted_mutual_information_vocab'
weighted_frequency_vocab_name = 'weighted_frequency_vocab'
def preprocessing_fn(inputs):
_ = tft.vocabulary(
inputs['s'],
labels=inputs['label'],
store_frequency=True,
vocab_filename=mi_vocab_name,
min_diff_from_avg=0.1,
use_adjusted_mutual_info=False)
_ = tft.vocabulary(
inputs['s'],
labels=inputs['label'],
store_frequency=True,
vocab_filename=adjusted_mi_vocab_name,
min_diff_from_avg=1.0,
use_adjusted_mutual_info=True)
_ = tft.vocabulary(
inputs['s'],
weights=inputs['weight'],
store_frequency=True,
vocab_filename=weighted_frequency_vocab_name,
use_adjusted_mutual_info=False)
return inputs
span_0_key = 'span-0'
span_1_key = 'span-1'
input_data = [
dict(s='a', weight=1, label=1),
dict(s='a', weight=0.5, label=1),
dict(s='b', weight=0.75, label=1),
dict(s='b', weight=1, label=0),
]
input_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
's': tf.io.FixedLenFeature([], tf.string),
'label': tf.io.FixedLenFeature([], tf.int64),
'weight': tf.io.FixedLenFeature([], tf.float32),
}))
input_data_dict = {
span_0_key: input_data,
span_1_key: input_data,
}
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(
list(itertools.chain(*input_data_dict.values())))
# wrap each value in input_data_dict as a pcoll.
input_data_pcoll_dict = {}
for a, b in six.iteritems(input_data_dict):
input_data_pcoll_dict[a] = p | a >> beam.Create(b)
transform_fn_with_cache, output_cache = (
(flat_data, input_data_pcoll_dict, {}, input_metadata)
| tft_beam.AnalyzeDatasetWithCache(preprocessing_fn))
transform_fn_with_cache_dir = os.path.join(self.base_test_dir,
'transform_fn_with_cache')
_ = transform_fn_with_cache | tft_beam.WriteTransformFn(
transform_fn_with_cache_dir)
expected_accumulators = {
b'__v0__VocabularyAccumulate[vocabulary]-<GhZ\xac\xb8\xa9\x8c\xce\x1c\xb2-ck\xca\xe8\xec\t%\x8f':
[
b'["a", [2, [0.0, 1.0], [0.0, 0.0], 1.0]]',
b'["b", [2, [0.5, 0.5], [0.0, 0.0], 1.0]]',
b'["global_y_count_sentinel", [4, [0.25, 0.75], [0.0, 0.0], '
b'1.0]]'
],
b'__v0__VocabularyAccumulate[vocabulary_1]-\xa6\xae\nd\xe3\xd1\x9f\xa0\xe2\xb4\x05j\xa5\xfd\x8c\xfaeN\xd1\x1f':
[
b'["a", [2, [0.0, 1.0], [0.0, 0.0], 1.0]]',
b'["b", [2, [0.5, 0.5], [0.0, 0.0], 1.0]]',
b'["global_y_count_sentinel", [4, [0.25, 0.75], [0.0, 0.0], '
b'1.0]]'
],
b"__v0__VocabularyAccumulate[vocabulary_2]-\x97\x1c>\x851\x94'\xdc\xdf\xfd\xcc\x86\xb7\xb8\xe1\xe8*\x89B\t":
[b'["a", 1.5]', b'["b", 1.75]'],
}
spans = [span_0_key, span_1_key]
self.assertCountEqual(output_cache.keys(), spans)
for span in spans:
self.assertCountEqual(output_cache[span].keys(),
expected_accumulators.keys())
for idx, (key,
value) in enumerate(six.iteritems(expected_accumulators)):
beam_test_util.assert_that(
output_cache[span][key],
beam_test_util.equal_to(value),
label='AssertCache[{}][{}]'.format(span, idx))
# 4 from analysis on each of the input spans.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 8)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 0)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 6)
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 2)
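# Re-run the analysis without any caching, so the resulting vocabulary files
# can be compared against the cached run below.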
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(input_data * 2)
transform_fn_no_cache = ((flat_data, input_metadata)
| tft_beam.AnalyzeDataset(preprocessing_fn))
transform_fn_no_cache_dir = os.path.join(self.base_test_dir,
'transform_fn_no_cache')
_ = transform_fn_no_cache | tft_beam.WriteTransformFn(
transform_fn_no_cache_dir)
# 4 from analysis on each of the input spans.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 8)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 0)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 0)
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 2)
tft_output_cache = tft.TFTransformOutput(transform_fn_with_cache_dir)
tft_output_no_cache = tft.TFTransformOutput(transform_fn_no_cache_dir)
for vocab_filename in (mi_vocab_name, adjusted_mi_vocab_name,
weighted_frequency_vocab_name):
cache_path = tft_output_cache.vocabulary_file_by_name(vocab_filename)
no_cache_path = tft_output_no_cache.vocabulary_file_by_name(
vocab_filename)
with tf.io.gfile.GFile(cache_path, 'rb') as f1, tf.io.gfile.GFile(
no_cache_path, 'rb') as f2:
self.assertEqual(
f1.readlines(), f2.readlines(),
'vocab with cache != vocab without cache for: {}'.format(
vocab_filename))
@test_case.named_parameters(*_OPTIMIZE_TRAVERSAL_TEST_CASES)
def test_optimize_traversal(self, feature_spec, preprocessing_fn,
dataset_input_cache_dict, expected_dot_graph_str):
span_0_key, span_1_key = 'span-0', 'span-1'
if dataset_input_cache_dict is not None:
cache = {span_0_key: dataset_input_cache_dict}
else:
cache = {}
with tf.compat.v1.name_scope('inputs'):
input_signature = impl_helper.feature_spec_as_batched_placeholders(
feature_spec)
output_signature = preprocessing_fn(input_signature)
transform_fn_future, cache_output_dict = analysis_graph_builder.build(
tf.compat.v1.get_default_graph(), input_signature, output_signature,
{span_0_key, span_1_key}, cache)
leaf_nodes = [transform_fn_future] + sorted(
cache_output_dict.values(), key=str)
dot_string = nodes.get_dot_graph(leaf_nodes).to_string()
self.WriteRenderedDotFile(dot_string)
self.assertSameElements(
dot_string.split('\n'),
expected_dot_graph_str.split('\n'),
msg='Result dot graph is:\n{}\nCache output dict keys are: {}'.format(
dot_string, cache_output_dict.keys()))
def test_no_data_needed(self):
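# The preprocessing_fn here has no analyzers, so analysis should succeed with
# flat_data=None and produce no new cache.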
span_0_key = 'span-0'
span_1_key = 'span-1'
def preprocessing_fn(inputs):
return {k: tf.identity(v) for k, v in six.iteritems(inputs)}
input_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
}))
input_data_dict = {
span_0_key: None,
span_1_key: None,
}
with _TestPipeline() as p:
flat_data = None
cache_dict = {
span_0_key: {},
span_1_key: {},
}
_, output_cache = (
(flat_data, input_data_dict, cache_dict, input_metadata)
| 'Analyze' >> tft_beam.AnalyzeDatasetWithCache(
preprocessing_fn, pipeline=p))
self.assertFalse(output_cache)
if __name__ == '__main__':
# TODO(b/133440043): Remove this once TFT supports eager execution.
tf.compat.v1.disable_eager_execution()
test_case.main()
|
py | 1a3c737ad48174233f0ce34314dbd2d831b06fcc | set_name(0x80135544, "PresOnlyTestRoutine__Fv", SN_NOWARN)
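# Symbol-name annotations for a disassembler database: each set_name() call
# below maps a memory address to a mangled C++ symbol, with SN_NOWARN
# suppressing warnings when a name cannot be applied. (set_name/SN_NOWARN are
# assumed to come from the hosting disassembler's scripting API; they are not
# defined in this fragment.)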
set_name(0x8013556C, "FeInitBuffer__Fv", SN_NOWARN)
set_name(0x80135594, "FeAddEntry__Fii8TXT_JUSTUsP7FeTableP5CFont", SN_NOWARN)
set_name(0x80135608, "FeAddTable__FP11FeMenuTablei", SN_NOWARN)
set_name(0x80135684, "FeAddNameTable__FPUci", SN_NOWARN)
set_name(0x801357B4, "FeDrawBuffer__Fv", SN_NOWARN)
set_name(0x80135BC8, "FeNewMenu__FP7FeTable", SN_NOWARN)
set_name(0x80135C48, "FePrevMenu__Fv", SN_NOWARN)
set_name(0x80135D04, "FeSelUp__Fi", SN_NOWARN)
set_name(0x80135DEC, "FeSelDown__Fi", SN_NOWARN)
set_name(0x80135ED0, "FeGetCursor__Fv", SN_NOWARN)
set_name(0x80135EE4, "FeSelect__Fv", SN_NOWARN)
set_name(0x80135F28, "FeMainKeyCtrl__FP7CScreen", SN_NOWARN)
set_name(0x801360D4, "InitDummyMenu__Fv", SN_NOWARN)
set_name(0x801360DC, "InitFrontEnd__FP9FE_CREATE", SN_NOWARN)
set_name(0x8013619C, "FeInitMainMenu__Fv", SN_NOWARN)
set_name(0x801361FC, "FeInitNewGameMenu__Fv", SN_NOWARN)
set_name(0x8013624C, "FeNewGameMenuCtrl__Fv", SN_NOWARN)
set_name(0x80136380, "FeInitPlayer1ClassMenu__Fv", SN_NOWARN)
set_name(0x801363F4, "FeInitPlayer2ClassMenu__Fv", SN_NOWARN)
set_name(0x80136468, "FePlayerClassMenuCtrl__Fv", SN_NOWARN)
set_name(0x801364B0, "FeDrawChrClass__Fv", SN_NOWARN)
set_name(0x8013694C, "FeInitNewP1NameMenu__Fv", SN_NOWARN)
set_name(0x80136994, "FeInitNewP2NameMenu__Fv", SN_NOWARN)
set_name(0x801369DC, "FeNewNameMenuCtrl__Fv", SN_NOWARN)
set_name(0x80136F6C, "FeCopyPlayerInfoForReturn__Fv", SN_NOWARN)
set_name(0x8013703C, "FeEnterGame__Fv", SN_NOWARN)
set_name(0x80137064, "FeInitLoadMemcardSelect__Fv", SN_NOWARN)
set_name(0x801370CC, "FeInitLoadChar1Menu__Fv", SN_NOWARN)
set_name(0x80137138, "FeInitLoadChar2Menu__Fv", SN_NOWARN)
set_name(0x801371A4, "FeInitDifficultyMenu__Fv", SN_NOWARN)
set_name(0x801371EC, "FeDifficultyMenuCtrl__Fv", SN_NOWARN)
set_name(0x801372A4, "FeInitBackgroundMenu__Fv", SN_NOWARN)
set_name(0x801372EC, "FeInitBook1Menu__Fv", SN_NOWARN)
set_name(0x80137338, "FeInitBook2Menu__Fv", SN_NOWARN)
set_name(0x80137384, "FeBackBookMenuCtrl__Fv", SN_NOWARN)
set_name(0x80137580, "PlayDemo__Fv", SN_NOWARN)
set_name(0x80137594, "FadeFEOut__Fv", SN_NOWARN)
set_name(0x80137658, "DrawBackTSK__FP4TASK", SN_NOWARN)
set_name(0x80137750, "FrontEndTask__FP4TASK", SN_NOWARN)
set_name(0x80137AC8, "McMainCharKeyCtrl__Fv", SN_NOWARN)
set_name(0x80137ED0, "DrawFeTwinkle__Fii", SN_NOWARN)
set_name(0x80137F90, "___6Dialog", SN_NOWARN)
set_name(0x80137FB8, "__6Dialog", SN_NOWARN)
set_name(0x80138014, "___7CScreen", SN_NOWARN)
set_name(0x80138034, "CheckActive__4CPad", SN_NOWARN)
set_name(0x80138BE4, "InitCredits__Fv", SN_NOWARN)
set_name(0x80138C20, "PrintCredits__FPciiiii", SN_NOWARN)
set_name(0x8013944C, "DrawCreditsTitle__Fiiiii", SN_NOWARN)
set_name(0x80139518, "DrawCreditsSubTitle__Fiiiii", SN_NOWARN)
set_name(0x801395F4, "DoCredits__Fv", SN_NOWARN)
set_name(0x80139878, "PRIM_GetPrim__FPP8POLY_FT4", SN_NOWARN)
set_name(0x801398F4, "GetCharHeight__5CFontUc", SN_NOWARN)
set_name(0x80139934, "GetCharWidth__5CFontUc", SN_NOWARN)
set_name(0x80139988, "___7CScreen_addr_80139988", SN_NOWARN)
set_name(0x801399A8, "GetFr__7TextDati", SN_NOWARN)
set_name(0x8013DF64, "endian_swap__FPUci", SN_NOWARN)
set_name(0x8013DF98, "to_sjis__Fc", SN_NOWARN)
set_name(0x8013E018, "to_ascii__FUs", SN_NOWARN)
set_name(0x8013E098, "ascii_to_sjis__FPcPUs", SN_NOWARN)
set_name(0x8013E11C, "sjis_to_ascii__FPUsPc", SN_NOWARN)
set_name(0x8013E194, "read_card_directory__Fi", SN_NOWARN)
set_name(0x8013E3A0, "test_card_format__Fi", SN_NOWARN)
set_name(0x8013E490, "checksum_data__FPci", SN_NOWARN)
set_name(0x8013E4CC, "delete_card_file__Fii", SN_NOWARN)
set_name(0x8013E5C4, "read_card_file__FiiiPc", SN_NOWARN)
set_name(0x8013E788, "format_card__Fi", SN_NOWARN)
set_name(0x8013E84C, "write_card_file__FiiPcT2PUcPUsiT4", SN_NOWARN)
set_name(0x8013EBA4, "new_card__Fi", SN_NOWARN)
set_name(0x8013EC38, "service_card__Fi", SN_NOWARN)
set_name(0x80158E78, "GetFileNumber__FiPc", SN_NOWARN)
set_name(0x80158F38, "DoSaveOptions__Fv", SN_NOWARN)
set_name(0x80158F8C, "DoSaveCharacter__FPc", SN_NOWARN)
set_name(0x8015905C, "DoSaveGame__Fv", SN_NOWARN)
set_name(0x8015911C, "DoLoadGame__Fv", SN_NOWARN)
set_name(0x801591C8, "DoFrontEndLoadCharacter__FPc", SN_NOWARN)
set_name(0x80159224, "McInitLoadCard1Menu__Fv", SN_NOWARN)
set_name(0x80159270, "McInitLoadCard2Menu__Fv", SN_NOWARN)
set_name(0x801592BC, "ChooseCardLoad__Fv", SN_NOWARN)
set_name(0x80159370, "McInitLoadCharMenu__Fv", SN_NOWARN)
set_name(0x80159398, "McInitLoadGameMenu__Fv", SN_NOWARN)
set_name(0x801593F4, "McMainKeyCtrl__Fv", SN_NOWARN)
set_name(0x80159530, "ShowAlertBox__Fv", SN_NOWARN)
set_name(0x80159704, "ShowCardActionText__FPc", SN_NOWARN)
set_name(0x80159848, "GetLoadStatusMessage__FPc", SN_NOWARN)
set_name(0x801598EC, "GetSaveStatusMessage__FiPc", SN_NOWARN)
set_name(0x801599C4, "ShowGameFiles__FPciiG4RECT", SN_NOWARN)
set_name(0x80159B2C, "SetRGB__6DialogUcUcUc", SN_NOWARN)
set_name(0x80159B4C, "SetBack__6Dialogi", SN_NOWARN)
set_name(0x80159B54, "SetBorder__6Dialogi", SN_NOWARN)
set_name(0x80159B5C, "SetOTpos__6Dialogi", SN_NOWARN)
set_name(0x80159B68, "___6Dialog_addr_80159B68", SN_NOWARN)
set_name(0x80159B90, "__6Dialog_addr_80159B90", SN_NOWARN)
set_name(0x80159BEC, "ILoad__Fv", SN_NOWARN)
set_name(0x80159C40, "LoadQuest__Fi", SN_NOWARN)
set_name(0x80159D08, "ISave__Fi", SN_NOWARN)
set_name(0x80159D68, "SaveQuest__Fi", SN_NOWARN)
set_name(0x80159E34, "PSX_GM_SaveGame__FiPcT1", SN_NOWARN)
set_name(0x8015A0D4, "PSX_GM_LoadGame__FUcii", SN_NOWARN)
set_name(0x8015A3C0, "PSX_CH_LoadGame__Fii", SN_NOWARN)
set_name(0x8015A524, "PSX_CH_SaveGame__FiPcT1", SN_NOWARN)
set_name(0x8015A6A4, "RestorePads__Fv", SN_NOWARN)
set_name(0x8015A764, "StorePads__Fv", SN_NOWARN)
set_name(0x8015A820, "GetIcon__Fv", SN_NOWARN)
set_name(0x8015A85C, "PSX_OPT_LoadGame__Fiib", SN_NOWARN)
set_name(0x8015A8C0, "PSX_OPT_SaveGame__FiPc", SN_NOWARN)
set_name(0x8015A958, "LoadOptions__Fv", SN_NOWARN)
set_name(0x8015A9C8, "SaveOptions__Fv", SN_NOWARN)
set_name(0x801380F4, "CreditsTitle", SN_NOWARN)
set_name(0x8013829C, "CreditsSubTitle", SN_NOWARN)
set_name(0x80138738, "CreditsText", SN_NOWARN)
set_name(0x80138850, "CreditsTable", SN_NOWARN)
set_name(0x80139A64, "card_dir", SN_NOWARN)
set_name(0x80139F64, "card_header", SN_NOWARN)
set_name(0x801399C4, "sjis_table", SN_NOWARN)
set_name(0x8013EE78, "save_buffer", SN_NOWARN)
set_name(0x8013EDF4, "McLoadGameMenu", SN_NOWARN)
set_name(0x8013EDD4, "CharFileList", SN_NOWARN)
set_name(0x8013EDE8, "Classes", SN_NOWARN)
set_name(0x8013EE10, "McLoadCharMenu", SN_NOWARN)
set_name(0x8013EE2C, "McLoadCard1Menu", SN_NOWARN)
set_name(0x8013EE48, "McLoadCard2Menu", SN_NOWARN)
|
py | 1a3c7415270443febde66e66f560b29d1aa869f1 | """
Script used to build the tiles databases for the Sentinel2,
Landsat5, and Landsat8 spacecraft.
"""
import os
import geopandas as gpd
from pathlib import Path
def build_sentinel2_db():
"""Extract the Sentinel2 tiles information and store it in pickle format."""
data_dir = Path(__file__).parent
wrs_file = os.path.join(
data_dir, "./sentinel2/sentinel2_tiles_world.zip!sentinel2_tiles_world.shp"
)
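# The "<archive>.zip!<member>.shp" path form points at a shapefile inside the
# zip archive (this relies on fiona/GDAL being able to open zip members).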
gpd_ = gpd.read_file(wrs_file)
gpd_.columns = ["TILE", "geometry"]
gpd_.to_file(
os.path.join(data_dir, "sentinel2/sentinel2_tiles.shp"), driver="ESRI Shapefile"
)
gpd_ = None
def build_lansat_db():
"""Extract the Landsat tiles (path/row) information and store it in pickle format."""
data_dir = Path(__file__).parent
wrs_file = os.path.join(
data_dir, "landsat/WRS2_descending_0.zip!WRS2_descending.shp"
)
gpd_ = gpd.read_file(wrs_file)
gpd_["PATH#ROW"] = (
gpd_["PATH"].apply(lambda x: f"{x:003d}")
+ "#"
+ gpd_["ROW"].apply(lambda x: f"{x:003d}")
)
gpd_[["PATH#ROW", "geometry"]].to_file(
os.path.join(data_dir, "landsat/landsat_tiles.shp"), driver="ESRI Shapefile"
)
gpd_ = None
if __name__ == "__main__":
build_sentinel2_db()
build_lansat_db()
|
py | 1a3c741e13750fcf880f2d939d57dca652938fee | """Representation of an IHM mmCIF file as a set of Python classes.
Generally class names correspond to mmCIF table names and class
attributes to mmCIF attributes (with prefixes like `pdbx_` stripped).
For example, the data item _entity.details is found in the
:class:`Entity` class, as the `details` member.
Ordinals and IDs are generally not used in this representation (instead,
pointers to objects are used).
"""
import itertools
import re
import sys
# Handle different naming of urllib in Python 2/3
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import json
__version__ = '0.21'
class __UnknownValue(object):
# Represent the mmCIF 'unknown' special value
def __str__(self):
return '?'
__repr__ = __str__
def __bool__(self):
return False
# Python2 compatibility
__nonzero__ = __bool__
# Needs to be hashable so that classes like Software (that might
# use unknown values as attributes) are hashable
def __hash__(self):
return 0
# Unknown value is a singleton and should only compare equal to itself
def __eq__(self, other):
return self is other
def __lt__(self, other):
return False
__gt__ = __lt__
__le__ = __ge__ = __eq__
#: A value that isn't known. Note that this is distinct from a value that
#: is deliberately omitted, which is represented by Python None.
unknown = __UnknownValue()
def _remove_identical(gen):
"""Return only unique objects from `gen`.
Objects that are identical are only returned once, although multiple
non-identical objects that compare equal may be returned."""
seen_objs = {}
for obj in gen:
if id(obj) in seen_objs:
continue
seen_objs[id(obj)] = None
yield obj
class System(object):
"""Top-level class representing a complete modeled system.
:param str title: Title (longer text description) of the system.
:param str id: Unique identifier for this system in the mmCIF file.
"""
def __init__(self, title=None, id='model'):
self.id = id
self.title = title
#: List of plain text comments. These will be added to the top of
#: the mmCIF file.
self.comments = []
#: List of all software used in the modeling. See :class:`Software`.
self.software = []
#: List of all authors of this system, as a list of strings (last name
#: followed by initials, e.g. "Smith AJ"). When writing out a file,
#: if this is list is empty, the set of all citation authors (see
#: :attr:`Citation.authors`) is used instead.
self.authors = []
#: List of all grants that supported this work. See :class:`Grant`.
self.grants = []
#: List of all citations. See :class:`Citation`.
self.citations = []
#: All entities used in the system. See :class:`Entity`.
self.entities = []
#: All asymmetric units used in the system. See :class:`AsymUnit`.
self.asym_units = []
#: All orphaned chemical descriptors in the system.
#: See :class:`ChemDescriptor`. This can be used to track descriptors
#: that are not otherwise used - normally one is assigned to a
#: :class:`ihm.restraint.CrossLinkRestraint`.
self.orphan_chem_descriptors = []
#: All orphaned assemblies in the system. See :class:`Assembly`.
#: This can be used to keep track of all assemblies that are not
#: otherwise used - normally one is assigned to a
#: :class:`~ihm.model.Model`,
#: :class:`ihm.protocol.Step`, or
#: :class:`~ihm.restraint.Restraint`.
self.orphan_assemblies = []
#: The assembly of the entire system. By convention this is always
#: the first assembly in the mmCIF file (assembly_id=1). Note that
#: currently this isn't filled in on output until dumper.write()
#: is called. See :class:`Assembly`.
self.complete_assembly = Assembly((), name='Complete assembly',
description='All known components')
#: Locations of all extra resources.
#: See :class:`~ihm.location.Location`.
self.locations = []
#: All orphaned datasets.
#: This can be used to keep track of all datasets that are not
#: otherwise used - normally a dataset is assigned to a
#: :class:`~ihm.dataset.DatasetGroup`,
#: :class:`~ihm.startmodel.StartingModel`,
#: :class:`~ihm.restraint.Restraint`,
#: :class:`~ihm.startmodel.Template`,
#: or as the parent of another :class:`~ihm.dataset.Dataset`.
#: See :class:`~ihm.dataset.Dataset`.
self.orphan_datasets = []
#: All orphaned groups of datasets.
#: This can be used to keep track of all dataset groups that are not
#: otherwise used - normally a group is assigned to a
#: :class:`~ihm.protocol.Protocol`.
#: See :class:`~ihm.dataset.DatasetGroup`.
self.orphan_dataset_groups = []
#: All orphaned representations of the system.
#: This can be used to keep track of all representations that are not
#: otherwise used - normally one is assigned to a
#: :class:`~ihm.model.Model`.
#: See :class:`~ihm.representation.Representation`.
self.orphan_representations = []
#: All orphaned starting models for the system.
#: This can be used to keep track of all starting models that are not
#: otherwise used - normally one is assigned to an
#: :class:`ihm.representation.Segment`.
#: See :class:`~ihm.startmodel.StartingModel`.
self.orphan_starting_models = []
#: All restraints on the system.
#: See :class:`~ihm.restraint.Restraint`.
self.restraints = []
#: All restraint groups.
#: See :class:`~ihm.restraint.RestraintGroup`.
self.restraint_groups = []
#: All orphaned modeling protocols.
#: This can be used to keep track of all protocols that are not
#: otherwise used - normally a protocol is assigned to a
#: :class:`~ihm.model.Model`.
#: See :class:`~ihm.protocol.Protocol`.
self.orphan_protocols = []
#: All ensembles.
#: See :class:`~ihm.model.Ensemble`.
self.ensembles = []
#: All ordered processes.
#: See :class:`~ihm.model.OrderedProcess`.
self.ordered_processes = []
#: All state groups (collections of models).
#: See :class:`~ihm.model.StateGroup`.
self.state_groups = []
#: All orphaned geometric objects.
#: This can be used to keep track of all objects that are not
#: otherwise used - normally an object is assigned to a
#: :class:`~ihm.restraint.GeometricRestraint`.
#: See :class:`~ihm.geometry.GeometricObject`.
self.orphan_geometric_objects = []
#: All orphaned features.
#: This can be used to keep track of all features that are not
#: otherwise used - normally a feature is assigned to a
#: :class:`~ihm.restraint.GeometricRestraint`.
#: See :class:`~ihm.restraint.Feature`.
self.orphan_features = []
#: All orphaned pseudo sites.
#: This can be used to keep track of all pseudo sites that are not
#: otherwise used - normally a site is used in a
#: :class:`~ihm.restraint.PseudoSiteFeature` or a
#: :class:`~ihm.restraint.CrossLinkPseudoSite`.
self.orphan_pseudo_sites = []
#: Contains the fluorescence (FLR) part.
#: See :class:`~ihm.flr.FLRData`.
self.flr_data = []
def update_locations_in_repositories(self, repos):
"""Update all :class:`Location` objects in the system that lie within
a checked-out :class:`Repository` to point to that repository.
This is intended for the use case where the current working
directory is a checkout of a repository which is archived somewhere
with a DOI. Locations can then be simply constructed pointing to
local files, and retroactively updated with this method to point
to the DOI if appropriate.
For each Location, if it points to a local file that is below the
`root` of one of the `repos`, update it to point to that repository.
If is under multiple roots, pick the one that gives the shortest
path. For example, if run in a subdirectory `foo` of a repository
archived as `repo.zip`, the local path `simple.pdb` will
be updated to be `repo-top/foo/simple.pdb` in `repo.zip`::
l = ihm.location.InputFileLocation("simple.pdb")
system.locations.append(l)
r = ihm.location.Repository(doi='1.2.3.4',
url='https://example.com/repo.zip',
top_directory="repo-top", root="..")
system.update_locations_in_repositories([r])
"""
import ihm.location
for loc in self._all_locations():
if isinstance(loc, ihm.location.FileLocation):
ihm.location.Repository._update_in_repos(loc, repos)
def _all_restraints(self):
"""Iterate over all Restraints in the system.
Duplicates may be present."""
def _all_restraints_in_groups():
for rg in self.restraint_groups:
for r in rg:
yield r
return itertools.chain(self.restraints, _all_restraints_in_groups())
def _all_chem_descriptors(self):
"""Iterate over all ChemDescriptors in the system.
Duplicates may be present."""
return itertools.chain(
self.orphan_chem_descriptors,
(restraint.linker for restraint in self._all_restraints()
if hasattr(restraint, 'linker') and restraint.linker),
(itertools.chain.from_iterable(
f._all_flr_chemical_descriptors() for f in self.flr_data)))
def _all_model_groups(self, only_in_states=True):
"""Iterate over all ModelGroups in the system.
If only_in_states is True, only return ModelGroups referenced
by a State object; otherwise, also include ModelGroups referenced
by an OrderedProcess or Ensemble."""
# todo: raise an error if a modelgroup is present in multiple states
for state_group in self.state_groups:
for state in state_group:
for model_group in state:
yield model_group
if not only_in_states:
for ensemble in self.ensembles:
if ensemble.model_group:
yield ensemble.model_group
for ss in ensemble.subsamples:
if ss.model_group:
yield ss.model_group
for proc in self.ordered_processes:
for step in proc.steps:
for edge in step:
yield edge.group_begin
yield edge.group_end
def _all_models(self):
"""Iterate over all Models in the system"""
# todo: raise an error if a model is present in multiple groups
for group in self._all_model_groups():
seen_models = {}
for model in group:
if model in seen_models:
continue
seen_models[model] = None
yield group, model
def _all_representations(self):
"""Iterate over all Representations in the system.
This includes all Representations referenced from other objects,
plus any orphaned Representations. Duplicates are filtered out."""
return _remove_identical(itertools.chain(
self.orphan_representations,
(model.representation for group, model in self._all_models()
if model.representation)))
def _all_segments(self):
for representation in self._all_representations():
for segment in representation:
yield segment
def _all_starting_models(self):
"""Iterate over all StartingModels in the system.
This includes all StartingModels referenced from other objects, plus
any orphaned StartingModels. Duplicates are filtered out."""
return _remove_identical(itertools.chain(
self.orphan_starting_models,
(segment.starting_model for segment in self._all_segments()
if segment.starting_model)))
def _all_protocols(self):
"""Iterate over all Protocols in the system.
This includes all Protocols referenced from other objects, plus
any orphaned Protocols. Duplicates are filtered out."""
return _remove_identical(itertools.chain(
self.orphan_protocols,
(model.protocol for group, model in self._all_models()
if model.protocol)))
def _all_protocol_steps(self):
for protocol in self._all_protocols():
for step in protocol.steps:
yield step
def _all_analysis_steps(self):
for protocol in self._all_protocols():
for analysis in protocol.analyses:
for step in analysis.steps:
yield step
def _all_assemblies(self):
"""Iterate over all Assemblies in the system.
This includes all Assemblies referenced from other objects, plus
any orphaned Assemblies. Duplicates may be present."""
return itertools.chain(
# Complete assembly is always first
(self.complete_assembly,),
self.orphan_assemblies,
(model.assembly for group, model in self._all_models()
if model.assembly),
(step.assembly for step in self._all_protocol_steps()
if step.assembly),
(step.assembly for step in self._all_analysis_steps()
if step.assembly),
(restraint.assembly
for restraint in self._all_restraints() if restraint.assembly))
def _all_dataset_groups(self):
"""Iterate over all DatasetGroups in the system.
This includes all DatasetGroups referenced from other objects, plus
any orphaned groups. Duplicates may be present."""
return itertools.chain(
self.orphan_dataset_groups,
(step.dataset_group for step in self._all_protocol_steps()
if step.dataset_group),
(step.dataset_group for step in self._all_analysis_steps()
if step.dataset_group))
def _all_templates(self):
"""Iterate over all Templates in the system."""
for startmodel in self._all_starting_models():
for template in startmodel.templates:
yield template
def _all_datasets_except_parents(self):
"""Iterate over all Datasets except those referenced only
as the parent of another Dataset. Duplicates may be present."""
def _all_datasets_in_groups():
for dg in self._all_dataset_groups():
for d in dg:
yield d
return itertools.chain(
self.orphan_datasets,
_all_datasets_in_groups(),
(sm.dataset for sm in self._all_starting_models()
if sm.dataset),
(restraint.dataset for restraint in self._all_restraints()
if restraint.dataset),
(template.dataset for template in self._all_templates()
if template.dataset))
def _all_datasets(self):
"""Iterate over all Datasets in the system.
This includes all Datasets referenced from other objects, plus
any orphaned datasets. Duplicates may be present."""
def _all_datasets_and_parents(d):
for p in d.parents:
# Handle transformed datasets
if hasattr(p, 'dataset'):
pd = p.dataset
else:
pd = p
for alld in _all_datasets_and_parents(pd):
yield alld
yield d
for d in self._all_datasets_except_parents():
for alld in _all_datasets_and_parents(d):
yield alld
def _all_densities(self):
for ensemble in self.ensembles:
for density in ensemble.densities:
yield density
def _all_locations(self):
"""Iterate over all Locations in the system.
This includes all Locations referenced from other objects, plus
any referenced from the top-level system.
Duplicates may be present."""
def _all_ensemble_locations():
for ensemble in self.ensembles:
if ensemble.file:
yield ensemble.file
for ss in ensemble.subsamples:
if ss.file:
yield ss.file
return itertools.chain(
self.locations,
(dataset.location for dataset in self._all_datasets()
if hasattr(dataset, 'location') and dataset.location),
_all_ensemble_locations(),
(density.file for density in self._all_densities()
if density.file),
(sm.script_file for sm in self._all_starting_models()
if sm.script_file),
(template.alignment_file for template in self._all_templates()
if template.alignment_file),
(step.script_file for step in self._all_protocol_steps()
if step.script_file),
(step.script_file for step in self._all_analysis_steps()
if step.script_file))
def _all_geometric_objects(self):
"""Iterate over all GeometricObjects in the system.
This includes all GeometricObjects referenced from other objects,
plus any referenced from the top-level system.
Duplicates may be present."""
return itertools.chain(
self.orphan_geometric_objects,
(restraint.geometric_object
for restraint in self._all_restraints()
if hasattr(restraint, 'geometric_object')
and restraint.geometric_object))
def _all_features(self):
"""Iterate over all Features in the system.
This includes all Features referenced from other objects,
plus any referenced from the top-level system.
Duplicates may be present."""
def _all_restraint_features():
for r in self._all_restraints():
if hasattr(r, '_all_features'):
for feature in r._all_features:
if feature:
yield feature
return itertools.chain(self.orphan_features, _all_restraint_features())
def _all_pseudo_sites(self):
"""Iterate over all PseudoSites in the system.
This includes all PseudoSites referenced from other objects,
plus any referenced from the top-level system.
Duplicates may be present."""
def _all_restraint_sites():
for r in self._all_restraints():
if hasattr(r, 'cross_links'):
for xl in r.cross_links:
if xl.pseudo1:
for x in xl.pseudo1:
yield x.site
if xl.pseudo2:
for x in xl.pseudo2:
yield x.site
return itertools.chain(self.orphan_pseudo_sites,
_all_restraint_sites(),
(f.site for f in self._all_features()
if hasattr(f, 'site') and f.site))
def _all_software(self):
"""Iterate over all Software in the system.
This includes all Software referenced from other objects, plus
any referenced from the top-level system.
Duplicates may be present."""
return (itertools.chain(
self.software,
(sm.software for sm in self._all_starting_models()
if sm.software),
(step.software for step in self._all_protocol_steps()
if step.software),
(step.software for step in self._all_analysis_steps()
if step.software),
(r.software for r in self._all_restraints()
if hasattr(r, 'software') and r.software)))
def _all_citations(self):
"""Iterate over all Citations in the system.
This includes all Citations referenced from other objects, plus
any referenced from the top-level system.
Duplicates are filtered out."""
return _remove_identical(itertools.chain(
self.citations,
(software.citation for software in self._all_software()
if software.citation),
(restraint.fitting_method_citation_id
for restraint in self._all_restraints()
if hasattr(restraint, 'fitting_method_citation_id')
and restraint.fitting_method_citation_id)))
def _all_entity_ranges(self):
"""Iterate over all Entity ranges in the system (these may be
:class:`Entity`, :class:`AsymUnit`, :class:`EntityRange` or
:class:`AsymUnitRange` objects).
Note that we don't include self.entities or self.asym_units here,
as we only want ranges that were actually used.
Duplicates may be present."""
return (itertools.chain(
(sm.asym_unit for sm in self._all_starting_models()),
(seg.asym_unit for seg in self._all_segments()),
(comp for a in self._all_assemblies() for comp in a),
(comp for f in self._all_features()
for comp in f._all_entities_or_asyms()),
(d.asym_unit for d in self._all_densities())))
def _make_complete_assembly(self):
"""Fill in the complete assembly with all asym units"""
# Clear out any existing components
self.complete_assembly[:] = []
# Include all asym units
for asym in self.asym_units:
self.complete_assembly.append(asym)
class Software(object):
"""Software used as part of the modeling protocol.
:param str name: The name of the software.
:param str classification: The major function of the software, for
example 'model building', 'sample preparation',
'data collection'.
:param str description: A longer text description of the software.
:param str location: Place where the software can be found (e.g. URL).
:param str type: Type of software (program/package/library/other).
:param str version: The version used.
:param citation: Publication describing the software.
:type citation: :class:`Citation`
Generally these objects are added to :attr:`System.software` or
passed to :class:`ihm.startmodel.StartingModel`,
:class:`ihm.protocol.Step`,
:class:`ihm.analysis.Step`, or
:class:`ihm.restraint.PredictedContactRestraint` objects.
"""
def __init__(self, name, classification, description, location,
type='program', version=None, citation=None):
self.name = name
self.classification = classification
self.description = description
self.location = location
self.type = type
self.version = version
self.citation = citation
# Software compares equal if the names and versions are the same
def _eq_vals(self):
return (self.name, self.version)
def __eq__(self, other):
return self._eq_vals() == other._eq_vals()
def __hash__(self):
return hash(self._eq_vals())
class Grant(object):
"""Information on funding support for the modeling.
See :attr:`System.grants`.
:param str funding_organization: The name of the organization providing
the funding, e.g. "National Institutes of Health".
:param str country: The country that hosts the funding organization,
e.g. "United States".
:param str grant_number: Identifying information for the grant, e.g.
"1R01GM072999-01".
"""
def __init__(self, funding_organization, country, grant_number):
self.funding_organization = funding_organization
self.country = country
self.grant_number = grant_number
class Citation(object):
"""A publication that describes the modeling.
Generally citations are added to :attr:`System.citations` or
passed to :class:`ihm.Software` or
:class:`ihm.restraint.EM3DRestraint` objects.
:param str pmid: The PubMed ID.
:param str title: Full title of the publication.
:param str journal: Abbreviated journal name.
:param int volume: Journal volume number.
:param page_range: The page (int) or page range (as a 2-element
int tuple).
:param int year: Year of publication.
:param authors: All authors in order, as a list of strings (last name
followed by initials, e.g. "Smith AJ").
:param str doi: Digital Object Identifier of the publication.
"""
def __init__(self, pmid, title, journal, volume, page_range, year, authors,
doi):
self.title, self.journal, self.volume = title, journal, volume
self.page_range, self.year = page_range, year
self.pmid, self.doi = pmid, doi
self.authors = authors if authors is not None else []
@classmethod
def from_pubmed_id(cls, pubmed_id):
"""Create a Citation from just a PubMed ID.
This is done by querying NCBI's web API, so requires network access.
:param int pubmed_id: The PubMed identifier.
:return: A new Citation for the given identifier.
:rtype: :class:`Citation`
"""
def get_doi(ref):
for art_id in ref['articleids']:
if art_id['idtype'] == 'doi':
return enc(art_id['value'])
def get_page_range(ref):
rng = enc(ref['pages']).split('-')
if len(rng) == 2 and len(rng[1]) < len(rng[0]):
# map ranges like "2730-43" to 2730,2743 not 2730, 43
rng[1] = rng[0][:len(rng[0]) - len(rng[1])] + rng[1]
# Handle one page or empty page range
if len(rng) == 1:
rng = rng[0]
if rng == '':
rng = None
return rng
# JSON values are always Unicode, but on Python 2 we want non-Unicode
# strings, so convert to ASCII
if sys.version_info[0] < 3:
def enc(s):
return s.encode('ascii')
else:
def enc(s):
return s
url = ('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi'
'?db=pubmed&retmode=json&rettype=abstract&id=%s' % pubmed_id)
fh = urllib2.urlopen(url)
j = json.load(fh)
fh.close()
ref = j['result'][str(pubmed_id)]
authors = [enc(x['name']) for x in ref['authors']
if x['authtype'] == 'Author']
return cls(pmid=pubmed_id, title=enc(ref['title']),
journal=enc(ref['source']),
volume=enc(ref['volume']) or None,
page_range=get_page_range(ref),
year=enc(ref['pubdate']).split()[0],
authors=authors, doi=get_doi(ref))
class ChemComp(object):
"""A chemical component from which :class:`Entity` objects are constructed.
Usually these are amino acids (see :class:`LPeptideChemComp`) or
nucleic acids (see :class:`DNAChemComp` and :class:`RNAChemComp`).
For standard amino and nucleic acids, it is generally easier to use
a :class:`Alphabet` and refer to the components with their one-letter
(amino acids, RNA) or two-letter (DNA) codes.
:param str id: A globally unique identifier for this component (usually
three letters).
:param str code: A shorter identifier (usually one letter) that only
needs to be unique in the entity.
:param str code_canonical: Canonical version of `code` (which need not
be unique).
:param str name: A longer human-readable name for the component.
:param str formula: The chemical formula. This is a space-separated
list of the element symbols in the component, each followed
by an optional count (if omitted, 1 is assumed). The formula
is terminated with the formal charge (if not zero). The element
list should be sorted alphabetically, unless carbon is present,
in which case C and H precede the rest of the elements. For
example, water would be "H2 O" and arginine (with +1 formal
charge) "C6 H15 N4 O2 1".
For example, glycine would have
``id='GLY', code='G', code_canonical='G'`` while selenomethionine would
use ``id='MSE', code='MSE', code_canonical='M'``, guanosine (RNA)
``id='G', code='G', code_canonical='G'``, and deoxyguanosine (DNA)
``id='DG', code='DG', code_canonical='G'``.
"""
type = 'other'
_element_mass = {'H': 1.008, 'C': 12.011, 'N': 14.007, 'O': 15.999,
'P': 30.974, 'S': 32.060, 'Se': 78.971, 'Fe': 55.845}
def __init__(self, id, code, code_canonical, name=None, formula=None):
self.id = id
self.code, self.code_canonical, self.name = code, code_canonical, name
self.formula = formula
def __str__(self):
return ('<%s.%s(%s)>'
% (self.__class__.__module__, self.__class__.__name__,
self.id))
def __get_weight(self):
# Calculate weight from formula
if self.formula is None:
return
spl = self.formula.split()
# Remove formal charge if present
if len(spl) > 0 and spl[-1].isdigit():
del spl[-1]
r = re.compile(r'(\D+)(\d*)$')
weight = 0.
for s in spl:
m = r.match(s)
if m is None:
raise ValueError("Bad formula fragment: %s" % s)
emass = self._element_mass.get(m.group(1), None)
if emass:
weight += emass * (int(m.group(2)) if m.group(2) else 1)
else:
# If element is unknown, weight is unknown too
return None
return weight
formula_weight = property(
__get_weight,
doc="Formula weight (dalton). This is calculated automatically from "
"the chemical formula and known atomic masses.")
# Equal if all identifiers are the same
def __eq__(self, other):
return ((self.code, self.code_canonical, self.id, self.type) ==
(other.code, other.code_canonical, other.id, other.type))
def __hash__(self):
return hash((self.code, self.code_canonical, self.id, self.type))
class PeptideChemComp(ChemComp):
"""A single peptide component. Usually :class:`LPeptideChemComp` is used
instead (except for glycine) to specify chirality.
See :class:`ChemComp` for a description of the parameters."""
type = 'peptide linking'
class LPeptideChemComp(PeptideChemComp):
"""A single peptide component with (normal) L- chirality.
See :class:`ChemComp` for a description of the parameters."""
type = 'L-peptide linking'
class DPeptideChemComp(PeptideChemComp):
"""A single peptide component with (unusual) D- chirality.
See :class:`ChemComp` for a description of the parameters."""
type = 'D-peptide linking'
class DNAChemComp(ChemComp):
"""A single DNA component.
See :class:`ChemComp` for a description of the parameters."""
type = 'DNA linking'
class RNAChemComp(ChemComp):
"""A single RNA component.
See :class:`ChemComp` for a description of the parameters."""
type = 'RNA linking'
class NonPolymerChemComp(ChemComp):
"""A non-polymer chemical component, such as a ligand
(for crystal waters, use :class:`WaterChemComp`).
:param str id: A globally unique identifier for this component.
:param str name: A longer human-readable name for the component.
:param str formula: The chemical formula. See :class:`ChemComp` for
more details.
"""
type = "non-polymer"
def __init__(self, id, name=None, formula=None):
super(NonPolymerChemComp, self).__init__(id, id, id, name=name,
formula=formula)
class WaterChemComp(NonPolymerChemComp):
"""The chemical component for crystal water.
"""
def __init__(self):
super(WaterChemComp, self).__init__('HOH', name='WATER',
formula="H2 O")
class Alphabet(object):
"""A mapping from codes (usually one-letter, or two-letter for DNA) to
chemical components.
These classes can be used to construct sequences of components
when creating an :class:`Entity`. They can also be used like a Python
dict to get standard components, e.g.::
a = ihm.LPeptideAlphabet()
met = a['M']
gly = a['G']
See :class:`LPeptideAlphabet`, :class:`RNAAlphabet`,
:class:`DNAAlphabet`.
"""
def __getitem__(self, key):
return self._comps[key]
def __contains__(self, key):
return key in self._comps
keys = property(lambda self: self._comps.keys())
values = property(lambda self: self._comps.values())
items = property(lambda self: self._comps.items())
class LPeptideAlphabet(Alphabet):
"""A mapping from one-letter amino acid codes (e.g. H, M) to
L-amino acids (as :class:`LPeptideChemComp` objects, except for achiral
glycine which maps to :class:`PeptideChemComp`). Some other common
modified residues are also included (e.g. MSE). For these their full
name rather than a one-letter code is used.
"""
_comps = dict([code, LPeptideChemComp(id, code, code, name,
formula)]
for code, id, name, formula in [
('A', 'ALA', 'ALANINE', 'C3 H7 N O2'),
('C', 'CYS', 'CYSTEINE', 'C3 H7 N O2 S'),
('D', 'ASP', 'ASPARTIC ACID', 'C4 H7 N O4'),
('E', 'GLU', 'GLUTAMIC ACID', 'C5 H9 N O4'),
('F', 'PHE', 'PHENYLALANINE', 'C9 H11 N O2'),
('H', 'HIS', 'HISTIDINE', 'C6 H10 N3 O2 1'),
('I', 'ILE', 'ISOLEUCINE', 'C6 H13 N O2'),
('K', 'LYS', 'LYSINE', 'C6 H15 N2 O2 1'),
('L', 'LEU', 'LEUCINE', 'C6 H13 N O2'),
('M', 'MET', 'METHIONINE', 'C5 H11 N O2 S'),
('N', 'ASN', 'ASPARAGINE', 'C4 H8 N2 O3'),
('P', 'PRO', 'PROLINE', 'C5 H9 N O2'),
('Q', 'GLN', 'GLUTAMINE', 'C5 H10 N2 O3'),
('R', 'ARG', 'ARGININE', 'C6 H15 N4 O2 1'),
('S', 'SER', 'SERINE', 'C3 H7 N O3'),
('T', 'THR', 'THREONINE', 'C4 H9 N O3'),
('V', 'VAL', 'VALINE', 'C5 H11 N O2'),
('W', 'TRP', 'TRYPTOPHAN', 'C11 H12 N2 O2'),
('Y', 'TYR', 'TYROSINE', 'C9 H11 N O3')])
_comps['G'] = PeptideChemComp('GLY', 'G', 'G', name='GLYCINE',
formula="C2 H5 N O2")
# common non-standard L-amino acids
_comps.update([id, LPeptideChemComp(id, id, canon, name, formula)]
for id, canon, name, formula in [
('MSE', 'M', 'SELENOMETHIONINE', 'C5 H11 N O2 Se'),
('UNK', 'X', 'UNKNOWN', 'C4 H9 N O2')])
class DPeptideAlphabet(Alphabet):
"""A mapping from D-amino acid codes (e.g. DHI, MED) to
D-amino acids (as :class:`DPeptideChemComp` objects, except for achiral
glycine which maps to :class:`PeptideChemComp`). See
:class:`LPeptideAlphabet` for more details.
"""
_comps = dict([code, DPeptideChemComp(code, code, canon, name, formula)]
for canon, code, name, formula in [
('A', 'DAL', 'D-ALANINE', 'C3 H7 N O2'),
('C', 'DCY', 'D-CYSTEINE', 'C3 H7 N O2 S'),
('D', 'DAS', 'D-ASPARTIC ACID', 'C4 H7 N O4'),
('E', 'DGL', 'D-GLUTAMIC ACID', 'C5 H9 N O4'),
('F', 'DPN', 'D-PHENYLALANINE', 'C9 H11 N O2'),
('H', 'DHI', 'D-HISTIDINE', 'C6 H10 N3 O2 1'),
('I', 'DIL', 'D-ISOLEUCINE', 'C6 H13 N O2'),
('K', 'DLY', 'D-LYSINE', 'C6 H14 N2 O2'),
('L', 'DLE', 'D-LEUCINE', 'C6 H13 N O2'),
('M', 'MED', 'D-METHIONINE', 'C5 H11 N O2 S'),
('N', 'DSG', 'D-ASPARAGINE', 'C4 H8 N2 O3'),
('P', 'DPR', 'D-PROLINE', 'C5 H9 N O2'),
('Q', 'DGN', 'D-GLUTAMINE', 'C5 H10 N2 O3'),
('R', 'DAR', 'D-ARGININE', 'C6 H15 N4 O2 1'),
('S', 'DSN', 'D-SERINE', 'C3 H7 N O3'),
('T', 'DTH', 'D-THREONINE', 'C4 H9 N O3'),
('V', 'DVA', 'D-VALINE', 'C5 H11 N O2'),
('W', 'DTR', 'D-TRYPTOPHAN', 'C11 H12 N2 O2'),
('Y', 'DTY', 'D-TYROSINE', 'C9 H11 N O3')])
_comps['G'] = PeptideChemComp('GLY', 'G', 'G', name='GLYCINE',
formula="C2 H5 N O2")
class RNAAlphabet(Alphabet):
"""A mapping from one-letter nucleic acid codes (e.g. A) to
RNA (as :class:`RNAChemComp` objects)."""
_comps = dict([id, RNAChemComp(id, id, id, name, formula)]
for id, name, formula in [
('A', "ADENOSINE-5'-MONOPHOSPHATE", 'C10 H14 N5 O7 P'),
('C', "CYTIDINE-5'-MONOPHOSPHATE", 'C9 H14 N3 O8 P'),
('G', "GUANOSINE-5'-MONOPHOSPHATE", 'C10 H14 N5 O8 P'),
('U', "URIDINE-5'-MONOPHOSPHATE", 'C9 H13 N2 O9 P')])
class DNAAlphabet(Alphabet):
"""A mapping from two-letter nucleic acid codes (e.g. DA) to
DNA (as :class:`DNAChemComp` objects)."""
_comps = dict([code, DNAChemComp(code, code, canon, name, formula)]
for code, canon, name, formula in [
('DA', 'A', "2'-DEOXYADENOSINE-5'-MONOPHOSPHATE",
'C10 H14 N5 O6 P'),
('DC', 'C', "2'-DEOXYCYTIDINE-5'-MONOPHOSPHATE",
'C9 H14 N3 O7 P'),
('DG', 'G', "2'-DEOXYGUANOSINE-5'-MONOPHOSPHATE",
'C10 H14 N5 O7 P'),
('DT', 'T', "THYMIDINE-5'-MONOPHOSPHATE",
'C10 H15 N2 O8 P')])
class EntityRange(object):
"""Part of an entity. Usually these objects are created from
an :class:`Entity`, e.g. to get a range covering residues 4 through
7 in `entity` use::
entity = ihm.Entity(sequence=...)
rng = entity(4,7)
"""
def __init__(self, entity, seq_id_begin, seq_id_end):
if not entity.is_polymeric():
raise TypeError("Can only create ranges for polymeric entities")
self.entity = entity
# todo: check range for validity (at property read time)
self.seq_id_range = (seq_id_begin, seq_id_end)
def __eq__(self, other):
try:
return (self.entity is other.entity
and self.seq_id_range == other.seq_id_range)
except AttributeError:
return False
def __hash__(self):
return hash((id(self.entity), self.seq_id_range))
# Use same ID as the original entity
_id = property(lambda self: self.entity._id)
class Atom(object):
"""A single atom in an entity or asymmetric unit. Usually these objects
are created by calling :meth:`Residue.atom`.
Note that this class does not store atomic coordinates of a given
atom in a given model; for that, see :class:`ihm.model.Atom`.
"""
__slots__ = ['residue', 'id']
def __init__(self, residue, id):
self.residue, self.id = residue, id
entity = property(lambda self: self.residue.entity)
asym = property(lambda self: self.residue.asym)
seq_id = property(lambda self: self.residue.seq_id)
class Residue(object):
"""A single residue in an entity or asymmetric unit. Usually these objects
are created by calling :meth:`Entity.residue` or
:meth:`AsymUnit.residue`.
"""
__slots__ = ['entity', 'asym', 'seq_id', '_range_id']
def __init__(self, seq_id, entity=None, asym=None):
self.entity = entity
self.asym = asym
# todo: check id for validity (at property read time)
self.seq_id = seq_id
def atom(self, atom_id):
"""Get a :class:`Atom` in this residue with the given name."""
return Atom(residue=self, id=atom_id)
def _get_auth_seq_id(self):
return self.asym._get_auth_seq_id(self.seq_id)
auth_seq_id = property(_get_auth_seq_id,
doc="Author-provided seq_id; only makes sense "
"for asymmetric units")
# Allow passing residues where a range is requested
# (e.g. to ResidueFeature)
seq_id_range = property(lambda self: (self.seq_id, self.seq_id))
class Entity(object):
"""Represent a CIF entity (with a unique sequence)
:param sequence sequence: The primary sequence, as a sequence of
:class:`ChemComp` objects, and/or codes looked up in `alphabet`.
:param alphabet: The mapping from code to chemical components to use
(it is not necessary to instantiate this class).
:type alphabet: :class:`Alphabet`
:param str description: A short text name for the sequence.
:param str details: Longer text describing the sequence.
:param source: The method by which the sample for this entity was
produced.
:type source: :class:`ihm.source.Source`
:param references: Information about this entity stored in external
databases (for example the sequence in UniProt)
:type references: sequence of :class:`ihm.reference.Reference` objects
The sequence for an entity can be specified explicitly as a list of
chemical components, or (more usually) as a list or string of codes,
or a mixture of both.
For example::
# Construct with a string of one-letter amino acid codes
protein = ihm.Entity('AHMD')
# Some less common amino acids (e.g. MSE) have three-letter codes
protein_with_mse = ihm.Entity(['A', 'H', 'MSE', 'D'])
# Can use a non-default alphabet to make DNA or RNA sequences
dna = ihm.Entity(('DA', 'DC'), alphabet=ihm.DNAAlphabet)
rna = ihm.Entity('AC', alphabet=ihm.RNAAlphabet)
# Can pass explicit ChemComp objects by looking them up in Alphabets
dna_al = ihm.DNAAlphabet()
rna_al = ihm.RNAAlphabet()
dna_rna_hybrid = ihm.Entity((dna_al['DG'], rna_al['C']))
# For unusual components (e.g. modified residues or ligands),
# new ChemComp objects can be constructed
psu = ihm.RNAChemComp(id='PSU', code='PSU', code_canonical='U',
name="PSEUDOURIDINE-5'-MONOPHOSPHATE",
formula='C9 H13 N2 O9 P')
rna_with_psu = ihm.Entity(('A', 'C', psu), alphabet=ihm.RNAAlphabet)
For more examples, see the
`ligands and water example <https://github.com/ihmwg/python-ihm/blob/main/examples/ligands_water.py>`_.
All entities should be stored in the top-level System object;
see :attr:`System.entities`.
""" # noqa: E501
number_of_molecules = 1
def __get_type(self):
if self.is_polymeric():
return 'polymer'
else:
return 'water' if self.sequence[0].code == 'HOH' else 'non-polymer'
type = property(__get_type)
def __get_src_method(self):
if self.source:
return self.source.src_method
elif self.type == 'water':
return 'nat'
else:
return 'man'
def __set_src_method(self, val):
raise TypeError("src_method is read-only; assign an appropriate "
"subclass of ihm.source.Source to source instead")
src_method = property(__get_src_method, __set_src_method)
def __get_weight(self):
weight = 0.
for s in self.sequence:
w = s.formula_weight
# If any component's weight is unknown, the total is too
if w:
weight += w
else:
return None
return weight
formula_weight = property(
__get_weight,
doc="Formula weight (dalton). This is calculated automatically "
"from that of the chemical components.")
def __init__(self, sequence, alphabet=LPeptideAlphabet,
description=None, details=None, source=None, references=[]):
def get_chem_comp(s):
if isinstance(s, ChemComp):
return s
else:
return alphabet._comps[s]
self.sequence = tuple(get_chem_comp(s) for s in sequence)
self.description, self.details = description, details
self.source = source
self.references = []
self.references.extend(references)
def __str__(self):
return "<ihm.Entity(%s)>" % self.description
def is_polymeric(self):
"""Return True iff this entity represents a polymer, such as an
amino acid sequence or DNA/RNA chain (and not a ligand or water)"""
return len(self.sequence) != 1 or not isinstance(self.sequence[0],
NonPolymerChemComp)
def residue(self, seq_id):
"""Get a :class:`Residue` at the given sequence position"""
return Residue(entity=self, seq_id=seq_id)
# Entities are considered identical if they have the same sequence
def __eq__(self, other):
return self.sequence == other.sequence
def __hash__(self):
return hash(self.sequence)
def __call__(self, seq_id_begin, seq_id_end):
return EntityRange(self, seq_id_begin, seq_id_end)
def __get_seq_id_range(self):
if self.is_polymeric():
return (1, len(self.sequence))
else:
# Nonpolymers don't have the concept of seq_id
return (None, None)
seq_id_range = property(__get_seq_id_range, doc="Sequence range")
class AsymUnitRange(object):
"""Part of an asymmetric unit. Usually these objects are created from
an :class:`AsymUnit`, e.g. to get a range covering residues 4 through
7 in `asym` use::
asym = ihm.AsymUnit(entity)
rng = asym(4,7)
"""
def __init__(self, asym, seq_id_begin, seq_id_end):
if asym.entity is not None and not asym.entity.is_polymeric():
raise TypeError("Can only create ranges for polymeric entities")
self.asym = asym
# todo: check range for validity (at property read time)
self.seq_id_range = (seq_id_begin, seq_id_end)
def __eq__(self, other):
try:
return (self.asym is other.asym
and self.seq_id_range == other.seq_id_range)
except AttributeError:
return False
def __hash__(self):
return hash((id(self.asym), self.seq_id_range))
# Use same ID and entity as the original asym unit
_id = property(lambda self: self.asym._id)
_ordinal = property(lambda self: self.asym._ordinal)
entity = property(lambda self: self.asym.entity)
class AsymUnit(object):
"""An asymmetric unit, i.e. a unique instance of an Entity that
was modeled.
:param entity: The unique sequence of this asymmetric unit.
:type entity: :class:`Entity`
:param str details: Longer text description of this unit.
:param auth_seq_id_map: Mapping from internal 1-based consecutive
residue numbering (`seq_id`) to "author-provided" numbering
(`auth_seq_id`). This can be either be an int offset, in
which case ``auth_seq_id = seq_id + auth_seq_id_map``, or
a mapping type (dict, list, tuple) in which case
``auth_seq_id = auth_seq_id_map[seq_id]``. (Note that if a `list`
or `tuple` is used, the first element in the list or tuple does
**not** correspond to the first residue and will never be used -
since `seq_id` can never be zero.) The default if
not specified, or not in the mapping, is for
``auth_seq_id == seq_id``.
:param str id: User-specified ID (usually a string of one or more
upper-case letters, e.g. A, B, C, AA). If not specified,
IDs are automatically assigned alphabetically.
See :attr:`System.asym_units`.
"""
def __init__(self, entity, details=None, auth_seq_id_map=0, id=None):
self.entity, self.details = entity, details
self.auth_seq_id_map = auth_seq_id_map
self.id = id
def _get_auth_seq_id(self, seq_id):
if isinstance(self.auth_seq_id_map, int):
return seq_id + self.auth_seq_id_map
else:
try:
return self.auth_seq_id_map[seq_id]
except (KeyError, IndexError):
return seq_id
def __call__(self, seq_id_begin, seq_id_end):
return AsymUnitRange(self, seq_id_begin, seq_id_end)
def residue(self, seq_id):
"""Get a :class:`Residue` at the given sequence position"""
return Residue(asym=self, seq_id=seq_id)
seq_id_range = property(lambda self: self.entity.seq_id_range,
doc="Sequence range")
class Assembly(list):
"""A collection of parts of the system that were modeled or probed
together.
:param sequence elements: Initial set of parts of the system.
:param str name: Short text name of this assembly.
:param str description: Longer text that describes this assembly.
This is implemented as a simple list of asymmetric units (or parts of
them), i.e. a list of :class:`AsymUnit` and/or :class:`AsymUnitRange`
objects. An Assembly is typically assigned to one or more of
- :class:`~ihm.model.Model`
- :class:`ihm.protocol.Step`
- :class:`ihm.analysis.Step`
- :class:`~ihm.restraint.Restraint`
See also :attr:`System.complete_assembly`
and :attr:`System.orphan_assemblies`.
Note that any duplicate assemblies will be pruned on output."""
#: :class:`Assembly` that is the immediate parent in a hierarchy, or `None`
parent = None
def __init__(self, elements=(), name=None, description=None):
super(Assembly, self).__init__(elements)
self.name, self.description = name, description
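# Illustrative sketch (hypothetical names): an Assembly is just a list of
# asym units and/or ranges, so a subset of the system can be described as
#
#     modeled = Assembly([asym_a, asym_b(1, 50)], name='Modeled subunits')
#
# where `asym_a` and `asym_b` are AsymUnit objects created elsewhere.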
class ChemDescriptor(object):
"""Description of a non-polymeric chemical component used in the experiment.
For example, this might be a fluorescent probe or cross-linking agent.
This class describes the chemical structure of the component, for
example with a SMILES or INCHI descriptor, so that it is uniquely
defined. A descriptor is typically assigned to a
:class:`ihm.restraint.CrossLinkRestraint`.
See :mod:`ihm.cross_linkers` for chemical descriptors of some
commonly-used cross-linking agents.
:param str auth_name: Author-provided name
:param str chem_comp_id: If this chemical is listed in the Chemical
Component Dictionary, its three-letter identifier
:param str chemical_name: The systematic (IUPAC) chemical name
:param str common_name: Common name for the component
:param str smiles: SMILES string
:param str smiles_canonical: Canonical SMILES string
:param str inchi: IUPAC INCHI descriptor
:param str inchi_key: Hashed INCHI key
See also :attr:`System.orphan_chem_descriptors`.
"""
def __init__(self, auth_name, chem_comp_id=None, chemical_name=None,
common_name=None, smiles=None, smiles_canonical=None,
inchi=None, inchi_key=None):
self.auth_name, self.chem_comp_id = auth_name, chem_comp_id
self.chemical_name, self.common_name = chemical_name, common_name
self.smiles, self.smiles_canonical = smiles, smiles_canonical
self.inchi, self.inchi_key = inchi, inchi_key
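# Illustrative sketch (placeholder values, not a validated descriptor): a
# descriptor for a hypothetical probe might be constructed as
#
#     probe = ChemDescriptor(auth_name='my-probe',
#                            chemical_name='3-aminopropanoic acid (placeholder)',
#                            smiles='NCCC(=O)O')
#
# and then referenced from the restraint that used the reagent.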
|
py | 1a3c7479e48a30ff57aaafd5e9e4947a38c2ce19 | import tkinter
import csv
f = open('class.csv')
csv_f = csv.reader(f)
myList = []
myList1 = []
myList2 = []
myList3 = []
for row in csv_f:
#print (row[2])
#myList.append(row[2])
myList.append(row)
myList1.append(row[0])
myList2.append(row[1])
myList3.append(row[2])
#print (myList)
class createGUIClass:
def __init__(self,master):
master.minsize(width=800, height=500)
master.maxsize(width=1000, height=700)
root = tkinter.Tk()
createGUI = createGUIClass(root)
# Lay out the CSV contents as a grid of labels, one label per cell.
for r in range(len(myList)):
    for c in range(len(myList[r])):
        tkinter.Label(root, text='%s' % (myList[r][c]),
                      borderwidth=10).grid(row=r, column=c)
root.mainloop( )
|
py | 1a3c74cf4c76a020d83816c5967534700d3fa67e | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 6 17:56:23 2019
@author: Khizar Anjum
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, MaxPooling1D, Dropout, Flatten, Add, Conv1D
from keras.models import Model
#%%
drp = [0.3,0.5,0.7,0.9];
files = [pd.read_csv('spline_log'+str(i)+'.csv',index_col=0) for i in drp]
#%%
[plt.plot(df.index.values,df['loss'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Training loss plot')
plt.show()
#%%
[plt.plot(df.index.values,df['val_loss'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Validation loss plot')
plt.show()
#%%
[plt.plot(df.index.values,df['sensitivity'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Sensitivity plot')
plt.show()
#%%
[plt.plot(df.index.values,df['val_sensitivity'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('validaton sensitivity plot')
plt.show()
#%%
prev_train = 0.2201
[plt.plot(df.index.values,np.array(df['sensitivity'].values)*prev_train + np.array(df['specificity'].values)*(1-prev_train)) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Training accuracy plot')
plt.show()
#%%
prev_val = 0.2136
[plt.plot(df.index.values,np.array(df['val_sensitivity'].values)*prev_val + np.array(df['val_specificity'].values)*(1-prev_val)) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Validation accuracy plot')
plt.show()
#%%
[plt.plot(df.index.values,df['specificity'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Specificity plot')
plt.show()
#%%
[plt.plot(df.index.values,df['val_specificity'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('validaton specificity plot')
plt.show()
#%%
def spline_model(J = 2, Q = 128, T = 200, drp = 0.5):
    # `drp` is taken as a scalar parameter here: the module-level `drp` above is
    # a list of rates (one per logged run), not a single dropout probability.
inputs = Input(shape=(22050,1))
#
#
x = Conv1D(filters=int(J*Q),kernel_size=int(T),padding='valid',strides=10, activation='square_activation')(inputs)#,kernel_initializer=real_sp_initializer
#y = Conv1D(filters=int(J*Q),kernel_size=int(T),padding='valid',strides=10, activation='square_activation')(inputs)#,kernel_initializer=imag_sp_initializer
#xy = Add()([x,y])
#print(xy)
#c1 = Conv1D(24,128,activation='relu',strides=1,padding='valid')(xy)
#p1 = MaxPooling1D(pool_size=2,strides=1, padding='valid')(c1)
d1 = Dropout(drp)(x)
c2 = Conv1D(128,4,activation='relu',strides=10,padding='valid')(d1)
#p2 = MaxPooling1D(pool_size=100,strides=10, padding='valid')(c2)
d2 = Dropout(drp)(c2)
c3 = Conv1D(128,4,activation='relu',strides=10,padding='valid')(d2)
#print(c3)
p3 = MaxPooling1D(pool_size=10,strides=5, padding='valid')(c3)
#print(p3)
#d3 = Dropout(0.1)(p3)
#print(d3)
#c4 = Conv1D(4,16,activation='relu',strides=1,padding='valid')(d2)
f1 = Flatten()(p3)
#print(f1)
dn1 = Dense(128,activation='sigmoid')(f1)
d4 = Dropout(drp)(dn1)
dn2 = Dense(32,activation='sigmoid')(d4)
d5 = Dropout(drp)(dn2)
predictions = Dense(2,activation='softmax')(d5)
#training and evaluating the model
model = Model(inputs=inputs, outputs=predictions)
return model
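# Illustrative usage sketch (assumes 'square_activation' is registered as a
# custom Keras activation elsewhere in this project; otherwise building the
# first Conv1D layer will fail):
#     model = spline_model(J=2, Q=128, T=200, drp=0.5)
#     model.compile(optimizer='adam', loss='categorical_crossentropy')
#     model.summary()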
#%% |
py | 1a3c75922190ad17eb0709745cd0dbda992f7593 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Optional, TYPE_CHECKING
from azure.core import PipelineClient
from azure.core.rest import HttpRequest, HttpResponse
from msrest import Deserializer, Serializer
from ._configuration import AutoRestReportServiceConfiguration
from ._operations import AutoRestReportServiceOperationsMixin
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Dict
class AutoRestReportService(AutoRestReportServiceOperationsMixin):
"""Test Infrastructure for AutoRest.
:keyword endpoint: Service URL. Default value is 'http://localhost:3000'.
:paramtype endpoint: str
"""
def __init__(self, *, endpoint: str = "http://localhost:3000", **kwargs: Any) -> None:
self._config = AutoRestReportServiceConfiguration(**kwargs)
self._client = PipelineClient(base_url=endpoint, config=self._config, **kwargs)
self._serialize = Serializer()
self._deserialize = Deserializer()
self._serialize.client_side_validation = False
def send_request(
self,
request, # type: HttpRequest
**kwargs: Any
) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client.send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AutoRestReportService
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
|
py | 1a3c75b1693a7e70d8c9aff6f7aaa4b5491ad8d4 | # Artificial Intelligence
# Degree in Computer Engineering (Grado en Ingeniería Informática)
# 2017-18
# play_tennis.py (Unit 3, slide 8)
attributes=[('Outlook',['Sunny','Overcast','Rainy']),
('Temperature',['High','Low','Mild']),
('Humidity',['High','Normal']),
('Wind',['Weak','Strong'])]
class_name='Play Tennis'
classes=['yes','no']
train=[['Sunny' , 'High' , 'High' , 'Weak' , 'no'],
['Sunny' , 'High' , 'High' , 'Strong', 'no'],
['Overcast','High' , 'High' , 'Weak' , 'yes'],
['Rainy' , 'Mild' , 'High' , 'Weak' , 'yes'],
['Rainy' , 'Low' , 'Normal' , 'Weak' , 'yes'],
['Rainy' , 'Low' , 'Normal' , 'Strong', 'no'],
['Overcast','Low' , 'Normal' , 'Strong', 'yes'],
['Sunny' , 'Mild' , 'High' , 'Weak' , 'no'],
['Sunny' , 'Low' , 'Normal' , 'Weak' , 'yes'],
['Rainy' , 'Mild' , 'Normal' , 'Weak' , 'yes'],
['Sunny' , 'Mild' , 'Normal' , 'Strong', 'yes'],
['Overcast','Mild' , 'High' , 'Strong', 'yes'],
['Overcast','High' , 'Normal' , 'Weak' , 'yes'],
['Rainy', 'Mild' , 'High' , 'Strong', 'no']]
|
py | 1a3c75e6186fb74517de841aaf6580468c3c9405 | """ Named entity recognition fine-tuning: utilities to work with CLUENER task. """
import torch
import logging
import os
import copy
import json
import random
import math
from .utils_ner import DataProcessor
logger = logging.getLogger(__name__)
def iob_iobes(tags):
"""
IOB -> IOBES
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == "O":
new_tags.append(tag)
elif tag.split("-")[0] == "B":
if i + 1 != len(tags) and tags[i + 1].split("-")[0] == "I":
new_tags.append(tag)
else:
new_tags.append(tag.replace("B-", "S-"))
elif tag.split("-")[0] == "I":
if i + 1 < len(tags) and tags[i + 1].split("-")[0] == "I":
new_tags.append(tag)
else:
new_tags.append(tag.replace("I-", "E-"))
else:
raise Exception("Invalid IOB format!")
return new_tags
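# Example (illustrative): iob_iobes(["B-PER", "I-PER", "O", "B-LOC"])
#                         -> ["B-PER", "E-PER", "O", "S-LOC"]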
def markup_for_gpt2_english(tokens, label_ids, label_all_tokens):
j = 0
new_label = [0] * len(tokens)
for i in range(len(tokens)):
if 'Ġ' in tokens[i]:
new_label[i] = label_ids[j]
j = j+1
else:
if label_all_tokens:
new_label[i] = new_label[i-1]
else:
new_label[i] = -100# note:the convention is -100 not O!
return tokens, new_label, label_ids, j
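# Example (illustrative): suppose the GPT-2 tokenizer splits " Johnson runs"
# into ['ĠJohn', 'son', 'Ġruns'] for the two word-level labels [1, 0].
# With label_all_tokens=False this returns
#     (['ĠJohn', 'son', 'Ġruns'], [1, -100, 0], [1, 0], 2)
# i.e. sub-word pieces without a leading 'Ġ' get the ignore index -100, and
# the last value (2) confirms that every word-level label was consumed.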
def remove_entity(tokens, entity_place, replace_token):
removed_entity_token = [i for i in tokens]
for i in entity_place:
removed_entity_token[i] = replace_token[0]
return removed_entity_token
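# Example (illustrative): remove_entity(['ĠJohn', 'Ġruns'], [0], ['Ġ*'])
#                         -> ['Ġ*', 'Ġruns']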
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, text_a, labels):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.labels = labels
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, input_len,segment_ids, label_ids, removed_input_ids=None,tokens=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
self.input_len = input_len
self.tokens = tokens
self.removed_input_ids = removed_input_ids
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def collate_fn(batch):
"""
    batch is a list of 6-tuples of equal-length tensors (padded to max_seq_length).
    Stacks each field across the batch and trims every tensor to the longest
    real sequence length (input_len) in the batch.
"""
all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels, all_remove_input_ids = map(torch.stack, zip(*batch))
max_len = max(all_lens).item()
all_input_ids = all_input_ids[:, :max_len]
all_attention_mask = all_attention_mask[:, :max_len]
all_token_type_ids = all_token_type_ids[:, :max_len]
all_labels = all_labels[:, :max_len]
if all_remove_input_ids[0] != None:
all_remove_input_ids = all_remove_input_ids[:, :max_len]
return all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_lens, all_remove_input_ids
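# Behaviour sketch (illustrative): for a batch of three features with
# input_len = [7, 12, 5], max_len is 12, so every stacked (3, max_seq_length)
# tensor above is sliced to (3, 12) before being returned; padding beyond the
# longest real sequence in the batch is simply dropped.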
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("andi611/bert-base-cased-ner")
def convert_examples_to_features(dataset, use_random, duplicate_train_data, english, markup, label_all_tokens, tokenizer_name, task_name, examples, label_list, max_seq_length, tokenizer,
cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1,
sep_token="[SEP]", pad_on_left=False, pad_token=0, pad_token_segment_id=0,
sequence_a_segment_id=0, mask_padding_with_zero=True,):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
count = 0
the_no_entity_number = 0
label_map = {label: i for i, label in enumerate(label_list)}
features = []
sum_length_of_example = 0
# if task_name == 'train':
# if use_random:
# tokenizer_name = 'random add **'
# elif duplicate_train_data:
# tokenizer_name = 'duplicate train data and add **'
# else:
# tokenizer_name = 'gpt2'
# if task_name == 'train' or task_name == 'eval':
# tokenizer_name = 'filling entity'
# else:
# tokenizer_name = 'gpt2'
if english:
# todo can also be [blank] (50257) use resize embedding
replace_token = tokenizer.tokenize((' *'))
if 'gpt2' in tokenizer_name:
print("gpt2_english tokenizer ")
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
if type(example.text_a) == list:
new_text = ' '.join(example.text_a)
                    tokens = tokenizer.tokenize(' ' + new_text)  # prepend a space so the first word is tokenized with a leading 'Ġ'
sum_length_of_example += len(example.text_a)
else:
raise(NotImplementedError)
if len(tokens) == 0:# for the empty tokens list: pass!
count += 1# count such abnormal tokens
continue
if markup == 'bieso':
example.labels = iob_iobes(example.labels)
label_ids = [label_map[x] for x in example.labels]
flag = 1
for i in label_ids:
if i != label_map['O']:
                        flag = 0  # this example contains at least one entity
break
the_no_entity_number += flag
# align the label_ids with tokens
tokens, new_label, label_ids, j = markup_for_gpt2_english(tokens, label_ids, label_all_tokens)
# truncate
special_tokens_count = 0
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
new_label = new_label[: (max_seq_length - special_tokens_count)]
segment_ids = [sequence_a_segment_id] * len(tokens)
                # # todo 1: mimic BERT by adding fixed special tokens before/after the input (independent of the continuous prompt); results barely changed, so it was dropped
# new_label += [label_map['O']]
# segment_ids += [0]
# if cls_token_at_end:
# new_label += [label_map['O']]
# segment_ids += [0]
# else:
# new_label = [label_map['O']] + new_label
# segment_ids = [0] + segment_ids
                # the gpt2 tokenizer adds no cls/sep tokens, so special_tokens_count = 0
pad_token = 0
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# input_ids += [102]
# input_ids = [101]+input_ids
# The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
input_len = len(new_label)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
new_label = ([-100] * padding_length) + new_label
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
new_label += [-100] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
# if ex_index < 5:
# logger.info("*** Example ***")
# logger.info("guid: %s", example.guid)
# logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
# logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
# logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
# logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
# logger.info("label_ids: %s", " ".join([str(x) for x in new_label]))
                if j == len(label_ids):  # make sure every label id was carried over into new_label
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, input_len=input_len,
segment_ids=segment_ids, label_ids=new_label))# tokens = tokens
else:
count += 1
print("**************** the total no entity example number: "+str(the_no_entity_number)+' ******************')
print("**************** average length of examples(not truncated): "+str(sum_length_of_example/ex_index) + ' ******************')
return features, count
elif 'random add **' in tokenizer_name and task_name == 'train':
            all_random_samples = random.sample(range(0, len(examples)), len(examples)//2)  # sample half of the examples
print("only for train dataset, gpt2_english tokenizer random add ** around half of the entities, randomly chosen each epoch! ")
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
if type(example.text_a) != list:
raise(NotImplementedError)
if len(example.labels) == 0:
count += 1
continue
random_labels = [example.labels[m] for m in range(len(example.labels))]
random_text = [example.text_a[m] for m in range(len(example.text_a))]
shift_place = 0
if ex_index in all_random_samples:
for k in range(1, len(example.labels)-1):
if example.labels[k] != 'O':
if example.labels[k-1] == 'O':
random_text.insert(k+shift_place, '*')
random_labels.insert(k+shift_place, 'O')
shift_place += 1
if example.labels[k+1] == 'O':
random_text.insert(k+1+shift_place, '*')
random_labels.insert(k+1+shift_place, 'O')
shift_place += 1
if example.labels[0] != 'O':
random_text.insert(0, '*')
random_labels.insert(0, 'O')
if example.labels[-1] != 'O':
random_text.append('*')
random_labels.append('O')
else:
if len(example.labels) >= 2 and example.labels[-2] != 'O':
random_text.insert(-1, '*')
random_labels.insert(-1, 'O')
new_text = ' '.join(random_text)
                tokens = tokenizer.tokenize(' ' + new_text)  # prepend a space so the first word is tokenized with a leading 'Ġ'
sum_length_of_example += len(random_text)
if len(tokens) == 0:# for the empty tokens list: pass!
count += 1# count such abnormal tokens
continue
if markup == 'bieso':
random_labels = iob_iobes(random_labels)
label_ids = [label_map[x] for x in random_labels]
flag = 1
for i in label_ids:
if i != label_map['O']:
                        flag = 0  # this example contains at least one entity
break
the_no_entity_number += flag
# align the label_ids with tokens
tokens, new_label, label_ids, j = markup_for_gpt2_english(tokens, label_ids, label_all_tokens)
# truncate
special_tokens_count = 0
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
new_label = new_label[: (max_seq_length - special_tokens_count)]
segment_ids = [sequence_a_segment_id] * len(tokens)
pad_token = 0
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# input_ids += [102]
# input_ids = [101]+input_ids
# The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
input_len = len(new_label)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
new_label = ([-100] * padding_length) + new_label
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
new_label += [-100] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
# if ex_index < 5:
# logger.info("*** Example ***")
# logger.info("guid: %s", example.guid)
# logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
# logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
# logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
# logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
# logger.info("label_ids: %s", " ".join([str(x) for x in new_label]))
                if j == len(label_ids):  # make sure every label id was carried over into new_label
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, input_len=input_len,
segment_ids=segment_ids, label_ids=new_label))# tokens = tokens
else:
count += 1
print("**************** the total no entity example number: "+str(the_no_entity_number)+' ******************')
print("**************** average length of examples(not truncated): "+str(sum_length_of_example/ex_index) + ' ******************')
return features, count
elif 'duplicate train data and add **' in tokenizer_name and task_name == 'train':
print("only for train dataset, gpt2_english tokenizer random add ** around half of the entities, randomly chosen each epoch! ")
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
if type(example.text_a) == list:
new_text = ' '.join(example.text_a)
                    tokens = tokenizer.tokenize(' ' + new_text)  # prepend a space so the first word is tokenized with a leading 'Ġ'
sum_length_of_example += len(example.text_a)
else:
raise(NotImplementedError)
if len(tokens) == 0:# for the empty tokens list: pass!
count += 1# count such abnormal tokens
continue
if markup == 'bieso':
example.labels = iob_iobes(example.labels)
label_ids = [label_map[x] for x in example.labels]
flag = 1
for i in label_ids:
if i != 0:
                        flag = 0  # this example contains at least one entity
break
the_no_entity_number += flag
# align the label_ids with tokens
tokens, new_label, label_ids, j1 = markup_for_gpt2_english(tokens, label_ids, label_all_tokens)
# truncate
special_tokens_count = 0
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
new_label = new_label[: (max_seq_length - special_tokens_count)]
segment_ids = [sequence_a_segment_id] * len(tokens)
pad_token = 0
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
input_len = len(new_label)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
new_label = ([-100] * padding_length) + new_label
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
new_label += [-100] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
                if j1 == len(label_ids):  # make sure every label id was carried over into new_label
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, input_len=input_len,
segment_ids=segment_ids, label_ids=new_label))# tokens = tokens
else:
count += 1
if len(example.labels) == 0:
count += 1
continue
random_labels = [example.labels[m] for m in range(len(example.labels))]
random_text = [example.text_a[m] for m in range(len(example.text_a))]
shift_place = 0
for k in range(1, len(example.labels)-1):
if example.labels[k] != 'O':
if example.labels[k-1] == 'O':
random_text.insert(k+shift_place, '*')
random_labels.insert(k+shift_place, 'O')
shift_place += 1
if example.labels[k+1] == 'O':
random_text.insert(k+1+shift_place, '*')
random_labels.insert(k+1+shift_place, 'O')
shift_place += 1
if example.labels[0] != 'O':
random_text.insert(0, '*')
random_labels.insert(0, 'O')
if example.labels[-1] != 'O':
random_text.append('*')
random_labels.append('O')
else:
if len(example.labels) >= 2 and example.labels[-2] != 'O':
random_text.insert(-1, '*')
random_labels.insert(-1, 'O')
new_text = ' '.join(random_text)
                tokens = tokenizer.tokenize(' ' + new_text)  # prepend a space so the first word is tokenized with a leading 'Ġ'
sum_length_of_example += len(random_text)
if len(tokens) == 0:# for the empty tokens list: pass!
count += 1# count such abnormal tokens
continue
if markup == 'bieso':
random_labels = iob_iobes(random_labels)
label_ids = [label_map[x] for x in random_labels]
flag = 1
for i in label_ids:
if i != label_map['O']:
                        flag = 0  # this example contains at least one entity
break
the_no_entity_number += flag
# align the label_ids with tokens
tokens, new_label, label_ids, j2 = markup_for_gpt2_english(tokens, label_ids, label_all_tokens)
# truncate
special_tokens_count = 0
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
new_label = new_label[: (max_seq_length - special_tokens_count)]
segment_ids = [sequence_a_segment_id] * len(tokens)
pad_token = 0
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
input_len = len(new_label)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
new_label = ([-100] * padding_length) + new_label
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
new_label += [-100] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
                if j2 == len(label_ids):  # make sure every label id was carried over into new_label
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, input_len=input_len,
segment_ids=segment_ids, label_ids=new_label))# tokens = tokens
else:
count += 1
print("**************** the total no entity example number: "+str(the_no_entity_number)+' ******************')
print("**************** average length of examples(not truncated): "+str(sum_length_of_example/ex_index) + ' ******************')
return features, count
elif 'filling entity' in tokenizer_name and task_name in ['train', 'dev']:
print("gpt2_english tokenizer for filling in the entities in sequences ")
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
if type(example.text_a) == list:
new_text = ' '.join(example.text_a)
                    tokens = tokenizer.tokenize(' ' + new_text)  # prepend a space so the first word is tokenized with a leading 'Ġ'
sum_length_of_example += len(example.text_a)
else:
raise(NotImplementedError)
if len(tokens) == 0:# for the empty tokens list: pass!
count += 1# count such abnormal tokens
continue
if markup == 'bieso':
example.labels = iob_iobes(example.labels)
label_ids = [label_map[x] for x in example.labels]
flag = 1
for i in label_ids:
if i != label_map['O']:
                        flag = 0  # this example contains at least one entity
break
the_no_entity_number += flag
# align the label_ids with tokens
tokens, new_label, label_ids, j = markup_for_gpt2_english(tokens, label_ids, label_all_tokens)
entity_place = [k for k in range(len(new_label)) if new_label[k] != label_map['O']]
# replace entity with special token, such as *, or special
                # todo: does the attention mask for removed_entity_tokens need changing? apparently not - only pad tokens should be masked out
removed_entity_tokens = remove_entity(tokens, entity_place, replace_token)
# truncate
special_tokens_count = 0
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
removed_entity_tokens = removed_entity_tokens[: (max_seq_length - special_tokens_count)]
new_label = new_label[: (max_seq_length - special_tokens_count)]
segment_ids = [sequence_a_segment_id] * len(tokens)
pad_token = 0
input_ids = tokenizer.convert_tokens_to_ids(tokens)
removed_input_ids = tokenizer.convert_tokens_to_ids(removed_entity_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
input_len = len(new_label)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
removed_input_ids = ([pad_token] * padding_length) + removed_input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
new_label = ([-100] * padding_length) + new_label
else:
input_ids += [pad_token] * padding_length
removed_input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
new_label += [-100] * padding_length
assert len(input_ids) == max_seq_length
assert len(removed_input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
                if j == len(label_ids):  # make sure every label id was carried over into new_label
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, input_len=input_len,
segment_ids=segment_ids, label_ids=new_label, removed_input_ids=removed_input_ids))# tokens = tokens
else:
count += 1
print("**************** the total no entity example number: "+str(the_no_entity_number)+' ******************')
print("**************** average length of examples(not truncated): "+str(sum_length_of_example/ex_index) + ' ******************')
return features, count
elif "bert" in tokenizer_name:
dataset, percent = dataset
all_random_samples = random.sample(range(0, len(examples)), math.ceil(len(examples)*int(percent)/100))
print('bert english tokenizer')
for (ex_index, example) in enumerate(examples):
if task_name == 'train':
if ex_index not in all_random_samples:
continue
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
label_ids = [label_map[x] for x in example.labels]
flag = 1
for i in label_ids:
if i != label_map['O']:
flag = 0
the_no_entity_number += flag
tokens = []
new_label = []
                # align the label_ids with tokens (following the released bert-ner at
                # https://github.com/kyzhouhzau/BERT-NER/blob/master/BERT_NER.py: tokenize each word separately, then concatenate)
for i in range(len(example.text_a)):
token = tokenizer.tokenize(example.text_a[i])
tokens.extend(token)
for j, _ in enumerate(token):
if j == 0:
new_label.append(label_ids[i])
else:
new_label.append(label_ids[i])
# new_label.append(-100)
                # todo: the 5e-5 lr run used "label off", the 1e-4 run used "label on"
assert len(tokens) == len(new_label)
# new_label = [0] * len(tokens)
# j = 0
# for i in range(len(tokens)):
# if '##' not in tokens[i]:
# new_label[i] = label_ids[j]
# j = j+1
# if j == len(label_ids):
# # something wrong here!
# # ids that cannot be converted should be passed, such examples include:
# # [' 's ', ...]
# break
# else:
# new_label[i] = -100# new_label[i-1]
# Account for [CLS] and [SEP] with "- 2".
special_tokens_count = 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
new_label = new_label[: (max_seq_length - special_tokens_count)]
tokens += [sep_token]
new_label += [label_map['O']]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
new_label += [label_map['O']]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
new_label = [label_map['O']] + new_label
segment_ids = [cls_token_segment_id] + segment_ids
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
new_label = new_label[: (max_seq_length - special_tokens_count)]
segment_ids = segment_ids[: (max_seq_length - special_tokens_count)]
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
new_label = ([-100] * padding_length) + new_label
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
new_label += [-100] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
input_len = min(len(new_label), max_seq_length)
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, input_len=input_len,
segment_ids=segment_ids, label_ids=new_label))# tokens = tokens
print("the_no_entity_number: "+str(the_no_entity_number))
return features, count
else:
raise(ValueError("tokenizer not implemented, English dataset only support gpt2 model and gpt2 tokenizer"))
    else:  # Chinese
print("chinese:only use bert-base-chinese tokenizer")
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
if type(example.text_a) == list:
new_text = ''.join(example.text_a)
else:
raise(NotImplementedError)
tokens = tokenizer.tokenize(new_text)
sum_length_of_example += len(tokens)
label_ids = [label_map[x] for x in example.labels]
flag = 1
for i in label_ids:
                if i != label_map['O']:
                    flag = 0  # this example contains at least one entity
break
the_no_entity_number += flag
# Account for [CLS] and [SEP] with "- 2".
special_tokens_count = 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# label_ids += [label_map['O']]
# label_ids = [label_map['O']] + label_ids
# tokens = ['*']+tokens
# tokens = tokens + ['*']
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [label_map['O']]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [label_map['O']]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [label_map['O']] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
input_len = len(label_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([-100] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [-100] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
# assert len(label_ids) == max_seq_length
# if ex_index < 5:
# logger.info("*** Example ***")
# logger.info("guid: %s", example.guid)
# logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
# logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
# logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
# logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
# logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
if len(label_ids) == max_seq_length:
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, input_len=input_len,
segment_ids=segment_ids, label_ids=label_ids))# tokens = tokens
else:
count = count + 1
print("**************** the total no entity example number: "+str(the_no_entity_number)+' ******************')
print("**************** average length of examples(not truncated): "+str(sum_length_of_example/ex_index) + ' ******************')
return features, count
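# Illustrative sketch (not called in this module) of one way the returned
# features can be wired into a DataLoader that uses `collate_fn` above; the
# tuple order must match the unpacking in collate_fn, and the last slot is
# only meaningful for the 'filling entity' path (reuse input_ids as a
# placeholder otherwise):
#
#     from torch.utils.data import TensorDataset, DataLoader
#     ids    = torch.tensor([f.input_ids for f in features], dtype=torch.long)
#     mask   = torch.tensor([f.input_mask for f in features], dtype=torch.long)
#     segs   = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
#     lens   = torch.tensor([f.input_len for f in features], dtype=torch.long)
#     labels = torch.tensor([f.label_ids for f in features], dtype=torch.long)
#     dataset = TensorDataset(ids, mask, segs, lens, labels, ids)
#     loader = DataLoader(dataset, batch_size=32, collate_fn=collate_fn)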
class CnerProcessor(DataProcessor):
"""Processor for the chinese ner data set."""
def get_train_examples(self, data_dir, limit):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "train.char.bmes")), "train", limit)
def get_dev_examples(self, data_dir, limit):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "dev.char.bmes")), "dev", limit)
def get_test_examples(self, data_dir, limit):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "test.char.bmes")), "test", limit)
def get_labels(self, markup='biso'):
"""See base class."""
assert markup=='biso'
if markup == 'bio':
return ["O",
'B-CONT','I-CONT',
'B-EDU', 'I-EDU',
'B-LOC', 'I-LOC',
'B-NAME', 'I-NAME',
'B-ORG', 'I-ORG',
'B-PRO', 'I-PRO',
'B-RACE', 'I-RACE',
'B-TITLE', 'I-TITLE']#, 'X', "[START]", "[END]"
elif markup == 'biso':
return ["O",
'S-CONT','B-CONT','I-CONT',
'S-EDU','B-EDU', 'I-EDU',
'S-LOC','B-LOC', 'I-LOC',
'S-NAME','B-NAME', 'I-NAME',
'S-ORG', 'B-ORG', 'I-ORG',
'S-PRO','B-PRO', 'I-PRO',
'S-RACE', 'B-RACE', 'I-RACE',
'S-TITLE', 'B-TITLE', 'I-TITLE',
]
elif markup == 'bieso':
return ["O",
'S-CONT','B-CONT','I-CONT', 'E-CONT',
'S-EDU','B-EDU', 'I-EDU', 'E-EDU',
'S-LOC','B-LOC', 'I-LOC', 'E-LOC',
'S-NAME','B-NAME', 'I-NAME', 'E-NAME',
'S-ORG', 'B-ORG', 'I-ORG', 'E-ORG',
'S-PRO','B-PRO', 'I-PRO', 'E-PRO',
'S-RACE', 'B-RACE', 'I-RACE', 'E-RACE',
'S-TITLE','B-TITLE', 'I-TITLE', 'E-TITLE',
]
def _create_examples(self, lines, set_type, limit=None):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
if limit != None:
if i > limit:
break
guid = "%s-%s" % (set_type, i)
text_a= line['words']
# BIOS
labels = []
for x in line['labels']:
# change the labels in cner dataset to BIO style
if 'M-' in x:
labels.append(x.replace('M-', 'I-'))
elif 'E-' in x:
labels.append(x.replace('E-', 'I-'))
else:
labels.append(x)
examples.append(InputExample(guid=guid, text_a=text_a, labels=labels))
return examples
class CluenerProcessor(DataProcessor):
"""Processor for the chinese ner data set."""
def get_train_examples(self, data_dir, limit):
"""See base class."""
return self._create_examples(self._read_json(os.path.join(data_dir, "train.json")), "train", limit)
def get_dev_examples(self, data_dir, limit):
"""See base class."""
return self._create_examples(self._read_json(os.path.join(data_dir, "dev.json")), "dev", limit)
def get_test_examples(self, data_dir, limit):
"""See base class."""
return self._create_examples(self._read_json(os.path.join(data_dir, "dev.json")), "test", limit)
def get_labels(self, markup='biso'):
"""See base class."""
assert markup=='biso'
if markup == 'biso':
return ["O",
"S-address", "B-address", "I-address",
"S-book", "B-book", "I-book",
"S-company", "B-company", "I-company",
'S-game', 'B-game', 'I-game',
'S-government', 'B-government', 'I-government',
'S-movie', 'B-movie', 'I-movie',
'S-name', 'B-name', 'I-name',
'S-organization', 'B-organization', 'I-organization',
'S-position', 'B-position', 'I-position',
'S-scene', 'B-scene', 'I-scene',
]
elif markup == 'bio':
return ["O",
"B-address", "I-address",
"B-book", "I-book",
"B-company", "I-company",
'B-game', 'I-game',
'B-government','I-government',
'B-movie','I-movie',
'B-name','I-name',
'B-organization','I-organization',
'B-position', 'I-position',
'B-scene', 'I-scene',
]
elif markup == 'bieso':
return ["O",
"S-address","B-address", "I-address", "E-address",
"S-book", "B-book", "I-book","E-book",
"S-company","B-company", "I-company", "E-company",
'S-game','B-game', 'I-game','E-game',
'S-government','B-government','I-government','E-government',
'S-movie','B-movie','I-movie','E-movie',
'S-name','B-name','I-name','E-name',
'S-organization','B-organization','I-organization','E-organization',
'S-position','B-position', 'I-position', 'E-position',
                    'S-scene', 'B-scene', 'I-scene', 'E-scene',
]
else:
raise (NotImplementedError)
def _create_examples(self, lines, set_type, limit=None):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if limit != None:
if i > limit:
break
guid = "%s-%s" % (set_type, i)
text_a= line['words']
# BIOS
labels = line['labels']
examples.append(InputExample(guid=guid, text_a=text_a, labels=labels))
return examples
class MovieProcessor(DataProcessor):
"""Processor for an english ner data set."""
def get_train_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text3(os.path.join(data_dir, "train")), "train", limit)
def get_dev_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text3(os.path.join(data_dir, "train")), "dev", limit)
def get_test_examples(self, data_dir,limit=None):
"""See base class."""
return self._create_examples(self._read_text3(os.path.join(data_dir, "test")), "test", limit)
def get_labels(self, markup='bio'):
"""See base class."""
return ['O',
'B-GENRE', 'I-GENRE',
'B-YEAR', 'I-YEAR',
'B-TITLE', 'I-TITLE',
'B-SONG', 'I-SONG',
'B-ACTOR', 'I-ACTOR',
'B-CHARACTER', 'I-CHARACTER',
'B-RATING', 'I-RATING',
'B-PLOT', 'I-PLOT',
'B-REVIEW', 'I-REVIEW',
'B-DIRECTOR', 'I-DIRECTOR',
'B-RATINGS_AVERAGE', 'I-RATINGS_AVERAGE',
'B-TRAILER', "I-TRAILER"
]
        # do not change the order!
def _create_examples(self, lines, set_type, limit=None):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
if limit != None:
if i > limit:
break
guid = "%s-%s" % (set_type, i)
text_a = line['words']
# BIOS
labels = []
for x in line['labels']:
if 'M-' in x:
labels.append(x.replace('M-', 'I-'))
elif 'E-' in x:
labels.append(x.replace('E-', 'I-'))
else:
labels.append(x)
examples.append(InputExample(guid=guid, text_a=text_a, labels=labels))
return examples
class RestaurantProcessor(DataProcessor):
"""Processor for an english ner data set."""
def get_train_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text3(os.path.join(data_dir, "train")), "train", limit)
def get_dev_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text3(os.path.join(data_dir, "train")), "dev", limit)
def get_test_examples(self, data_dir,limit=None):
"""See base class."""
return self._create_examples(self._read_text3(os.path.join(data_dir, "test")), "test", limit)
def get_labels(self, markup='bio'):
"""See base class."""
return ['O',
'B-Rating', 'I-Rating',
'B-Location', 'I-Location',
'B-Amenity', 'I-Amenity',
'B-Cuisine', 'I-Cuisine',
'B-Hours', 'I-Hours',
'B-Price', 'I-Price',
'B-Dish', 'I-Dish',
'B-Restaurant_Name', 'I-Restaurant_Name'
]
        # do not change the order!
def _create_examples(self, lines, set_type, limit=None):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
if limit != None:
if i > limit:
break
guid = "%s-%s" % (set_type, i)
text_a = line['words']
# BIOS
labels = []
for x in line['labels']:
if 'M-' in x:
labels.append(x.replace('M-', 'I-'))
elif 'E-' in x:
labels.append(x.replace('E-', 'I-'))
else:
labels.append(x)
examples.append(InputExample(guid=guid, text_a=text_a, labels=labels))
return examples
class Ontonote4Processor(DataProcessor):
"""Processor for the chinese ner data set."""
def get_train_examples(self, data_dir, limit):
"""See base class."""
return self._create_examples(self._read_text2(os.path.join(data_dir, "train.char.bmes")), "train", limit)
def get_dev_examples(self, data_dir, limit):
"""See base class."""
return self._create_examples(self._read_text2(os.path.join(data_dir, "dev.char.bmes")), "dev", limit)
def get_test_examples(self, data_dir, limit):
"""See base class."""
return self._create_examples(self._read_text2(os.path.join(data_dir, "test.char.bmes")), "test", limit)
def get_labels(self, markup='biso'):
"""See base class."""
assert markup == 'biso'
if markup == 'bieso':
return ["O",
'S-GPE', 'B-GPE', 'I-GPE', 'E-GPE',
'S-PER', 'B-PER', 'I-PER', 'E-PER',
'S-ORG', 'B-ORG', 'I-ORG', 'E-ORG',
'S-LOC', 'B-LOC', 'I-LOC', 'E-LOC',
] #'X', "[START]", "[END]"
elif markup == 'bio':
return ["O",
'B-GPE', 'I-GPE',
'B-PER', 'I-PER',
'B-ORG', 'I-ORG',
'B-LOC', 'I-LOC',
]# 'X', "[START]", "[END]"
elif markup == 'biso':
return ["O",
'S-GPE', 'B-GPE', 'I-GPE',
'S-PER', 'B-PER', 'I-PER',
'S-ORG', 'B-ORG', 'I-ORG',
'S-LOC', 'B-LOC', 'I-LOC',
] #'X', "[START]", "[END]"
# note: should be in this order!
else:
raise(NotImplementedError)
def _create_examples(self, lines, set_type, limit=None):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if limit != None:
if i > limit:
break
guid = "%s-%s" % (set_type, i)
text_a = line['words']
# BIOS
labels = []
for x in line['labels']:
x = x.strip('\n')
# change the labels in cner dataset to BIO style
if 'M-' in x:
labels.append(x.replace('M-', 'I-'))
elif 'E-' in x:
labels.append(x.replace('E-', 'I-'))
else:
labels.append(x)
# labels[-1] = 'O'
examples.append(InputExample(guid=guid, text_a=text_a, labels=labels))
return examples
class Conll2003Processor(DataProcessor):
"""Processor for an english ner data set."""
def get_train_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "train.txt")), "train", limit)
def get_dev_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "testa.txt")), "dev", limit)
def get_test_examples(self, data_dir,limit=None):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "testb.txt")), "test", limit)
    def get_labels(self, markup='biso'):
        """See base class.
        type can be chosen from [bio, bieso, biso]"""
if markup == 'bieso':
return ['O',
'S-LOC', 'B-LOC', 'I-LOC', 'E-LOC',
'S-PER', 'B-PER', 'I-PER', 'E-PER',
'S-MISC', 'B-MISC', 'I-MISC', 'E-MISC',
'S-ORG', 'B-ORG', 'I-ORG', 'E-ORG'
] #'X', "[START]", "[END]"
# note: should be in this order!
elif markup == 'biso':
return ['O',
'S-LOC', 'B-LOC', 'I-LOC',
'S-PER', 'B-PER', 'I-PER',
'S-MISC', 'B-MISC', 'I-MISC',
'S-ORG', 'B-ORG', 'I-ORG',
] #'X', "[START]", "[END]"
elif markup=='bio':
return ['O',
'B-LOC', 'I-LOC',
'B-PER', 'I-PER',
'B-MISC', 'I-MISC',
'B-ORG', 'I-ORG',
] #'X', "[START]", "[END]"
else:
raise(NotImplementedError)
def _create_examples(self, lines, set_type, limit=None):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
if limit != None:
if i > limit:
break
guid = "%s-%s" % (set_type, i)
text_a= line['words']
# BIOS
labels = []
for x in line['labels']:
if 'M-' in x:
labels.append(x.replace('M-', 'I-'))
elif 'E-' in x:
labels.append(x.replace('E-', 'I-'))
else:
labels.append(x)
examples.append(InputExample(guid=guid, text_a=text_a, labels=labels))
return examples
class OntonoteProcessor(DataProcessor):
"""Processor for an english ner data set."""
def get_train_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "train.sd.conllx"), 'ontonote'), "train", limit)
def get_dev_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "dev.sd.conllx"), 'ontonote'), "dev", limit)
def get_test_examples(self, data_dir,limit=None):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "test.sd.conllx"), 'ontonote'), "test", limit)
def get_labels(self, markup='bio'):
"""See base class.
        type can be chosen from [bio, bieso, biso]"""
if markup == 'bieso':
return ["O",
'S-NORP', 'B-NORP', 'I-NORP', 'E-NORP',
'S-GPE', 'B-GPE', 'I-GPE', 'E-GPE',
'S-FAC', 'B-FAC', 'I-FAC', 'E-FAC',
'S-PERSON', 'B-PERSON', 'I-PERSON', 'E-PERSON',
'S-DATE', 'B-DATE', 'I-DATE', 'E-DATE',
'S-ORG', 'B-ORG', 'I-ORG', 'E-ORG',
'S-LOC', 'B-LOC', 'I-LOC', 'E-LOC',
'S-WORK_OF_ART', 'B-WORK_OF_ART', 'I-WORK_OF_ART', 'E-WORK_OF_ART',
'S-EVENT', 'B-EVENT', 'I-EVENT', 'E-EVENT',
'S-CARDINAL', 'B-CARDINAL', 'I-CARDINAL', 'E-CARDINAL',
'S-ORDINAL', 'B-ORDINAL', 'I-ORDINAL', 'E-ORDINAL',
'S-PRODUCT', 'B-PRODUCT', 'I-PRODUCT', 'E-PRODUCT',
'S-QUANTITY', 'B-QUANTITY', 'I-QUANTITY', 'E-QUANTITY',
'S-TIME', 'B-TIME', 'I-TIME', 'E-TIME',
'S-PERCENT', 'B-PERCENT', 'I-PERCENT', 'E-PERCENT',
'S-MONEY', 'B-MONEY', 'I-MONEY', 'E-MONEY',
'S-LAW', 'B-LAW', 'I-LAW', 'E-LAW',
'S-LANGUAGE', 'B-LANGUAGE', 'I-LANGUAGE', 'E-LANGUAGE',
] #'X', "[START]", "[END]"
elif markup == 'bio':
return ["O",
'B-NORP', 'I-NORP',
'B-GPE', 'I-GPE',
'B-FAC', 'I-FAC',
'B-PERSON', 'I-PERSON',
'B-DATE', 'I-DATE',
'B-ORG', 'I-ORG',
'B-LOC', 'I-LOC',
'B-WORK_OF_ART', 'I-WORK_OF_ART',
'B-CARDINAL', 'I-CARDINAL',
'B-ORDINAL', 'I-ORDINAL',
'B-PRODUCT', 'I-PRODUCT',
'B-QUANTITY', 'I-QUANTITY',
'B-TIME', 'I-TIME',
'B-EVENT', 'I-EVENT',
'B-PERCENT', 'I-PERCENT',
'B-MONEY', 'I-MONEY',
'B-LAW', 'I-LAW',
'B-LANGUAGE', 'I-LANGUAGE',
]# 'X', "[START]", "[END]"
elif markup == 'biso':
return ["O",
'S-NORP', 'B-NORP', 'I-NORP',
'S-GPE', 'B-GPE', 'I-GPE',
'S-FAC', 'B-FAC', 'I-FAC',
'S-PERSON', 'B-PERSON', 'I-PERSON',
'S-DATE', 'B-DATE', 'I-DATE',
'S-ORG', 'B-ORG', 'I-ORG',
'S-LOC', 'B-LOC', 'I-LOC',
'S-WORK_OF_ART', 'B-WORK_OF_ART', 'I-WORK_OF_ART',
'S-EVENT', 'B-EVENT', 'I-EVENT',
'S-CARDINAL', 'B-CARDINAL', 'I-CARDINAL',
'S-ORDINAL', 'B-ORDINAL', 'I-ORDINAL',
'S-PRODUCT', 'B-PRODUCT', 'I-PRODUCT',
'S-QUANTITY', 'B-QUANTITY', 'I-QUANTITY',
'S-TIME', 'B-TIME', 'I-TIME',
'S-PERCENT', 'B-PERCENT', 'I-PERCENT',
'S-MONEY', 'B-MONEY', 'I-MONEY',
'S-LAW', 'B-LAW', 'I-LAW',
'S-LANGUAGE', 'B-LANGUAGE', 'I-LANGUAGE',
] #'X', "[START]", "[END]"
# note: should be in this order!
else:
raise(NotImplementedError)
def _create_examples(self, lines, set_type, limit=None):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
if limit != None:
if i > limit:
break
guid = "%s-%s" % (set_type, i)
text_a = line['words']
# BIOS
labels = []
for x in line['labels']:
if 'M-' in x:
labels.append(x.replace('M-', 'I-'))
elif 'E-' in x:
labels.append(x.replace('E-', 'I-'))
else:
labels.append(x)
examples.append(InputExample(guid=guid, text_a=text_a, labels=labels))
return examples
class MovieTProcessor(DataProcessor):
"""Processor for an english ner data set."""
def get_train_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text3(os.path.join(data_dir, "train")), "train", limit)
def get_dev_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text3(os.path.join(data_dir, "train")), "dev", limit)
def get_test_examples(self, data_dir,limit=None):
"""See base class."""
return self._create_examples(self._read_text3(os.path.join(data_dir, "test")), "test", limit)
def get_labels(self,markup=None):
"""See base class."""
return ['O',
'B-Actor', 'I-Actor',
'B-Character_Name', 'I-Character_Name',
'B-Director', 'I-Director',
'B-Genre', 'I-Genre',
'B-Plot', 'I-Plot',
'B-Year', 'I-Year',
'B-Soundtrack', 'I-Soundtrack',
'B-Opinion', 'I-Opinion',
'B-Award', 'I-Award',
'B-Origin', 'I-Origin',
'B-Quote', 'I-Quote',
'B-Relationship', "I-Relationship",
]
        # do not change the order!
def _create_examples(self, lines, set_type, limit=None):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
if limit != None:
if i > limit:
break
guid = "%s-%s" % (set_type, i)
text_a = line['words']
# BIOS
labels = []
for x in line['labels']:
if 'M-' in x:
labels.append(x.replace('M-', 'I-'))
elif 'E-' in x:
labels.append(x.replace('E-', 'I-'))
else:
labels.append(x)
examples.append(InputExample(guid=guid, text_a=text_a, labels=labels))
return examples
class WnutProcessor(DataProcessor):
"""Processor for an english ner data set."""
def get_train_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "train.conll"), 'wnut'), "train", limit)
def get_dev_examples(self, data_dir, limit=None):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "dev.conll"), 'wnut'), "dev", limit)
def get_test_examples(self, data_dir,limit=None):
"""See base class."""
return self._create_examples(self._read_text(os.path.join(data_dir, "test.conll"), 'wnut'), "test", limit)
def get_labels(self, markup=None):
"""See base class."""
return ['O',
'B-person', 'I-person',
'B-location', 'I-location',
'B-group', 'I-group',
'B-corporation', 'I-corporation',
'B-product', 'I-product',
'B-creative_work', 'I-creative_work',
]
        # do not change the order!
def _create_examples(self, lines, set_type, limit=None):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
if limit != None:
if i > limit:
break
guid = "%s-%s" % (set_type, i)
text_a = line['words']
# BIOS
labels = []
for x in line['labels']:
if 'M-' in x:
labels.append(x.replace('M-', 'I-'))
elif 'E-' in x:
labels.append(x.replace('E-', 'I-'))
else:
labels.append(x)
examples.append(InputExample(guid=guid, text_a=text_a, labels=labels))
return examples
class Conll2003MRCProcessor(DataProcessor):
"""Processor for an english ner data set."""
def get_train_examples(self, data_dir, limit=None):
"""See base class."""
all_data = self._read_json3(os.path.join(data_dir, "mrc-ner.train"))
labels = self._read_text(os.path.join('datasets/conll_03_english', "train.txt"))
lines = []
for i in range(len(labels)):
words = all_data[4*i]['context'].split(' ')
lines.append({"words": words, "labels": labels[i]['labels']})
return self._create_examples(lines, "train", limit)
def get_dev_examples(self, data_dir, limit=None):
"""See base class."""
all_data = self._read_json3(os.path.join(data_dir, "mrc-ner.dev"))
labels = self._read_text(os.path.join('datasets/conll_03_english', "testa.txt"))
lines = []
for i in range(len(labels)):
words = all_data[4*i]['context'].split(' ')
lines.append({"words": words, "labels": labels[i]['labels']})
return self._create_examples(lines, "dev", limit)
def get_test_examples(self, data_dir, limit=None):
"""See base class."""
all_data = self._read_json3(os.path.join(data_dir, "mrc-ner.test"))
labels = self._read_text(os.path.join('datasets/conll_03_english', "testb.txt"))
lines = []
for i in range(len(labels)):
words = all_data[4*i]['context'].split(' ')
lines.append({"words": words, "labels": labels[i]['labels']})
return self._create_examples(lines, "test", limit)
def get_labels(self, markup=None):
"""See base class."""
return ['O','B-PER', 'I-PER', 'B-LOC', 'I-LOC', 'B-MISC', 'I-MISC', 'B-ORG', 'I-ORG']# "X", "[START]", "[END]"
    # do not change the order!
def _create_examples(self, lines, set_type, limit=None):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
            if limit is not None and i > limit:
                break
guid = "%s-%s" % (set_type, i)
text_a = line['words']
# BIOS
labels = []
for x in line['labels']:
if 'M-' in x:
labels.append(x.replace('M-', 'I-'))
elif 'E-' in x:
labels.append(x.replace('E-', 'I-'))
else:
labels.append(x)
examples.append(InputExample(guid=guid, text_a=text_a, labels=labels))
return examples
ner_processors = {
"cner": CnerProcessor,
'cluener': CluenerProcessor,
'ontonote4': Ontonote4Processor,
'conll': Conll2003Processor,
'ontonote': OntonoteProcessor,
'movie': MovieProcessor,
'restaurant': RestaurantProcessor,
'movie-t': MovieTProcessor,
'wnut': WnutProcessor,
'conll-mrc': Conll2003MRCProcessor
} |
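# A minimal usage sketch (not part of the original module): pick a processor from the
# registry above and load a capped number of training examples. The task name and
# data_dir value are illustrative assumptions and must match the dataset layout that
# the processor's _read_* helper expects.
def _example_load(task_name="wnut", data_dir="datasets/wnut"):
    processor = ner_processors[task_name]()
    label_list = processor.get_labels()
    train_examples = processor.get_train_examples(data_dir, limit=100)
    return label_list, train_examples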
py | 1a3c763339bce57c23183b9ac266d3617f7998de | """
Command Line Interface of the checker
"""
import argparse
import sys
import termcolor
def colored(text, color):
"""Returns string with colored text depending on platform"""
colored_text = text
if 'win' not in sys.platform:
# termcolor works only on linux
colored_text = termcolor.colored(text, color)
return colored_text
def print_error(message):
"""Prints message red colored"""
print(colored(message, "red"))
def print_success(message):
"""Prints message green colored"""
print(colored(message, "green"))
def get_parsed_args():
"""Parses arguments from stdin"""
parser = argparse.ArgumentParser(description='Simple test runner')
parser.add_argument(
'-p',
metavar='program',
default='./main' if 'win' not in sys.platform else 'main',
help='path to the tested program')
parser.add_argument('-d',
metavar='directory',
default='tests',
help='path to directory containing tests')
parser.add_argument('-g',
metavar='groups',
nargs='+',
help="groups in given directory that should be tested")
parser.add_argument('-v',
metavar='verifier',
help="path to custom verifier")
parser.add_argument('-b',
metavar='break',
default='true',
choices=['true', 'false'],
help='break on failed tests [true/false]')
parser.add_argument('-t',
metavar='timeout',
default=None,
type=float,
help='time limit after which TLE is raised')
parser.add_argument(
'--timer',
help="run program will have a line 'Time: [float]' on stderr",
action='store_true')
parser.add_argument('--sha',
                        help='calculate sha-256 instead of verifying',
action='store_true')
return parser.parse_args()
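# A minimal sketch of how these helpers might be wired together; the runner that would
# actually execute the tests lives elsewhere in the project and is only hinted at here.
if __name__ == '__main__':
    args = get_parsed_args()
    print_success("Program under test: %s" % args.p)
    print_success("Tests directory:    %s" % args.d)
    if args.sha:
        # "yellow" is just an illustrative termcolor choice for a notice message
        print(colored("sha-256 mode: outputs will be hashed instead of verified", "yellow"))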
|
py | 1a3c76f3bd37b010bfb3c8f33106d266ed2e11a9 | class Point:
def __init__(self, x, y, info=None):
self.x = x
self.y = y
self._info = info
def get_color(self):
return self._info[0]
def get_type(self):
return self._info[1]
def get_figure(self):
return self._info
def get_x(self):
return self.x
def get_y(self):
return self.y
def set_info(self, figure):
self._info = figure
def set_x(self, x):
self.x = x
def set_y(self, y):
self.y = y
def set_point(self, x, y):
self.x = x
self.y = y
def increase(self, x, y):
self.x += x
self.y += y
def normalize_point(self):
self.x = int(self.x / 32)
self.y = int(self.y / 24)
def prepare_first_move_black_pawn(self, selected_position):
if selected_position.get_y() < 2 and self.get_y() == 2:
self.set_y(1)
print("move black pawn 2 cells 52")
def prepare_first_move_white_pawn(self, selected_position):
if selected_position.get_y() > 5 and self.get_y() == -2:
print("move white pawn 2 cells 56")
self.set_y(-1)
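# A small usage sketch; the 32x24 cell size comes from normalize_point() above, while
# the pixel coordinates and figure info are illustrative assumptions.
def _example_point():
    p = Point(96, 72, info=("white", "pawn"))
    p.normalize_point()              # pixel coords -> board cell: (3, 3)
    p.increase(0, -2)                # a two-cell pawn move upwards
    return p.get_x(), p.get_y(), p.get_color(), p.get_type()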
|
py | 1a3c77da6e5d587501758c6997d2e3321c985bc4 | from __future__ import unicode_literals
# For backwards-compatibility. keep this file.
# (Many people are going to have key bindings that rely on this file.)
from .app import *
__all__ = [
# Old names.
'HasArg',
'HasCompletions',
'HasFocus',
'HasSelection',
'HasValidationError',
'IsDone',
'IsReadOnly',
'IsMultiline',
'RendererHeightIsKnown',
'InEditingMode',
'InPasteMode',
'ViMode',
'ViNavigationMode',
'ViInsertMode',
'ViInsertMultipleMode',
'ViReplaceMode',
'ViSelectionMode',
'ViWaitingForTextObjectMode',
'ViDigraphMode',
'EmacsMode',
'EmacsInsertMode',
'EmacsSelectionMode',
'IsSearching',
'HasSearch',
'ControlIsSearchable',
]
# Keep the original classnames for backwards compatibility.
HasValidationError = lambda: has_validation_error
HasArg = lambda: has_arg
IsDone = lambda: is_done
RendererHeightIsKnown = lambda: renderer_height_is_known
ViNavigationMode = lambda: vi_navigation_mode
InPasteMode = lambda: in_paste_mode
EmacsMode = lambda: emacs_mode
EmacsInsertMode = lambda: emacs_insert_mode
ViMode = lambda: vi_mode
IsSearching = lambda: is_searching
HasSearch = lambda: is_searching
ControlIsSearchable = lambda: control_is_searchable
EmacsSelectionMode = lambda: emacs_selection_mode
ViDigraphMode = lambda: vi_digraph_mode
ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode
ViSelectionMode = lambda: vi_selection_mode
ViReplaceMode = lambda: vi_replace_mode
ViInsertMultipleMode = lambda: vi_insert_multiple_mode
ViInsertMode = lambda: vi_insert_mode
HasSelection = lambda: has_selection
HasCompletions = lambda: has_completions
IsReadOnly = lambda: is_read_only
IsMultiline = lambda: is_multiline
HasFocus = has_focus  # No lambda here! (has_focus is a callable that returns a callable.)
InEditingMode = in_editing_mode
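# A minimal sketch of how these backwards-compatible names are typically used as key
# binding filters; the chosen key and the KeyBindings import path assume a
# prompt_toolkit 2.x-style application and are not taken from this module.
def _example_bindings():
    from prompt_toolkit.key_binding import KeyBindings
    kb = KeyBindings()

    @kb.add('c-t', filter=HasSelection() & EmacsMode())
    def _(event):
        # only fires while a selection exists and Emacs mode is active
        pass

    return kb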
|
py | 1a3c78b55826aec5a6625d33e9b9394f51650080 | try:
from . import generic as g
except BaseException:
import generic as g
class GLTFTest(g.unittest.TestCase):
def test_duck(self):
scene = g.get_mesh('Duck.glb', process=False)
# should have one mesh
assert len(scene.geometry) == 1
# get the mesh
geom = next(iter(scene.geometry.values()))
# should not be watertight
assert not geom.is_volume
# make sure export doesn't crash
export = scene.export(file_type='glb')
assert len(export) > 0
# check a roundtrip
reloaded = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make basic assertions
g.scene_equal(scene, reloaded)
# if we merge ugly it should now be watertight
geom.merge_vertices(textured=False)
assert geom.is_volume
def test_tex_export(self):
# load textured PLY
mesh = g.get_mesh('fuze.ply')
assert hasattr(mesh.visual, 'uv')
# make sure export as GLB doesn't crash on scenes
export = mesh.scene().export(file_type='glb')
assert len(export) > 0
# make sure it works on meshes
export = mesh.export(file_type='glb')
assert len(export) > 0
def test_cesium(self):
"""
        A GLTF with a multi-primitive mesh
"""
s = g.get_mesh('CesiumMilkTruck.glb')
# should be one Trimesh object per GLTF "primitive"
assert len(s.geometry) == 4
# every geometry displayed once, except wheels twice
assert len(s.graph.nodes_geometry) == 5
# make sure export doesn't crash
export = s.export(file_type='glb')
assert len(export) > 0
reloaded = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make basic assertions
g.scene_equal(s, reloaded)
def test_units(self):
"""
Trimesh will store units as a GLTF extra if they
        are defined, so check that.
"""
original = g.get_mesh('pins.glb')
        # export it as a GLB file
export = original.export(file_type='glb')
kwargs = g.trimesh.exchange.gltf.load_glb(
g.trimesh.util.wrap_as_stream(export))
# roundtrip it
reloaded = g.trimesh.exchange.load.load_kwargs(kwargs)
# make basic assertions
g.scene_equal(original, reloaded)
# make assertions on original and reloaded
for scene in [original, reloaded]:
# units should be stored as an extra
assert scene.units == 'mm'
# make sure we have two unique geometries
assert len(scene.geometry) == 2
# that should have seven instances
assert len(scene.graph.nodes_geometry) == 7
# all meshes should be well constructed
assert all(m.is_volume for m in
scene.geometry.values())
# check unit conversions for fun
extents = scene.extents.copy()
as_in = scene.convert_units('in')
# should all be exactly mm -> in conversion factor
assert g.np.allclose(
extents / as_in.extents, 25.4, atol=.001)
m = g.get_mesh('testplate.glb')
assert m.units == 'meters'
def test_gltf(self):
# split a multibody mesh into a scene
scene = g.trimesh.scene.split_scene(
g.get_mesh('cycloidal.ply'))
# should be 117 geometries
assert len(scene.geometry) >= 117
# a dict with {file name: str}
export = scene.export(file_type='gltf')
# load from just resolver
r = g.trimesh.load(file_obj=None,
file_type='gltf',
resolver=export)
# will assert round trip is roughly equal
g.scene_equal(r, scene)
# try loading from a ZIP archive
zipped = g.trimesh.util.compress(export)
r = g.trimesh.load(
file_obj=g.trimesh.util.wrap_as_stream(zipped),
file_type='zip')
# try loading from a file name
# will require a file path resolver
with g.TemporaryDirectory() as d:
for file_name, data in export.items():
with open(g.os.path.join(d, file_name), 'wb') as f:
f.write(data)
# load from file path of header GLTF
rd = g.trimesh.load(
g.os.path.join(d, 'model.gltf'))
# will assert round trip is roughly equal
g.scene_equal(rd, scene)
def test_gltf_pole(self):
scene = g.get_mesh('simple_pole.glb')
# should have multiple primitives
assert len(scene.geometry) == 11
export = scene.export(file_type='glb')
assert len(export) > 0
# check a roundtrip
reloaded = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make basic assertions
g.scene_equal(scene, reloaded)
def test_material_hash(self):
# load mesh twice independently
a = g.get_mesh('fuze.obj')
b = g.get_mesh('fuze.obj')
# move one of the meshes away from the other
a.apply_translation([a.scale, 0, 0])
# materials should not be the same object
assert id(a.visual.material) != id(b.visual.material)
# despite being loaded separately material hash should match
assert hash(a.visual.material) == hash(b.visual.material)
# create a scene with two meshes
scene = g.trimesh.Scene([a, b])
# get the exported GLTF header of a scene with both meshes
header = g.json.loads(scene.export(
file_type='gltf')['model.gltf'].decode('utf-8'))
# header should contain exactly one material
assert len(header['materials']) == 1
# both meshes should be contained in the export
assert len(header['meshes']) == 2
# get a reloaded version
reloaded = g.trimesh.load(
file_obj=g.trimesh.util.wrap_as_stream(
scene.export(file_type='glb')),
file_type='glb')
# meshes should have survived
assert len(reloaded.geometry) == 2
# get meshes back
ar, br = reloaded.geometry.values()
# should have been loaded as a PBR material
assert isinstance(ar.visual.material,
g.trimesh.visual.material.PBRMaterial)
# materials should have the same memory location
assert id(ar.visual.material) == id(br.visual.material)
# make sure hash is returning something
ahash = hash(ar.visual.material)
# should be returning valid material hashes
assert isinstance(ahash, int)
assert ahash != 0
def test_node_name(self):
"""
Test to see if node names generally survive
an export-import cycle.
"""
# a scene
s = g.get_mesh('cycloidal.3DXML')
# export as GLB then re-load
r = g.trimesh.load(
g.trimesh.util.wrap_as_stream(
s.export(file_type='glb')),
file_type='glb')
# make sure we have the same geometries before and after
assert set(s.geometry.keys()) == set(r.geometry.keys())
# make sure the node names are the same before and after
assert (set(s.graph.nodes_geometry) ==
set(r.graph.nodes_geometry))
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
py | 1a3c7985c1ab355b86cdb78975c4964a45059ce5 | import os
from . import connect
sg = connect(os.environ.get('SGCACHE_SHOTGUN_URL', 'http://localhost:8020/'))
cache = connect()
|
py | 1a3c7a4be529c90225f1a911b3cc769331aabafd | from sqlalchemy import func
from fence.errors import NotFound, UserError
from fence.models import (
Project,
StorageAccess,
CloudProvider,
ProjectToBucket,
Bucket,
User,
AccessPrivilege,
Group,
UserToGroup,
)
__all__ = [
"get_project",
"create_project_with_dict",
"create_project",
"create_bucket_on_project",
"get_project_info",
"get_all_projects",
"delete_project",
"delete_bucket_on_project",
"list_buckets_on_project",
"get_cloud_providers_from_project",
"get_buckets_by_project_cloud_provider",
"get_user_project_access_privilege",
]
def get_project(current_session, projectname):
return current_session.query(Project).filter_by(name=projectname).first()
def create_project_with_dict(current_session, project_data):
"""
Create a project given a dict of all needed info
Args:
project_data (dict): dict of project info
Return:
None
"""
auth_id = project_data["auth_id"]
name = project_data.get("name") or auth_id
storage_accesses = project_data.get("storage_accesses", [])
project = create_project(
current_session, name, auth_id, [sa["name"] for sa in storage_accesses]
)
for sa in storage_accesses:
for bucket in sa.get("buckets", []):
create_bucket_on_project(current_session, name, bucket, sa["name"])
return project
def create_project(current_session, name, auth_id, storage_accesses):
"""
Creates a project with an associated auth_id and storage access
"""
new_project = Project(name=name, auth_id=auth_id)
current_session.add(new_project)
current_session.flush()
for storage in storage_accesses:
provider = (
current_session.query(CloudProvider)
.filter(CloudProvider.name == storage)
.first()
)
if provider:
new_storage_access = StorageAccess(
provider_id=provider.id, project_id=new_project.id
)
current_session.add(new_storage_access)
else:
raise NotFound()
return new_project
def create_bucket_on_project(current_session, project_name, bucket_name, provider_name):
"""
Create a bucket and assign it to a project
"""
project = (
current_session.query(Project).filter(Project.name == project_name).first()
)
if not project:
msg = "".join(["Project ", project_name, " not found"])
raise NotFound(msg)
provider = (
current_session.query(CloudProvider)
.filter(CloudProvider.name == provider_name)
.first()
)
if not provider:
msg = "".join(["Provider ", provider_name, " not found"])
raise NotFound(msg)
bucket = (
current_session.query(Bucket)
.filter(Bucket.name == bucket_name, Bucket.provider_id == provider.id)
.first()
)
if not bucket:
bucket = Bucket(name=bucket_name, provider_id=provider.id)
current_session.add(bucket)
current_session.flush()
proj_to_bucket = ProjectToBucket(
project_id=project.id, bucket_id=bucket.id, privilege=["owner"]
)
current_session.add(proj_to_bucket)
# Find the users that need to be updated
users_in_project = current_session.query(AccessPrivilege).filter(
AccessPrivilege.project_id == project.id
)
users_to_update = []
for row in users_in_project:
usr = current_session.query(User).filter(User.id == row.user_id).first()
users_to_update.append((usr, row.privilege))
return {
"result": "success",
"provider": provider,
"bucket": bucket,
"users_to_update": users_to_update,
}
else:
raise UserError("Error, name already in use for that storage system")
def get_project_info(current_session, project_name):
"""
Get project info from userdatamodel
from its name
"""
proj = get_project(current_session, project_name)
if not proj:
msg = "".join(["Error: project ", project_name, " not found"])
raise NotFound(msg)
info = {
"id": proj.id,
"name": proj.name,
"auth_id": proj.auth_id,
"description": proj.description,
"associated buckets": [],
}
buckets = current_session.query(ProjectToBucket).filter(
ProjectToBucket.project_id == proj.id
)
for row in buckets:
bucket = (
current_session.query(Bucket).filter(Bucket.id == row.bucket_id).first()
)
info["associated buckets"].append(bucket.name)
return info
def get_all_projects(current_session):
projects = current_session.query(Project).all()
projects_info = [
get_project_info(current_session, project.name) for project in projects
]
return {"projects": projects_info}
def delete_project(current_session, project_name):
"""
Delete the project from the database
The project should have no buckets in use
"""
proj = current_session.query(Project).filter(Project.name == project_name).first()
if not proj:
return {"result": "error, project not found"}
buckets = (
current_session.query(ProjectToBucket)
.filter(ProjectToBucket.project_id == proj.id)
.first()
)
if buckets:
msg = (
"error, project still has buckets associated with it. Please"
" remove those first and then retry."
)
return {"result": msg}
storage_access = current_session.query(StorageAccess).filter(
StorageAccess.project_id == proj.id
)
"""
Find the users that only belong to this project
and store them to be removed
"""
accesses = current_session.query(AccessPrivilege).filter(
AccessPrivilege.project_id == proj.id
)
users_to_remove = []
for access in accesses:
num = (
current_session.query(func.count(AccessPrivilege.project_id))
.filter(AccessPrivilege.user_id == access.user_id)
.scalar()
)
if num == 1:
for storage in storage_access:
provider = (
current_session.query(CloudProvider)
.filter(CloudProvider.id == storage.provider_id)
.first()
)
usr = (
current_session.query(User)
.filter(User.id == access.user_id)
.first()
)
users_to_remove.append((provider, usr))
current_session.delete(usr)
current_session.delete(access)
for storage in storage_access:
current_session.delete(storage)
current_session.delete(proj)
return {"result": "success", "users_to_remove": users_to_remove}
def delete_bucket_on_project(current_session, project_name, bucket_name):
"""
Remove a bucket and its relationship to a project
"""
bucket = current_session.query(Bucket).filter_by(name=bucket_name).first()
if not bucket:
msg = "".join(["Bucket name ", bucket_name, " not found"])
raise NotFound(msg)
provider = (
current_session.query(CloudProvider)
.filter(CloudProvider.id == bucket.provider_id)
.first()
)
project = (
current_session.query(Project).filter(Project.name == project_name).first()
)
if not project:
msg = "".join(["Project name ", project_name, " not found"])
raise NotFound(msg)
proj_to_bucket = (
current_session.query(ProjectToBucket)
.filter(
ProjectToBucket.bucket_id == bucket.id,
ProjectToBucket.project_id == project.id,
)
.first()
)
if proj_to_bucket:
current_session.delete(proj_to_bucket)
current_session.delete(bucket)
return {"result": "success", "provider": provider}
else:
current_session.delete(bucket)
msg = (
"WARNING: Project-to-bucket "
"relationship not found, deleting bucket anyway"
)
return {"result": msg, "provider": provider}
def list_buckets_on_project(current_session, project_name):
"""
List all the buckets assigned to a project
"""
project = (
current_session.query(Project).filter(Project.name == project_name).first()
)
if not project:
msg = "".join(["Project name ", project_name, " not found"])
raise NotFound(msg)
buckets = current_session.query(ProjectToBucket).filter(
ProjectToBucket.project_id == project.id
)
response = {"buckets": []}
for bucket in buckets:
buck = (
current_session.query(Bucket).filter(Bucket.id == bucket.bucket_id).first()
)
provider = (
current_session.query(CloudProvider)
.filter(CloudProvider.id == buck.provider_id)
.first()
)
new_buck = {"name": buck.name, "provider": provider.name}
response["buckets"].append(new_buck)
return response
def get_cloud_providers_from_project(current_session, project_id):
"""
Retrieve cloud provider to be used in other operations that require the
backend.
"""
accesses = current_session.query(StorageAccess).filter(
StorageAccess.project_id == project_id
)
cloud_providers = []
for access in accesses:
cloud_providers.append(
current_session.query(CloudProvider)
.filter(CloudProvider.id == access.provider_id)
.first()
)
return cloud_providers
def get_buckets_by_project_cloud_provider(current_session, prjct_id, provider_id):
"""
List all the buckets assigned to a project
"""
buckets = current_session.query(ProjectToBucket).filter_by(project_id=prjct_id)
response = {"buckets": []}
for bucket in buckets:
buck = (
current_session.query(Bucket)
.filter(Bucket.id == bucket.bucket_id, Bucket.provider_id == provider_id)
.first()
)
if buck:
response["buckets"].append(buck)
return response
def get_user_project_access_privilege(current_session, user, project):
return (
current_session.query(AccessPrivilege)
.filter_by(project_id=project.id, user_id=user.id)
.first()
)
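# A minimal composition sketch (not part of the original module). It assumes an open
# SQLAlchemy session and an already-registered CloudProvider row named
# "demo-cleversafe"; the project and bucket names are illustrative.
def _example_setup(current_session):
    create_project_with_dict(
        current_session,
        {
            "name": "demo_project",
            "auth_id": "phs000demo",
            "storage_accesses": [
                {"name": "demo-cleversafe", "buckets": ["demo-bucket"]}
            ],
        },
    )
    # returns {"buckets": [{"name": "demo-bucket", "provider": "demo-cleversafe"}]}
    return list_buckets_on_project(current_session, "demo_project")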
|
py | 1a3c7b4ee79d9db963905814f9cf2bbddae81c60 | import tensorflow as tf
import numpy as np
def linear(input_, output_size, stddev=0.02, bias_start=0.0, activation_fn=None, name='linear'):
"""
Fully connected linear layer
:param input_:
:param output_size:
:param stddev:
:param bias_start:
:param activation_fn:
:param name:
:return:
"""
shape = input_.get_shape().as_list()
with tf.variable_scope(name):
w = tf.get_variable('Matrix', [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
b = tf.get_variable('bias', [output_size],
initializer=tf.constant_initializer(bias_start))
out = tf.nn.bias_add(tf.matmul(input_, w), b)
if activation_fn is not None:
return activation_fn(out), w, b
else:
return out, w, b
def simple_linear(input_, initializer=tf.constant_initializer([1.]), bias_start=0.0,
activation_fn=None, name='simple_linear'):
"""
simple element-wise linear layer
:param input_:
:param initializer
:param bias_start
:param activation_fn:
:param name:
:return:
"""
with tf.variable_scope(name):
w = tf.get_variable('Matrix', input_.get_shape(), tf.float32,
initializer)
b = tf.get_variable('bias', [input_.get_shape()[1]],
initializer=tf.constant_initializer(bias_start))
out = tf.nn.bias_add(tf.mul(input_, w), b)
if activation_fn is not None:
return activation_fn(out), w, b
else:
return out, w, b
def select_action_tf(belief, vector_set):
"""
Compute optimal action given a belief distribution
:param belief: dim(belief) == dim(AlphaVector)
:param vector_set
:return: optimal action, V(b)
"""
    assert len(vector_set) > 0
max_v = tf.constant([-np.inf], tf.float32)
best_action = tf.constant([-1])
for av in vector_set:
with tf.name_scope('V_b'):
v = tf.reduce_sum(tf.mul(av.v, belief))
best_action = tf.cond(tf.greater(v, max_v)[0], lambda: tf.constant([av.action]),
lambda: best_action)
max_v = tf.maximum(v, max_v)
return best_action, max_v
def clipped_error(x):
# Huber loss
try:
return tf.select(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
    except AttributeError:
return tf.where(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
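# A minimal TF1-style usage sketch; the input width, layer sizes, and loss wiring below
# are illustrative assumptions, not values from the original project.
if __name__ == '__main__':
    x = tf.placeholder(tf.float32, [None, 128], name='features')
    t = tf.placeholder(tf.float32, [None, 10], name='targets')
    h, w1, b1 = linear(x, 64, activation_fn=tf.nn.relu, name='fc1')
    y, w2, b2 = linear(h, 10, name='fc2')
    loss = tf.reduce_mean(clipped_error(y - t))  # robust, Huber-style regression loss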
|
py | 1a3c7dccd8441f881abd5daf6d877b5cb3737169 | #
# Copyright 2021 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from ucd.UCDClientUtil import UCD_Client_Util
verify_ssl = not server['disableSslVerification']
ucd_client = UCD_Client_Util.create_ucd_client(server, task, username, password, verify_ssl)
method = str(task.getTaskType()).lower().replace('.', '_')
call = getattr(ucd_client, method)
output = call(locals())
|
py | 1a3c7dced3c58d2da17bb94fbe3225e1ef8b6cd7 | # model settings
model = dict(
type='SimSiam',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3,), # no conv-1, x-1: stage-x
norm_cfg=dict(type='SyncBN'),
style='pytorch'),
neck=dict(
type='NonLinearNeck',
in_channels=2048, hid_channels=2048, out_channels=2048,
num_layers=3,
with_bias=True, with_last_bn=False, with_last_bn_affine=False,
with_avg_pool=True),
head=dict(
type='LatentPredictHead',
predictor=dict(
type='NonLinearNeck',
in_channels=2048, hid_channels=512, out_channels=2048,
num_layers=2,
with_avg_pool=False,
with_bias=True, with_last_bn=False, with_last_bias=True))
)
|
py | 1a3c802ce3abcc881de51bedbcdfa2c3d3b714ca |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import uda_acl_ext
class extended(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-uda-access-list - based on the path /uda/access-list/extended. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__name','__uda_acl_ext',)
_yang_name = 'extended'
_rest_name = 'extended'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)', u'cli-full-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-acl-name', is_config=True)
self.__uda_acl_ext = YANGDynClass(base=uda_acl_ext.uda_acl_ext, is_container='container', presence=False, yang_name="uda-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'uda', u'access-list', u'extended']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'uda', u'access-list', u'extended']
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /uda/access_list/extended/name (uda-acl-name)
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /uda/access_list/extended/name (uda-acl-name)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)', u'cli-full-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-acl-name', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with uda-acl-name""",
'defined-type': "brocade-uda-access-list:uda-acl-name",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)', u'cli-full-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-acl-name', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)', u'cli-full-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-acl-name', is_config=True)
def _get_uda_acl_ext(self):
"""
Getter method for uda_acl_ext, mapped from YANG variable /uda/access_list/extended/uda_acl_ext (container)
"""
return self.__uda_acl_ext
def _set_uda_acl_ext(self, v, load=False):
"""
Setter method for uda_acl_ext, mapped from YANG variable /uda/access_list/extended/uda_acl_ext (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_uda_acl_ext is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_uda_acl_ext() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=uda_acl_ext.uda_acl_ext, is_container='container', presence=False, yang_name="uda-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """uda_acl_ext must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=uda_acl_ext.uda_acl_ext, is_container='container', presence=False, yang_name="uda-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='container', is_config=True)""",
})
self.__uda_acl_ext = t
if hasattr(self, '_set'):
self._set()
def _unset_uda_acl_ext(self):
self.__uda_acl_ext = YANGDynClass(base=uda_acl_ext.uda_acl_ext, is_container='container', presence=False, yang_name="uda-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='container', is_config=True)
name = __builtin__.property(_get_name, _set_name)
uda_acl_ext = __builtin__.property(_get_uda_acl_ext, _set_uda_acl_ext)
_pyangbind_elements = {'name': name, 'uda_acl_ext': uda_acl_ext, }
|
py | 1a3c8053c3455d45e2f5e6104717400bee3bc234 | import numpy as np
import os
import sys
import math
from datetime import datetime
from importlib import reload
from pprint import pprint
from platform import python_version
print(python_version())
sys.path.append(os.getcwd())
import NDN3.NDNutils as NDNutils
import NDN3.NDN as NDN
import utils.data as udata
import utils.network as unet
import utils.analysis as uas
import utils.analysis_present as uasp
import fire
def runner(exp_folder, exp, run, hidden, c_size, c_filters):
run_1(exp_folder, exp, run, hidden, c_size, c_filters)
#
# based on bl3:
# - convolution instead of DoG
def run_1(exp_folder, exp, run, hidden, c_size, c_filters):
name = f'baseline3_C{c_filters}s{c_size}xN{hidden}x5000'
exp = f"{exp}1x{run}"
def get_hsm_params_custom(input, output, i):
_, output_shape = output.shape
_, input_shape = input.shape
pprint(f"in: {input_shape} out: {output_shape}")
        input_w, input_h = int(math.sqrt(input_shape)), int(math.sqrt(input_shape))
hsm_params = NDNutils.ffnetwork_params(
verbose=False,
            input_dims=[1, input_w, input_h],
layer_sizes=[c_filters, int(hidden*output_shape), output_shape], # paper: 9, 0.2*output_shape
ei_layers=[None, None, None],
normalization=[0, 0, 0],
layer_types=['conv','normal','normal'],
act_funcs=['lin', 'softplus','softplus'],
shift_spacing=[(c_size+1)//2, 2, 0],
conv_filter_widths=[c_size, 0, 0],
reg_list={})
hsm_params['weights_initializers']=['normal','normal','normal']
hsm_params['biases_initializers']=['trunc_normal','trunc_normal','trunc_normal']
return hsm_params
def get_training_params():
epochs = 5000
return {'batch_size': 16, 'use_gpu': False, 'epochs_summary': epochs//50, 'epochs_training': epochs, 'learning_rate': 0.001}
input_tr_processed, output_tr, output_tr_mask = udata.load_data_multiple(
[1], 'training', udata.normalize_mean_std)
input_val_processed, output_val, output_val_mask = udata.load_data_multiple(
[1], 'validation', udata.normalize_mean_std)
for i in range(10):
seed = i
hsm_params = get_hsm_params_custom(input_tr_processed, output_tr, i)
pprint(hsm_params)
hsm, input_tuple = unet.get_network(
input_tr_processed, output_tr,
'adam',
get_training_params(),
hsm_params,
'poisson',
input_val_processed, output_val,
output_tr_mask, output_val_mask,
f"{name}__{i}", seed,
)
hsm.log_correlation = 'zero-NaNs'
(input, output, train_indxs, test_indxs, data_filters, larg, opt_params, name_str) = input_tuple
hsm.train(
input_data=input,
output_data=output,
train_indxs=train_indxs,
test_indxs=test_indxs,
data_filters=data_filters,
learning_alg=larg,
opt_params=opt_params,
output_dir=f"training_data/logs/{exp_folder}/{exp}/{name_str}"
)
res, naeval, corr = uasp.evaluate_all(hsm, input_val_processed, output_val, output_val_mask)
hsm.save_model(f"./training_data/models/{exp_folder}/{exp}/{name}__{i}.ndnmod")
with open("./training_data/experiments.txt", "a+") as f:
f.write(f"{exp_folder}/{exp}/{name}\n")
if __name__ == "__main__":
fire.Fire(runner)
|
py | 1a3c80582554f8cc312170664257d8077612563c | from setuptools import setup, find_packages
from PublisherAzureTestsResults.version import VERSION
classifiers = [
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
]
setup(
name='robotframework-publisher-results-azure',
url='https://github.com/ismailktami/robotframework-publisher-results-azure',
version=VERSION,
description='Library to publish robot framework automation results on azure',
author='Ismail Ktami',
author_email='[email protected]',
license='MIT',
classifiers=classifiers,
keywords='robotframework azure devops testplans results outcomes',
packages=find_packages(),
install_requires=[
'robotframework>=3.2.2',
'requests',
'utils'
]
) |
py | 1a3c84a47c85315a1a7370b05a981f5ce56a568b | # /index.py
from flask import Flask, request, jsonify, render_template, redirect
import os
import dialogflow_v2 as dialogflow
import requests
import json
import pusher
from werkzeug.utils import secure_filename
from trim import song
from therapy import find
from sendemail import sendmail
from video_emotion import output
import cv2
import imutils
from tensorflow import keras
import numpy as np
import time
app = Flask(__name__)
lol = 0
@app.route('/chatbot_page')
def chatbot_page():
return render_template('index.html')
@app.route('/tictac')
def tictac():
return render_template("tictac.html")
@app.route("/webcam")
def webcam():
return render_template('webcam.html')
@app.route("/extras")
def extras():
return render_template("extra.html")
@app.route('/predict2', methods=['GET'])
def predict2():
global lol
file_path = "D:\Downloads\screenshot.jpg"
fin, mood = output(file_path)
os.remove(file_path)
# cv2.imshow("image", fin)
# cv2.waitKey(0)
new_path = "D:\Projects\djhack\static\saves2\zinished{}.jpg".format(
str(lol))
cv2.imwrite(new_path, fin)
lol = lol+1
time.sleep(1)
return render_template("something.html", image_name="static\saves2\zinished" + str(lol-1) + ".jpg")
def intensity(level):
if level == 'low':
return 30
if level == 'medium':
return 20
if level == 'high':
return 10
def score_inc(score, num):
    # take the running score as an argument; the original referenced an unbound local
    return score + num
@app.route('/webhook', methods=['POST'])
def webhook():
flag = 0
data = request.get_json(silent=True)
score = 0
if data['queryResult']['intent']['displayName'] == 'feel_happy':
reply = {
'fulfillmentText': 'happy works!',
}
return jsonify(reply)
if data['queryResult']['intent']['displayName'] == 'show_song':
rec_song = song()
my_string = "{} by {}"
my_string = my_string.format(
rec_song['song'][0], rec_song['artist'][0])
reply = {
'fulfillmentText': "According to your mood: " + my_string,
}
return jsonify(reply)
if data['queryResult']['intent']['displayName'] == 'doctor_rec':
city = data['queryResult']['parameters']['geo-city']
doctors = find(city)
fin = ""
for i in range(2):
my_string = "Doctor {}: \nName: {} Role: {} Contact: {}\n"
my_string = my_string.format(
i+1, doctors[i]['Name'], doctors[i]['Role'], doctors[i]['Contact'], )
fin += my_string
reply = {
'fulfillmentText': "Following are the doctor recommendations:\n" + fin
}
return jsonify(reply)
if data['queryResult']['intent']['displayName'] == 'Email':
sendmail()
reply = {
"fulfillmentText": "Email is on its way!"
        }
        return jsonify(reply)
# if data['queryResult']['intent']['displayName'] in ['feel_sad - yes - custom', 'feel_sad - yes - custom - custom', 'feel_sad - yes - custom - custom - custom']:
# level = data['queryResult']['parameters']
# score_inc(intensity(level))
# if data['queryResult']['intent']['displayName'] == 'feel_sad - yes - custom - custom - custom':
# stg = "Your concern level is {} out of 90."
# stg = stg.format(score)
# if score >= 30 and score < 50:
# reply = {
# 'fulfillmentText': stg + "You will be fine! Try playing our mini-games!"
# }
# elif score >= 50 and score < 70:
# reply = {
# 'fulfillmentText': stg + "Ask for song recommendations here. Take care, you'll get over it!"
# }
# elif score >= 70 and score <= 90:
# reply = {
# 'fulfillmentText': stg + "Please consider getting professional help. We can provide you with recommendations!"
# }
def detect_intent_texts(project_id, session_id, text, language_code):
session_client = dialogflow.SessionsClient()
session = session_client.session_path(project_id, session_id)
if text:
text_input = dialogflow.types.TextInput(
text=text, language_code=language_code)
query_input = dialogflow.types.QueryInput(text=text_input)
response = session_client.detect_intent(
session=session, query_input=query_input)
return response.query_result.fulfillment_text
@app.route('/send_message', methods=['POST'])
def send_message():
message = request.form['message']
project_id = os.getenv('DIALOGFLOW_PROJECT_ID')
fulfillment_text = detect_intent_texts(project_id, "unique", message, 'en')
response_text = {"message": fulfillment_text}
return jsonify(response_text)
@app.route('/snake')
def snake():
print("calls snake!")
return render_template('snake.html')
@app.route('/')
def home():
# landing page
return render_template('home.html')
@app.route('/services')
def services():
return render_template('services.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/doctor')
def doctor():
return render_template('doctor.html')
@app.route('/contact')
def contact():
return render_template('contact.html')
# run Flask app
if __name__ == "__main__":
app.run()
|
py | 1a3c84b7fb4a4746c48428351556a7aa652d1c58 | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Range
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import sys
from . import element
class Range(element.Element):
""" Set of values bounded by low and high.
A set of ordered Quantities defined by a low and high limit.
"""
resource_type = "Range"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.high = None
""" High limit.
Type `Quantity` (represented as `dict` in JSON). """
self.low = None
""" Low limit.
Type `Quantity` (represented as `dict` in JSON). """
super(Range, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Range, self).elementProperties()
js.extend(
[
("high", "high", quantity.Quantity, "Quantity", False, None, False),
("low", "low", quantity.Quantity, "Quantity", False, None, False),
]
)
return js
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + ".quantity"]
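# A minimal usage sketch (not part of the generated model): build a Range from a plain
# JSON dict and read it back. The numeric values and unit codes are illustrative
# assumptions, not values from the FHIR specification text above.
def _example_range():
    rng = Range(
        {
            "low": {"value": 1.0, "unit": "mg", "code": "mg"},
            "high": {"value": 5.0, "unit": "mg", "code": "mg"},
        }
    )
    return rng.low.value, rng.high.value, rng.as_json()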
|
py | 1a3c84ef9392e39c921eae454467696faf456ad2 | import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("localhost", 1028))
s.listen(1)
while True:
client, address = s.accept()
data = client.recv(1024)
client.send(data)
client.close()
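# A client-side sketch for exercising the echo server above; it is only a sketch and is
# meant to be run from a separate process while the server loop is listening. The
# payload is an illustrative assumption.
def run_client():
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect(("localhost", 1028))
    c.send(b"hello")           # the server echoes whatever it receives
    print(c.recv(1024))        # expected output: b'hello'
    c.close()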
|
py | 1a3c8519f355791ffb31bb224709071b5482bfdb | import time
from cnn_model import *
from audio_data import CNNDataset
from cnn_training import *
import argparse
import torch
import torch.nn as nn
import hdf5storage
import os
def main(config):
dtype = torch.FloatTensor
ltype = torch.LongTensor
use_cuda = torch.cuda.is_available()
if use_cuda:
print('Using CUDA.')
dtype = torch.cuda.FloatTensor
ltype = torch.cuda.LongTensor
torch.manual_seed(0)
optimizer = None
if config.stepName!='features':
if config.method == 'dnn':
model = CNNModel(kernel_size=config.kernel_size, nb_channels=config.nb_channels, nb_layers=config.nb_layers, dilation=config.dilation)
elif config.method == 'autoDense':
model = AutoDense()
elif config.method == 'autoStride':
model = AutoStride()
if use_cuda:
model = nn.DataParallel(model).cuda()
#model.cuda()
optimizer = optim.Adam(params=model.parameters(), lr=config.lr, weight_decay=0.0)
if hasattr(config.data, 'modelPath'):
modelPath = np.array2string(np.squeeze(config.data.modelPath))[1:-1]
print(modelPath)
checkpoint = torch.load(modelPath)
# print(checkpoint['model_state_dict'])
model.load_state_dict(checkpoint['model_state_dict'])
# print(checkpoint['optimizer_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# model = load_model_from(modelPath, use_cuda=True)
#model = torch.load('snapshots/some_model')
print('Model: ', model)
print('Parameter count: ', model.module.parameter_count())
if config.dataset=='librispeech':
inputLocation = np.array2string(np.squeeze(config.eC.inputPath))[1:-1]+'speech/LibriSpeech/'
dataset_name = 'dev-clean'
dataset_name_eval = 'test-clean'
elif config.dataset=='gtzan':
inputLocation = np.array2string(np.squeeze(config.eC.inputPath))[1:-1]+'music/gtzan/'
dataset_name = 'dev'
dataset_name_eval = 'test'
elif config.dataset=='medleysolos':
inputLocation = np.array2string(np.squeeze(config.eC.inputPath))[1:-1]+'music/medleysolos/'
dataset_name = 'dev'
dataset_name_eval = 'test'
print(inputLocation)
if config.stepName=='features':
dataLocation = np.array2string(np.squeeze(config.eC.dataPath))[1:-1]
dataLocation += 'features/'
dataLocation += np.array2string(np.squeeze(config.eS.infoHash))[1:-1]
dataLocationTrain = dataLocation+'_train'
dataLocationTest = dataLocation+'_test'
else:
dataLocationTrain = np.array2string(np.squeeze(config.data.trainPath))[1:-1]
dataLocationTest = np.array2string(np.squeeze(config.data.testPath))[1:-1]
data = CNNDataset(dataset_file=dataLocationTrain,
file_location=inputLocation+dataset_name,
sampling_rate=config.sampling_rate,
block_size = config.block_size,
frame_size = config.frame_size,
normalize=True, compute=config.stepName=='features', squeeze=config.squeeze)
data_eval = CNNDataset(dataset_file=dataLocationTest,
file_location=inputLocation+dataset_name_eval,
sampling_rate=config.sampling_rate,
block_size = config.block_size,
frame_size = config.frame_size,
normalize=True, compute=config.stepName=='features', squeeze=config.squeeze)
    print('Dataset size: ', len(data))
if config.stepName!='features':
trainer = CNNTrainer(model=model,
method=config.method,
lr=config.lr,
log_plus = config.log_plus,
weight_decay=0.0,
optimizer=optimizer,
snapshot_path=config.expLanes[0:-4],
snapshot_interval=config.snapshot_interval,
dtype=dtype,
spectrum_normalization = config.spectrum_normalization)
if config.stepName=='train':
print('----- Training -----')
store, obs = trainer.train(dataset=data,
dataset_validation=data_eval,
batch_size=config.batch_size,
epochs=config.epochs,
target=config.target,
q = config.q)
if config.stepName=='test':
print('----- Evaluation -----')
store, obs = trainer.test(dataset=data_eval, batch_size=config.block_size, save=True)
if config.expLanes :
if config.stepName=='features':
store = {}
obs = {}
store['trainPath'] = data.dataset_file
store['testPath'] = data_eval.dataset_file
store['trainFiles'] = data.get_files()
store['testFiles'] = data_eval.get_files()
obs['nbBlocksTrain'] = len(data)
obs['nbBlocksTest'] = len(data_eval)
if config.stepName=='train':
print('train')
if config.stepName=='test':
print('test')
if os.path.exists(config.expLanes[0:-8]+'_data.mat'):
os.remove(config.expLanes[0:-8]+'_data.mat')
hdf5storage.savemat(config.expLanes[0:-8]+'_data', store)
if os.path.exists(config.expLanes[0:-8]+'_obs.mat'):
os.remove(config.expLanes[0:-8]+'_obs.mat')
hdf5storage.savemat(config.expLanes[0:-8]+'_obs', obs)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    # Data
parser.add_argument('--expLanes', type=str, default='')
parser.add_argument('--dataset', type=str, default='dev-clean')
parser.add_argument('--dataset_eval', type=str, default='test-clean')
parser.add_argument('-load_mdl', action='store_true')
# Logging
parser.add_argument('--snapshot_interval', type=int, default=1000)
parser.add_argument('--validation_interval', type=int, default=2000000)
# Training
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--epochs', type=int, default=10)
    # Experience factors
parser.add_argument('--target', type=str, default='spec', help='spec, wspec, cqt')
parser.add_argument('--q', type=int, default=27)
config = parser.parse_args()
if config.expLanes :
print('loading expLanes config')
ec = hdf5storage.loadmat(config.expLanes)
print('done')
eSetting = ec['data']['info']['setting']
eConfig = ec['data']['info']
# print(ec)
config.eC = eConfig.view(np.recarray)
eS = eSetting.view(np.recarray)
config.stepName = np.squeeze(config.eC.stepName)
config.eS = eS
config.data = ec['data'].view(np.recarray)
# config.batch_size = int(np.nan_to_num(np.squeeze(eSetting['batchSize'])))
# config.block_size = int(np.nan_to_num(np.squeeze(eSetting['blockSize'])))
config.batch_size = 150
config.block_size = 150
config.squeeze = eSetting['squeeze']
config.dataset = np.array2string(np.squeeze(eSetting['dataset']))[1:-1]
config.method = np.array2string(np.squeeze(eSetting['method']))[1:-1]
if config.stepName=='features':
config.frame_size = int(np.nan_to_num(np.squeeze(ec['data']['frameSize'])))
config.sampling_rate = int(np.nan_to_num(np.squeeze(ec['data']['samplingFrequency'])))
else :
config.kernel_size = int(np.nan_to_num(np.squeeze(eSetting['kernelSize'])))
config.lr = float(np.squeeze(eSetting['learningRate']))
config.epochs = int(np.nan_to_num(np.squeeze(eSetting['epochs'])))
config.nb_channels = int(np.nan_to_num(np.squeeze(eSetting['nbChannels'])))
config.nb_layers = int(np.nan_to_num(np.squeeze(eSetting['nbLayers'])))
config.dilation = int(np.nan_to_num(np.squeeze(eSetting['dilation'])))
config.log_plus = int(np.nan_to_num(np.squeeze(eSetting['logPlus'])))
config.spectrum_normalization = int(np.nan_to_num(np.squeeze(eSetting['spectrumNormalization'])))
config.sampling_rate = 1
config.frame_size = 1
#print(config.epochs)
main(config)
|
py | 1a3c851bbdb75a56121ca735240987d52fa02a91 | import zmq
PORT = 9123
def main():
"""Main.
"""
context = zmq.Context()
socket = context.socket(zmq.SUB)
print('Connecting port %s' % PORT)
socket.setsockopt(zmq.SUBSCRIBE, b'')
socket.connect("tcp://localhost:%s" % PORT)
print('Connected port %s' % PORT)
while True:
message = socket.recv()
print("Message received: %s" % message)
if __name__ == '__main__':
main()
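# A matching publisher sketch for testing the subscriber above; run it from a separate
# process. The payload and one-second interval are illustrative assumptions.
def publish_demo():
    import time
    context = zmq.Context()
    publisher = context.socket(zmq.PUB)
    publisher.bind("tcp://*:%s" % PORT)   # the subscriber above connects to this port
    while True:
        publisher.send(b"tick")           # empty-prefix subscriptions receive everything
        time.sleep(1)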
|
py | 1a3c85d7188e97a6c88f938c532e44b851b3c088 | import requests
from bs4 import BeautifulSoup
import time
import sys
import json
import re
import os
s = requests.session()
s.headers.update({
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'Cache-Control': 'max-age=0',
'Cookie': 'ASP.NET_SessionId=2w4r4q25gvy03j2vxuejt2u4'
})
r = s.get('http://www.elec.state.nj.us/ELECReport/SearchCandidate.aspx')
cookies = r.cookies
soup = BeautifulSoup(r.text)
viewstate = soup.find('input', {'id':'__VIEWSTATE'})['value']
viewstategenerator = soup.find('input', {'id':'__VIEWSTATEGENERATOR'})['value']
data = {
"__EVENTTARGET":None,
"__EVENTARGUMENT":None,
"ctl00$ContentPlaceHolder1$usrCandidate1$Committee":"Candidate",
"ctl00$ContentPlaceHolder1$usrCandidate1$txtFirstName":"Phil",
"ctl00$ContentPlaceHolder1$usrCandidate1$txtMI":None,
"ctl00$ContentPlaceHolder1$usrCandidate1$txtLastName":"Murphy",
"ctl00$ContentPlaceHolder1$usrCandidate1$txtSuffix":None,
"ctl00$ContentPlaceHolder1$usrCandidate1$ddlOffice":"ALL",
"ctl00$ContentPlaceHolder1$usrCandidate1$Location":"Location1",
"ctl00$ContentPlaceHolder1$usrCandidate1$ddlLocation1":None,
"ctl00$ContentPlaceHolder1$usrCandidate1$ddlParty":"ALL",
"ctl00$ContentPlaceHolder1$usrCandidate1$ddlElection":"ALL",
"ctl00$ContentPlaceHolder1$usrCandidate1$ddlYear":"2017",
"ctl00$ContentPlaceHolder1$usrCandidate1$btnSearch":"Search",
'__VIEWSTATE':viewstate,
'__VIEWSTATEGENERATOR': viewstategenerator
}
r = s.post('http://www.elec.state.nj.us/ELECReport/SearchCandidate.aspx', data=data, cookies=cookies)
soup = BeautifulSoup(r.text)
path = os.path.abspath('temp.html')
url = 'file://' + path
with open(path, 'w') as f:
f.write(r.text)
viewstate = soup.find('input', {'id':'__VIEWSTATE'})['value']
viewstategenerator = soup.find('input', {'id':'__VIEWSTATEGENERATOR'})['value']
data2 = {
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$ReportControl$ctl03': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$ReportControl$ctl02': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ToggleParam$collapse': 'false',
'__EVENTARGUMENT': 'Link$0',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$VisibilityState$ctl00':
'ReportPage',
'__VIEWSTATEGENERATOR': viewstategenerator,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl07$collapse': 'false',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl07$store': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$ReportControl$ctl04': '100',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl08$ClientClickedId': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ToggleParam$store': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$ScrollPosition': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl05$ctl00$CurrentPage': '1',
'__EVENTTARGET': 'ctl00$ContentPlaceHolder1$usrCommonGrid1$gvwData',
'__VIEWSTATE': viewstate,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl11':'standards',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl10': 'ltr',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$AsyncWait$HiddenCancelField': 'False',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl03$ctl00': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl03$ctl01': None
}
r = s.post('http://www.elec.state.nj.us/ELECReport/SearchCandidate.aspx', data=data2, cookies=cookies)
soup = BeautifulSoup(r.text)
results = soup.findAll('table')
for elem in soup(text=re.compile(r'Date Recieved')):
print(elem.parent)
path = os.path.abspath('temp2.html')
url = 'file://' + path
with open(path, 'w') as f:
f.write(r.text)
viewstate = soup.find('input', {'id':'__VIEWSTATE'})['value']
viewstategenerator = soup.find('input', {'id':'__VIEWSTATEGENERATOR'})['value']
data3 = {
'ctl00$ScriptManager1':'ctl00$ScriptManager1|ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$ReportControl',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl03$ctl00': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl03$ctl01': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl10':'ltr',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl11':'standards',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$AsyncWait$HiddenCancelField':'False',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ToggleParam$store': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ToggleParam$collapse':'false',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl05$ctl00$CurrentPage':'1',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl08$ClientClickedId': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl07$store': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl07$collapse': 'false',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$VisibilityState$ctl00': 'ReportPage',
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$ScrollPosition': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$ReportControl$ctl02': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$ReportControl$ctl03': None,
'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$ReportControl$ctl04': '100',
'__EVENTTARGET': 'ctl00$ContentPlaceHolder1$BITSReportViewer1$reportViewer1$ctl09$ReportControl',
'__EVENTARGUMENT': None,
'__VIEWSTATE': viewstate,
'__VIEWSTATEGENERATOR': viewstategenerator,
'__ASYNCPOST':'true',
'': ""
}
s.headers.update({
'X-MicrosoftAjax': 'Delta=true',
'X-Requested-With': 'XMLHttpRequest',
'Referer': 'http://www.elec.state.nj.us/ELECReport/searchcandidate.aspx',
'Host':'www.elec.state.nj.us',
'Origin':'http://www.elec.state.nj.us',
'Accept':'*/*',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'en-US,en;q=0.8',
'Cache-Control':'no-cache',
'Connection':'keep-alive',
'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8'
})
r = s.post('http://www.elec.state.nj.us/ELECReport/SearchCandidate.aspx', data=data3, cookies=cookies)
soup = BeautifulSoup(r.text)
print(r.text[:5000])
results = soup.findAll('table')
for elem in soup(text=re.compile(r'Date Recieved')):
print(elem.parent)
path = os.path.abspath('temp3.html')
url = 'file://' + path
with open(path, 'w') as f:
f.write(r.text)
|
py | 1a3c8748345990c8cd2cd5f7674bab2f2e40eb84 | """
Run script for 2d example with two fractures. Dynamics driven by Dirichlet
values at the fracture endpoints, which are different from the matrix BC values.
Flow and cooling from left to right, leftmost fracture grows.
-----------------------
| |
| |
| |
| |
|---- ----|
| |
| |
| |
-----------------------
"""
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import porepy as pp
from porepy.models.thm_model import THM
from fracture_propagation_model import THMPropagationModel
from utils import read_pickle, write_pickle
logger = logging.getLogger(__name__)
class Example2Model(THMPropagationModel, THM):
"""
This class provides the parameter specification of the example, including grid/geometry,
BCs, rock and fluid parameters and time parameters. Also provides some common modelling
functions, such as the aperture computation from the displacement jumps, and data storage
and export functions.
"""
def _fractures(self):
self.fracs = [
np.array([[0.0, 0.5], [0.25, 0.5]]).T,
np.array([[0.75, 0.5], [1, 0.5]]).T,
]
def _depth(self, coords):
return np.zeros(coords.shape[1])
def _bc_type_mechanics(self, g) -> pp.BoundaryConditionVectorial:
"""
Dirichlet values at top and bottom.
"""
all_bf, east, west, north, south, _, _ = self._domain_boundary_sides(g)
dir_faces = south + north + g.tags["fracture_faces"]
bc = pp.BoundaryConditionVectorial(g, dir_faces, "dir")
return bc
def _bc_values_mechanics(self, g) -> np.ndarray:
"""Dirichlet displacement on the top, fixed on bottom and 0 Neumann
on left and right.
"""
# Retrieve the boundaries where values are assigned
bc_values = np.zeros((g.dim, g.num_faces))
return bc_values.ravel("F")
def _p_and_T_dir_faces(self, g):
"""
We prescribe Dirichlet value at the fractures.
No-flow for the matrix.
"""
if g.dim == self._Nd:
return np.empty(0, dtype=int)
else:
all_bf, east, west, north, south, _, _ = self._domain_boundary_sides(g)
return (east + west).nonzero()[0]
def _bc_values_scalar(self, g) -> np.ndarray:
"""
See bc_type_scalar
"""
# Retrieve the boundaries where values are assigned
dir_faces = self._p_and_T_dir_faces(g)
bc_values = np.zeros(g.num_faces)
bc_values[dir_faces] = (
5e4 / self.scalar_scale * (1 - g.face_centers[0, dir_faces])
)
return bc_values
def _bc_values_temperature(self, g) -> np.ndarray:
"""Cooling on the left from the onset of phase III."""
bc_values = np.zeros(g.num_faces)
dir_faces = self._p_and_T_dir_faces(g)
bc_values[dir_faces] = self.T_0_Kelvin - 50 * (1 - g.face_centers[0, dir_faces])
return bc_values
def _set_rock_and_fluid(self):
"""
Set rock and fluid properties to those of granite and water.
We ignore all temperature dependencies of the parameters.
"""
super()._set_rock_and_fluid()
def _hydrostatic_pressure(self, g, depth):
"""Set explicitly to zero to avoid the atmospheric pressure returned
by the exIII/exIV function for depth=0.
"""
return np.zeros_like(depth)
def _set_time_parameters(self):
"""
Specify time parameters.
"""
# For the initialization run, we use the following
# start time
self.time = 0
# and time step
self.time_step = self.params.get("time_step")
# We use
self.end_time = 4 * pp.HOUR
self.max_time_step = self.time_step
self.phase_limits = np.array([self.end_time])
self.phase_time_steps = np.array([self.time_step])
def _set_fields(self, params):
"""
Set various fields to be used in the model.
"""
super()._set_fields(params)
# Initial aperture, a_0
self.initial_aperture = 1e-3 / self.length_scale
self.gravity_on = False # Mechanics not implemented for True
self.box = {"xmin": 0, "ymin": 0, "xmax": 1, "ymax": 1}
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# dt in seconds
reference = False
time_steps = np.array([600, 300, 150, 75])
# Number of cells in each dimension
n_cells = np.array([32, 64, 128])
if reference:
n_cells = np.array([512])
time_steps = np.array([25])
fracture_sizes = {}
export_times = {}
mesh_size = 0.02
mesh_args = {
"mesh_size_frac": mesh_size,
"mesh_size_min": 0.5 * mesh_size,
"mesh_size_bound": 3.6 * mesh_size,
}
folder_name = "exII_revision"
if reference:
folder_name += "_ref"
params = {
"nl_convergence_tol": 1e-8,
"max_iterations": 50,
"file_name": "tensile_stable_propagation",
"mesh_args": mesh_args,
"folder_name": folder_name,
"nx": 10,
"prepare_umfpack": False,
}
if reference:
params["file_name"] = "tensile_stable_propagation_reference"
if not os.path.exists(folder_name):
os.makedirs(folder_name)
for i, dt in enumerate(time_steps):
params["time_step"] = dt
for j, nx in enumerate(n_cells):
logger.info("\nSolving for dt {} and nx {}.".format(dt, nx))
params["nx"] = nx
m = Example2Model(params)
pp.run_time_dependent_model(m, params)
fracture_sizes[(dt, nx)] = m.fracture_sizes
export_times[(dt, nx)] = m.export_times
m._export_pvd()
plot = False
if reference:
data = read_pickle("exII/fracture_sizes")
fracture_sizes.update(data["fracture_sizes"])
time_steps = np.union1d(data["time_steps"], time_steps)
        data["export_times"].update(export_times)
        export_times = data["export_times"]
n_cells = np.union1d(data["n_cells"], n_cells)
data = {
"fracture_sizes": fracture_sizes,
"time_steps": time_steps,
"n_cells": n_cells,
"export_times": export_times,
}
write_pickle(data, folder_name + "/fracture_sizes")
if plot:
fig, ax = plt.subplots()
for i, dt in enumerate(time_steps):
for j, nx in enumerate(n_cells):
data = fracture_sizes[(dt, nx)]
length = data[:, 2] - data[:, 1]
ax.plot(data[:, 0], length, label="dt {} nx {}".format(dt, nx))
ax.legend()
plt.show()
|
py | 1a3c8884ba961b18207c9059137b8d800783a0ac | """bugTracker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("", include("bugtrack.urls")),
path('admin/', admin.site.urls),
]
|
py | 1a3c88d5bbe6642f2b9754374afde1edba895a54 | # System imports
from datetime import datetime
import time
import json
import logging
# Package imports
from flask import Blueprint
from flask import render_template
from flask import jsonify
from flask import request
# Local imports
import common
from ispyb_api import controller
api = Blueprint('ebic', __name__, url_prefix='/ebic')
rack_prefix = 'EBIC-RACK'
rack_suffixes = ['A1', 'A2', 'A3', 'A4',
'B1', 'B2', 'B3', 'B4',
'C1', 'C2', 'C3', 'C4',
'D1', 'D2', 'D3', 'D4',
'E1', 'E2', 'E3', 'E4',
'F1', 'F2', 'F3', 'F4',
'G1', 'G2', 'G3', 'G4',
'H1', 'H2', 'H3', 'H4',
'J1', 'J2', 'J3', 'J4',
'K1', 'K2', 'K3', 'K4',
'L1', 'L2', 'L3', 'L4',
'M1', 'M2', 'M3', 'M4',
'N1', 'N2', 'N3', 'N4',
'P1', 'P2', 'P3', 'P4',
'Q1', 'Q2', 'Q3', 'Q4',
'R1', 'R2', 'R3', 'R4',
]
rack_locations = ['-'.join([rack_prefix, suffix])
for suffix in rack_suffixes]
beamlines = ['m01',
'm02',
'm03',
'm04',
'm05',
'm06',
'm07',
]
beamline_prefix = 'MICROSCOPE'
beamline_locations = ['{}-{}'.format(beamline_prefix, x.upper()) for x in beamlines]
# Add the common locations on for the web ui
beamline_locations.extend(['USER-COLLECTION',
'STORES-OUT',
'ZONE-6-STORE',
])
@api.route('/')
def index():
"""
Main page for dewar management
"""
return render_template('dewars.html',
title="eBIC Dewar Management",
rack_locations=rack_locations,
rack_suffixes=rack_suffixes,
rack_prefix=rack_prefix,
beamlines=beamline_locations,
api_prefix="ebic",
)
@api.route('/dewars', methods=["GET", "POST", "DELETE"])
def location():
"""
API route for dewar management
"""
result = {}
status_code = 200
if request.method == "GET":
# Get any dewar with any rack location
# There should only be one per location
# Simple call so use controller directly
result = controller.find_dewars_by_location(rack_locations)
elif request.method == "POST":
location = request.form['location']
barcode = request.form['barcode']
result, status_code = common.update_dewar_location(barcode, location)
elif request.method == "DELETE":
location = request.form['location']
result, status_code = common.remove_dewar_from_location(location)
else:
result = {'location': '',
'barcode': '',
'status': 'fail',
'reason': 'Method/route not implemented yet'}
status_code = 501
return jsonify(result), status_code
@api.route('/dewars/find', methods=["GET"])
def find():
"""
Return a list of matching dewars with this facility code
    Should be requested with the facility code as a URL parameter, e.g. ?fc=DLS-MS-1234
We specifically return the status code so the front end can show feedback
"""
facilitycode = request.args.get('fc')
result, status_code = common.find_dewar(facilitycode)
return jsonify(result), status_code
|
py | 1a3c892e470617ac89b2a2b0942c029b5b45fbad | __author__ = 'Brent Berghmans 1334252'
import hashlib
import socket
import re
from threading import Thread
from threading import Lock
import Queue
import os
import sys
import time
from email.utils import formatdate
class HostReplacer:
def __init__(self, file = False):
self.mDict = dict()
if not file:
return
self.readFile(file)
def readFile(self, filename):
try:
with open(filename, 'r') as f:
for line in f:
line = line.strip()
if not line.startswith("#"):
splits = line.split(" ")
if len(splits) == 2:
self.mDict[splits[0]] = splits[1]
else:
print("Error, line contains 3 parameters: " + line + ", skipping this line.")
except:
print("Exception occurred with file.")
def replace(self, string):
if string in self.mDict:
return self.mDict[string]
else:
return string
def substringReplace(self, string):
for key, value in self.mDict.iteritems():
if key.lower() in string.lower() or key.lower() == string.lower():
replaced = string.lower().replace(key, value)
print string + " -> " + replaced
return replaced
return string
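# Illustrative redirect table accepted by HostReplacer.readFile() above; the
# host names below are made-up examples, not taken from this project:
#
#     # lines starting with "#" are ignored
#     example.com localhost
#     ads.example.net 127.0.0.1
#
# With that table loaded, substringReplace("www.example.com/index.html")
# returns "www.localhost/index.html" (the first key found in the string,
# in the table's iteration order, is replaced).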
class HTTPCache:
def __init__(self):
self.mMD5 = hashlib.md5()
self.mDict = dict()
self.mValues = self.mDict.values()
def generateHash(self, string):
hash = self.generateUniqueHash(string)
self.mDict[string] = hash
self.mValues = self.mDict.values()
return hash
def generateUniqueHash(self, string):
hash = ""
self.mMD5.update(string)
hash = self.mMD5.hexdigest()
self.mMD5 = hashlib.md5()
baseHash = hash
return baseHash
def getHash(self, key):
if key in self.mDict:
return self.mDict[key]
elif key + "/" in self.mDict:
return self.mDict[key + "/"]
else:
return False
def containsKey(self, key):
return key in self.mDict or key + "/" in self.mDict
def containsValue(self, value):
return value in self.mValues
def remove(self, key):
if self.containsKey(key):
self.mDict.pop(key)
elif self.containsKey(key + "/"):
self.mDict.pop(key + "/")
def insert(self, key, value):
self.mDict[key] = value
self.mValues = self.mDict.values()
class HTTPFileCache:
def __init__(self):
self.mCache = HTTPCache()
self.mLock = Lock()
def checkCacheDir(self):
if not os.path.isdir('./cache'):
os.makedirs('./cache')
def fileIsCached(self, url):
#Check if in our memory cache
self.mLock.acquire()
outcome = self.mCache.containsKey(url)
self.mLock.release()
#If not in our memory cache, check file system
if not outcome:
#print "Checking files;"
hash1 = self.mCache.generateUniqueHash(url)
hash2 = self.mCache.generateUniqueHash(url + "/")
print
if os.path.isfile("./cache/" + hash1 + ".tmp"):
#print "Hash1 exists"
self.mCache.insert(url, hash1)
outcome = True
elif os.path.isfile("./cache/" + hash2 + ".tmp"):
#print "Hash2 exists"
self.mCache.insert(url + "/", hash2)
outcome = True
return outcome
def getReadFilePointer(self, url):
#Get hashed name from cache
self.mLock.acquire()
hash = self.mCache.getHash(url)
self.mLock.release()
#open file pointer
f = open('cache/' + hash + ".tmp", 'rb')
return f
def getWriteFilePointer(self, url):
self.checkCacheDir()
hash = ""
#Check if file is cached
test = self.fileIsCached(url)
self.mLock.acquire()
#If cached, get the hash
if test:
hash = self.mCache.getHash(url)
#If not, generate a hash and put in cache
else:
hash = self.mCache.generateHash(url)
self.mLock.release()
        #If the file already exists, delete it before re-caching
        if os.path.exists('cache/' + str(hash) + ".tmp"):
            os.remove('cache/' + str(hash) + ".tmp")
#Open write file pointer
f = open('cache/' + str(hash) + ".tmp", 'w+b')
return f
def removeFile(self, url):
test = self.fileIsCached(url)
self.mLock.acquire()
try:
if test:
hash = self.mCache.getHash(url)
if os.path.exists('cache/' + str(hash) + '.tmp'):
os.remove('cache/' + str(hash) + '.tmp')
self.mCache.remove(url)
except:
print "Error occured when deleting file."
self.mLock.release()
#################
# Entry #
# Point #
#################
class ClientHandler:
def __init__(self, clientSocket, clientAddress, fileCache, hostReplacer):
self.mSocket = clientSocket
self.mAddress = clientAddress
self.mRequestQueue = Queue.Queue()
self.mThread = Thread(target=self.doInBackground)
self.mGetRequestRe = re.compile("^(.|\r\n)*?(GET (.)* HTTP\\/1\\.(1|0)\r\n(.|\r\n)+?\r\n\r\n)")
self.mOtherRequestRe = re.compile("^(.|\r\n)*?([a-zA-Z]+? (.)* HTTP\\/1\\.(1|0)\r\n(.|\r\n)+?\r\n\r\n)")
self.mResponseRe = re.compile("(^(HTTP\\/1.1)\\s([0-9]{3})\\s.*\r\n(.+?:.+?\r\n)*\r\n)")
self.mShouldStop = False
self.mCache = fileCache
self.mRedirects = hostReplacer
self.mDebug = False
def runLoop(self):
self.mBuffer = ''
try:
while not self.mShouldStop:
self.mSocket.settimeout(30)
data = self.mSocket.recv(1024)
if not data:
break
self.mBuffer = self.mBuffer + data
#Check if the buffer contains requests
self.parseReceive()
#Send requested files if we have them
self.sendFile()
except socket.timeout:
if len(self.mBuffer.strip()) != 0:
self.sendBadRequest()
else:
self.sendTimeout()
except socket.error:
try:
self.mSocket.close()
            except socket.error:
                #nothing sensible to do if closing the socket fails
                pass
        except:
            pass
def parseReceive(self):
self.printDebug("In parseReceive")
while True:
#Check if the regex matches
matches = self.mOtherRequestRe.match(self.mBuffer)
if not matches:
break
#This one is the part of the string we need
match = matches.groups()[1]
if not match:
break
else:
self.printDebug("Found a match")
#Make a request based on string
request = HttpRequest(match)
#Edit headers
request.setHeader("User-Agent", "1.1 Brent Proxy")
request.setHeader("Via", "1.1 Brent Proxy")
request.setHeader("X-Forwarded-For", self.mAddress[0])
#If request is valid, add it to list
if not request.mInvalid:
#Try to replace host and request url with hosts file
host = self.mRedirects.substringReplace(request.getHeader("Host"))
url = self.mRedirects.substringReplace(request.getUrl())
request.setHeader("Host", host)
request.setUrl(url)
self.mRequestQueue.put(request)
#Remove this match from buffer, support for more than one request per socket (disabled for safety)
self.mBuffer = self.mBuffer[len(match):]
def sendFile(self):
self.printDebug("In sendFile")
#If we don't need to send files, return
if self.mRequestQueue.empty():
return
#Get some values
request = self.mRequestQueue.get()
path = request.getUrl()
host = request.getHeader("Host")
try:
#Get ip of host
ip = socket.gethostbyname(host)
#Check if file is cached
if self.mCache.fileIsCached(str(ip) + str(path)):
#If so, try to send from cached file (can fail because of deletion of file or busy file pointers)
try:
file = self.mCache.getReadFilePointer(str(ip) + str(path))
self.sendFromCache(request, file)
#if it fails, send from host
except:
print "Exception when sending cached file."
self.sendFromHost(request, ip, host, path)
#If not cached, send from host
else:
self.sendFromHost(request, ip, host, path)
except socket.error as msg:
print msg
self.sendBadRequest()
print "Exception in send."
self.mSocket.close()
self.mShouldStop = True
def sendFromCache(self, request, file):
print "CACHE: " + request.getUrl()
#Init
bufferData = file.read(5024)
self.printDebug(bufferData)
self.mSocket.send(bufferData)
#Keep sending as long as we have data
while bufferData:
bufferData = file.read(5024)
if not bufferData:
break
self.printDebug(bufferData)
self.mSocket.send(bufferData)
file.close()
def sendFromHost(self, request, ip, host, path):
print "HOST: " + request.getUrl()
#Make connection with host
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, 80))
#Edit request header
request.setHeader("Connection", "close")
#We only need to cache get requests
shouldCache = (request.getMethod().lower() == "get")
#Send request to host
query = request.getRequestQuery()
sock.send(query)
#If we need to cache, try to get a file write pointer
file = ""
if shouldCache:
try:
file = self.mCache.getWriteFilePointer(str(ip) + str(path))
except:
shouldCache = False
header = ""
headerFound = False
while True:
#Get data from host
data = sock.recv(5024)
if not data:
break
            #Planned to modify the header here, but that caused problems with chunked data, so I left it out
if not headerFound:
header = header + data
matches = self.mResponseRe.match(header)
if matches:
headerCopy = header
headerString = matches.groups()[0]
leftover = header[len(headerString):].strip()
header = HttpResponse(headerString)
headerFound = True
self.mSocket.send(headerCopy)
if shouldCache:
file.write(headerCopy)
continue
#Send to socket
self.mSocket.send(data)
#And write to file if we can/must
if shouldCache:
file.write(data)
if shouldCache:
file.close()
try:
if headerFound and header.getResponseCode() != 200 and shouldCache:
self.mCache.removeFile(str(ip) + str(path))
except:
            #ignore errors from the optional cache cleanup above
            pass
def sendNotFound(self):
self.mSocket.send("HTTP/1.1 404 Not Found\r\n\r\n<html><body>404 File not found.</body></html>")
def sendBadRequest(self):
try:
self.mSocket.send("HTTP/1.1 400 Bad Request\r\n\r\n<html><body>400 Bad Request.</body></html>")
self.mSocket.close()
self.mShouldStop = True
except:
print "Exception occurred when sending 400 Bad request."
def sendTimeout(self):
try:
self.mSocket.send("HTTP/1.1 408 Request Timeout\r\n\r\n<html><body>408 Request Timeout.</body></html>")
self.mSocket.close()
self.mShouldStop = True
except:
print "Exception occurred when sending 408 Request Timeout."
def doInBackground(self):
stop = self.runLoop()
self.mSocket.close()
def execute(self):
self.mThread.start()
def join(self):
self.mThread.join()
def printDebug(self, string):
if self.mDebug:
print string
class HttpBase:
def __init__(self):
self.mHeaders = dict()
self.mVersion = ""
def hasHeader(self, key):
return key in self.mHeaders
def getHeader(self, key):
return self.mHeaders[key.lower()]
def setHeader(self, key, value):
self.mHeaders[key.lower()] = value
def setVersion(self, version):
self.mVersion = version
def getVersion(self):
return self.mVersion
def parseHeaderLine(self, string):
#We should be able to split on ":"
if re.match(".*:.*", string):
headerSplit = re.split(":", string)
left = headerSplit[0].strip()
right = ""
#There might be more than one ":", just concatenate
for i in range(1, len(headerSplit)):
if i == 1:
right = headerSplit[i]
else:
right = right + ":" + headerSplit[i]
right = right.strip()
self.setHeader(left, right)
def getHeadersQuery(self):
keys = self.mHeaders.keys()
values = self.mHeaders.values()
toRet = ""
for i in range(0, len(keys)):
if i == len(keys) - 1:
toRet = toRet + keys[i] + ": " + values[i]
else:
toRet = toRet + keys[i] + ": " + values[i] + "\r\n"
return toRet
class HttpResponse(HttpBase):
def __init__(self, text = False):
HttpBase.__init__(self)
self.mBase = HttpBase()
self.mResponseCode = -1
self.mResponseName = ""
self.mInvalid = True
if not text:
return
self.parse(text)
def getResponse(self):
return (self.mResponseCode, self.mResponseName)
def setResponse(self, response):
        self.setResponseName(str(response[1]))
        self.setResponseCode(response[0])
def getResponseName(self):
return self.mResponseName
def setResponseName(self, responseName):
self.mResponseName = str(responseName)
def getResponseCode(self):
return self.mResponseCode
def setResponseCode(self, code):
self.mResponseCode = int(code)
def parse(self, response):
#Split on \r\n
splits = re.split("\r\n", response)
httpFound = False
for split in splits:
#If we have not found the first line
if not httpFound:
httpFound = self.parseFirstLine(split)
#If we have found the first line
else:
self.parseHeaderLine(split)
self.mInvalid = not httpFound
def parseFirstLine(self, line):
#Check if the line matches the first line of an HTTP request
if re.match("HTTP\\/1\\.(0|1) [0-9]{1,3} .+", line):
versNo = line[5:8].strip()
statusCode = line[9:12].strip()
statusName = line[13:].strip()
self.setVersion(versNo.strip())
self.mResponseCode = int(statusCode)
self.mResponseName = statusName.strip()
return True
else:
return False
def getFirstLineQuery(self):
toRet = ""
toRet = toRet + "HTTP/" + self.getVersion() + " " + str(self.mResponseCode) + " " + str(self.mResponseName)
return toRet
def getResponseQuery(self):
toRet = self.getFirstLineQuery() + "\r\n"
toRet = toRet + self.getHeadersQuery() + "\r\n\r\n"
return toRet
class HttpRequest(HttpBase):
def __init__(self, text = False):
HttpBase.__init__(self)
self.mBase = HttpBase()
self.mUrl = ""
self.mMethod = ""
self.mInvalid = True
if not text:
return
self.parse(text)
def getUrl(self):
return self.mUrl
def setUrl(self, url):
self.mUrl = url
def getMethod(self):
return self.mMethod
def setMethod(self, method):
self.mMethod = method
def parse(self, text):
#Split on \r\n
splits = re.split("\r\n", text)
httpFound = False
for split in splits:
#If we have not found the first line
if not httpFound:
httpFound = self.parseFirstLine(split)
#If we have found the first line
else:
self.parseHeaderLine(split)
self.mInvalid = not httpFound
def parseFirstLine(self, line):
#Check if the line matches the first line of an HTTP request
result = re.match("([a-zA-Z]+) .* HTTP\\/1\\.(1|0)", line)
if result:
self.mMethod = result.group(1)
url = line[len(self.mMethod) + 1:].strip()
versNo = url[len(url) - 9:]
url = url[0: len(url) - len(versNo)].strip()
versNo = versNo[6:].strip()
self.setUrl(url)
self.setVersion(versNo)
return True
else:
return False
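    # Worked example of the parsing above: "GET /index.html HTTP/1.1" yields
    # mMethod == "GET", mUrl == "/index.html" and version "1.1", and
    # getFirstLineQuery() rebuilds the identical request line.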
def getFirstLineQuery(self):
toRet = ""
toRet = toRet + self.getMethod() + " " + self.getUrl() + " HTTP/" + self.getVersion()
return toRet
def getRequestQuery(self):
toRet = self.getFirstLineQuery() + "\r\n"
toRet = toRet + self.getHeadersQuery()
toRet = toRet + "\r\n\r\n"
return toRet
class HttpServer:
def __init__(self, port, file = False):
if not file:
print "Starting server on port " + str(port)
else:
print "Starting server on port " + str(port) + " with redirect file: " + str(file)
self.mInvalid = False
self.mPort = port
self.mCache = HTTPFileCache()
self.mRedirects = HostReplacer(file)
def open(self):
self.mSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.mSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.mSocket.bind(("localhost", self.mPort))
self.mSocket.listen(5)
def close(self):
self.mSocket.close()
def runServer(self):
self.open()
counter = 0
while True:
clientSocket, clientAddress = self.mSocket.accept()
#Make new handler
clientHandler = ClientHandler(clientSocket, clientAddress, self.mCache, self.mRedirects)
#Start it
clientHandler.execute()
counter = counter + 1
#print "Sockets opened: " + str(counter)
if len(sys.argv) == 3:
if sys.argv[1] == "-p":
port = int(sys.argv[2])
server = HttpServer(port)
server.runServer()
else:
print "Invalid params, aborting."
elif len(sys.argv) == 5:
if sys.argv[1] == "-p" and sys.argv[3] == "-r":
server = HttpServer(int(sys.argv[2]), str(sys.argv[4]))
server.runServer()
elif sys.argv[1] == "-r" and sys.argv[3] == "-p":
server = HttpServer(int(sys.argv[4]), str(sys.argv[2]))
server.runServer()
else:
print "Invalid params, aborting."
else:
print "Invalid params, aborting"
|
py | 1a3c894ca23a87e3ec54ba83ffa494f108945603 | #!/usr/bin/env python
from __future__ import print_function
import matplotlib
matplotlib.use("svg")
import matplotlib.pyplot as plt
import numpy as np
import subprocess
# Generate svg file ----------
f=plt.figure(figsize=(3,3))
ax=f.add_subplot(111,frameon=False)
theta = np.linspace(0,2*np.pi,10)
ax.plot( np.cos(theta), np.sin(theta), 'b-', lw=2, )
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(-1.1,1.1)
ax.set_ylim(-1.1,1.1)
f.savefig('circle.svg')
# check output
subprocess.check_call(
'xmllint --nowarning --noout circle.svg',
shell=True)
if 0:
print('done checking circle.svg')
for i in range(20):
print()
# stack two files
subprocess.check_call(
'../svg_stack.py circle.svg circle.svg > circles.svg',
#'../svg_stack.py circle.svg > circles.svg',
shell=True)
# check output
subprocess.check_call(
#'xmllint --dtdvalid http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd --nowarning --noout circles.svg',
'xmllint --nowarning --noout circles.svg',
shell=True)
# # check output
# subprocess.check_call(
# 'rasterizer circles.svg',
# shell=True)
|
py | 1a3c89df6f0fdf0c55717e6951e79c1d01e42229 | """Functionality for awesome-streamlit.org"""
from panel.pane import Markdown
def title_awesome(body: str,) -> Markdown:
"""An *Awesome Panel* title as a Markdown with
- the text like 'Awesome Panel About'
- the [Awesome Badge](https://cdn.rawgit.com/sindresorhus/awesome/\
d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)
Arguments:
body (str): Some title like 'About'
Returns:
        Markdown: An 'Awesome Panel {body}' title with a link and the awesome badge.
"""
return Markdown(
f"# Awesome Panel {body} "
""
)
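# Illustrative call (a sketch, not part of the original module):
# title_awesome("About") returns a Markdown pane whose text starts with
# "# Awesome Panel About".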
|
py | 1a3c89f9c9b859f746b38194b7ef44d5285b3315 | """
WSGI config for CloudCase project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CloudCase.settings")
application = get_wsgi_application()
|
py | 1a3c8a063a4139fd5bfa7a325268ae1c62049785 | """
mfwel module. Contains the ModflowWel class. Note that the user can access
the ModflowWel class as `flopy.modflow.ModflowWel`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?wel.htm>`_.
"""
import sys
import numpy as np
from ..utils import MfList
from ..pakbase import Package
from ..utils.recarray_utils import create_empty_recarray
from ..utils.optionblock import OptionBlock
import warnings
class ModflowWel(Package):
"""
MODFLOW Well Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
(default is 0).
stress_period_data : list of boundaries, or recarray of boundaries, or
dictionary of boundaries
Each well is defined through definition of
layer (int), row (int), column (int), flux (float).
The simplest form is a dictionary with a lists of boundaries for each
stress period, where each list of boundaries itself is a list of
boundaries. Indices of the dictionary are the numbers of the stress
period. This gives the form of:
stress_period_data =
{0: [
[lay, row, col, flux],
[lay, row, col, flux],
[lay, row, col, flux]
],
1: [
[lay, row, col, flux],
[lay, row, col, flux],
[lay, row, col, flux]
], ...
kper:
[
[lay, row, col, flux],
[lay, row, col, flux],
[lay, row, col, flux]
]
}
Note that if the number of lists is smaller than the number of stress
periods, then the last list of wells will apply until the end of the
simulation. Full details of all options to specify stress_period_data
can be found in the flopy3 boundaries Notebook in the basic
subdirectory of the examples directory
dtype : custom datatype of stress_period_data.
If None the default well datatype will be applied (default is None).
extension : string
Filename extension (default is 'wel')
options : list of strings
Package options (default is None).
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output name will be created using
the model name and .cbc extension (for example, modflowtest.cbc),
if ipakcbc is a number greater than zero. If a single string is passed
the package will be set to the string and cbc output names will be
created using the model name and .cbc extension, if ipakcbc is a
number greater than zero. To define the names for all package files
(input and output) the length of the list of strings should be 2.
Default is None.
Attributes
----------
mxactw : int
Maximum number of wells for a stress period. This is calculated
automatically by FloPy based on the information in
stress_period_data.
Methods
-------
See Also
--------
Notes
-----
Parameters are not supported in FloPy.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]}
>>> wel = flopy.modflow.ModflowWel(m, stress_period_data=lrcq)
"""
_options = dict(
[
(
"specify",
{
OptionBlock.dtype: np.bool_,
OptionBlock.nested: True,
OptionBlock.n_nested: 2,
OptionBlock.vars: dict(
[
("phiramp", OptionBlock.simple_float),
(
"iunitramp",
dict(
[
(OptionBlock.dtype, int),
(OptionBlock.nested, False),
(OptionBlock.optional, True),
]
),
),
]
),
},
),
("tabfiles", OptionBlock.simple_tabfile),
]
)
def __init__(
self,
model,
ipakcb=None,
stress_period_data=None,
dtype=None,
extension="wel",
options=None,
binary=False,
unitnumber=None,
filenames=None,
):
"""
Package constructor.
"""
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowWel._defaultunit()
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# update external file information with cbc output, if necessary
if ipakcb is not None:
fname = filenames[1]
model.add_output_file(
ipakcb, fname=fname, package=ModflowWel._ftype()
)
else:
ipakcb = 0
# Fill namefile items
name = [ModflowWel._ftype()]
units = [unitnumber]
extra = [""]
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and
# unit number
Package.__init__(
self,
model,
extension=extension,
name=name,
unit_number=units,
extra=extra,
filenames=fname,
)
self._generate_heading()
self.url = "wel.htm"
self.ipakcb = ipakcb
self.np = 0
if options is None:
options = []
self.specify = False
self.phiramp = None
self.iunitramp = None
self.options = options
if isinstance(options, OptionBlock):
if not self.options.specify:
self.specify = self.options.specify
else:
self.specify = True
self.phiramp = self.options.phiramp
self.iunitramp = self.options.iunitramp
# this is to grab the aux variables...
options = []
else:
for idx, opt in enumerate(options):
if "specify" in opt:
t = opt.strip().split()
self.specify = True
self.phiramp = float(t[1])
self.iunitramp = int(t[2])
self.options.pop(idx)
break
if dtype is not None:
self.dtype = dtype
else:
self.dtype = self.get_default_dtype(
structured=self.parent.structured
)
# determine if any aux variables in dtype
dt = self.get_default_dtype(structured=self.parent.structured)
if len(self.dtype.names) > len(dt.names):
for name in self.dtype.names[len(dt.names) :]:
ladd = True
for option in options:
if name.lower() in option.lower():
ladd = False
break
if ladd:
options.append("aux {} ".format(name))
if isinstance(self.options, OptionBlock):
if not self.options.auxillary:
self.options.auxillary = options
else:
self.options = options
# initialize MfList
self.stress_period_data = MfList(
self, stress_period_data, binary=binary
)
self.parent.add_package(self)
def _ncells(self):
"""Maximum number of cells that have wells (developed for
MT3DMS SSM package).
Returns
-------
ncells: int
maximum number of wel cells
"""
return self.stress_period_data.mxact
def write_file(self, f=None):
"""
Write the package file.
Parameters:
f: (str) optional file name
Returns
-------
None
"""
if f is not None:
if isinstance(f, str):
f_wel = open(f, "w")
else:
f_wel = f
else:
f_wel = open(self.fn_path, "w")
f_wel.write("%s\n" % self.heading)
if (
isinstance(self.options, OptionBlock)
and self.parent.version == "mfnwt"
):
self.options.update_from_package(self)
if self.options.block:
self.options.write_options(f_wel)
line = " {0:9d} {1:9d} ".format(
self.stress_period_data.mxact, self.ipakcb
)
if isinstance(self.options, OptionBlock):
if self.options.noprint:
line += "NOPRINT "
if self.options.auxillary:
line += " ".join(
[str(aux).upper() for aux in self.options.auxillary]
)
else:
for opt in self.options:
line += " " + str(opt)
line += "\n"
f_wel.write(line)
if (
isinstance(self.options, OptionBlock)
and self.parent.version == "mfnwt"
):
if not self.options.block:
if isinstance(self.options.specify, np.ndarray):
self.options.tabfiles = False
self.options.write_options(f_wel)
else:
if self.specify and self.parent.version == "mfnwt":
f_wel.write(
"SPECIFY {0:10.5g} {1:10d}\n".format(
self.phiramp, self.iunitramp
)
)
self.stress_period_data.write_transient(f_wel)
f_wel.close()
def add_record(self, kper, index, values):
try:
self.stress_period_data.add_record(kper, index, values)
except Exception as e:
raise Exception("mfwel error adding record to list: " + str(e))
@staticmethod
def get_default_dtype(structured=True):
if structured:
dtype = np.dtype(
[
("k", int),
("i", int),
("j", int),
("flux", np.float32),
]
)
else:
dtype = np.dtype([("node", int), ("flux", np.float32)])
return dtype
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True):
# get an empty recarray that corresponds to dtype
dtype = ModflowWel.get_default_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
return create_empty_recarray(ncells, dtype, default_value=-1.0e10)
@staticmethod
def _get_sfac_columns():
return ["flux"]
@classmethod
def load(cls, f, model, nper=None, ext_unit_dict=None, check=True):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
nper : int
The number of stress periods. If nper is None, then nper will be
obtained from the model object. (default is None).
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
wel : ModflowWel object
ModflowWel object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> wel = flopy.modflow.ModflowWel.load('test.wel', m)
"""
if model.verbose:
sys.stdout.write("loading wel package file...\n")
return Package.load(
f,
model,
cls,
nper=nper,
check=check,
ext_unit_dict=ext_unit_dict,
)
@staticmethod
def _ftype():
return "WEL"
@staticmethod
def _defaultunit():
return 20
|
py | 1a3c8bf436ab9d61d8f1de98b86e581a1d8d1fda | # objective is to get the cart to the flag.
# for now, let's just move randomly:
import gym
import numpy as np
env = gym.make("MountainCar-v0")
LEARNING_RATE = 0.1
DISCOUNT = 0.95
EPISODES = 25000
SHOW_EVERY = 1000
DISCRETE_OS_SIZE = [20, 20]
discrete_os_win_size = (env.observation_space.high - env.observation_space.low)/DISCRETE_OS_SIZE
# Exploration settings
epsilon = 1  # not a constant, going to be decayed
START_EPSILON_DECAYING = 1
END_EPSILON_DECAYING = EPISODES//2
epsilon_decay_value = epsilon/(END_EPSILON_DECAYING - START_EPSILON_DECAYING)
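# With EPISODES = 25000 this subtracts 1 / (12500 - 1) ~= 8.0e-5 from epsilon
# on every episode between START_EPSILON_DECAYING and END_EPSILON_DECAYING.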
q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))
def get_discrete_state(state):
discrete_state = (state - env.observation_space.low)/discrete_os_win_size
    return tuple(discrete_state.astype(int))  # we use this tuple to look up the 3 Q values for the available actions in the q-table
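# Worked example of the discretization above, using the standard MountainCar-v0
# bounds (low = [-1.2, -0.07], high = [0.6, 0.07]): discrete_os_win_size is
# [0.09, 0.007], so a state of (-0.5, 0.0) maps to
# ((-0.5 + 1.2)/0.09, (0.0 + 0.07)/0.007) = (7.78, 10.0) -> bucket (7, 10).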
for episode in range(EPISODES):
discrete_state = get_discrete_state(env.reset())
done = False
if episode % SHOW_EVERY == 0:
render = True
print(episode)
else:
render = False
while not done:
if np.random.random() > epsilon:
# Get action from Q table
action = np.argmax(q_table[discrete_state])
else:
# Get random action
action = np.random.randint(0, env.action_space.n)
new_state, reward, done, _ = env.step(action)
new_discrete_state = get_discrete_state(new_state)
if episode % SHOW_EVERY == 0:
env.render()
#new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
# If simulation did not end yet after last step - update Q table
if not done:
# Maximum possible Q value in next step (for new state)
max_future_q = np.max(q_table[new_discrete_state])
# Current Q value (for current state and performed action)
current_q = q_table[discrete_state + (action,)]
# And here's our equation for a new Q value for current state and action
new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
# Update Q table with new Q value
q_table[discrete_state + (action,)] = new_q
        # Simulation ended (for any reason) - if goal position is achieved - update Q value with reward directly
elif new_state[0] >= env.goal_position:
#q_table[discrete_state + (action,)] = reward
print('Win At '+str(episode))
q_table[discrete_state + (action,)] = 0
discrete_state = new_discrete_state
# Decaying is being done every episode if episode number is within decaying range
if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
epsilon -= epsilon_decay_value
env.close()
|
py | 1a3c8c776e88359e056d82d8189220e7a80f6114 | # -*- coding: utf-8 -*-
if __name__ == '__main__':
import os, sys
path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(path, '..', '..'))
from ..Qt import QtGui
from .. import functions as fn
from .UIGraphicsItem import UIGraphicsItem
__all__ = ['VTickGroup']
class VTickGroup(UIGraphicsItem):
"""
**Bases:** :class:`UIGraphicsItem <pyqtgraph.UIGraphicsItem>`
Draws a set of tick marks which always occupy the same vertical range of the view,
but have x coordinates relative to the data within the view.
"""
def __init__(self, xvals=None, yrange=None, pen=None):
"""
============== ===================================================================
**Arguments:**
xvals A list of x values (in data coordinates) at which to draw ticks.
yrange A list of [low, high] limits for the tick. 0 is the bottom of
the view, 1 is the top. [0.8, 1] would draw ticks in the top
fifth of the view.
pen The pen to use for drawing ticks. Default is grey. Can be specified
as any argument valid for :func:`mkPen<pyqtgraph.mkPen>`
============== ===================================================================
"""
if yrange is None:
yrange = [0, 1]
if xvals is None:
xvals = []
UIGraphicsItem.__init__(self)
if pen is None:
pen = (200, 200, 200)
self.path = QtGui.QGraphicsPathItem()
self.ticks = []
self.xvals = []
self.yrange = [0,1]
self.setPen(pen)
self.setYRange(yrange)
self.setXVals(xvals)
def setPen(self, *args, **kwargs):
"""Set the pen to use for drawing ticks. Can be specified as any arguments valid
for :func:`mkPen<pyqtgraph.mkPen>`"""
self.pen = fn.mkPen(*args, **kwargs)
def setXVals(self, vals):
"""Set the x values for the ticks.
============== =====================================================================
**Arguments:**
vals A list of x values (in data/plot coordinates) at which to draw ticks.
============== =====================================================================
"""
self.xvals = vals
self.rebuildTicks()
#self.valid = False
def setYRange(self, vals):
"""Set the y range [low, high] that the ticks are drawn on. 0 is the bottom of
the view, 1 is the top."""
self.yrange = vals
self.rebuildTicks()
def dataBounds(self, *args, **kargs):
return None ## item should never affect view autoscaling
def yRange(self):
return self.yrange
def rebuildTicks(self):
self.path = QtGui.QPainterPath()
yrange = self.yRange()
for x in self.xvals:
self.path.moveTo(x, 0.)
self.path.lineTo(x, 1.)
def paint(self, p, *args):
UIGraphicsItem.paint(self, p, *args)
br = self.boundingRect()
h = br.height()
br.setY(br.y() + self.yrange[0] * h)
br.setHeight((self.yrange[1] - self.yrange[0]) * h)
p.translate(0, br.y())
p.scale(1.0, br.height())
p.setPen(self.pen)
p.drawPath(self.path)
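if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: it assumes an
    # installed pyqtgraph importable as "pyqtgraph" and a Qt5-style binding
    # (app.exec_()); adjust to pg.exec() on newer versions.
    import numpy as np
    import pyqtgraph as pg
    app = pg.mkQApp()
    plot = pg.plot(np.random.normal(size=100), title="VTickGroup example")
    # ticks spanning the top tenth of the view at three x positions
    plot.addItem(VTickGroup(xvals=[10, 40, 80], yrange=[0.9, 1.0], pen=(0, 200, 0)))
    app.exec_()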
|
py | 1a3c8d5dd08897c3c5c72102e8627c7d730e390a | # -*- coding:utf-8 -*-
import pygame
import sys
from gameObject import GameObject
class Person(GameObject):
def __init__(self, x, y, w, h):
super().__init__(x, y, w, h)
|
py | 1a3c8d77889f3d5e34bdad5e4540cfdb0c763c5b | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class NetworkWatcherPaged(Paged):
"""
A paging container for iterating over a list of :class:`NetworkWatcher <azure.mgmt.network.v2018_12_01.models.NetworkWatcher>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[NetworkWatcher]'}
}
def __init__(self, *args, **kwargs):
super(NetworkWatcherPaged, self).__init__(*args, **kwargs)
|
py | 1a3c8ed2e5fb6fa1ac766c79affae86845c87c5a | from typing import Any, Dict, Iterable
def filter_dict(d: Dict[str, Any], exclude: Iterable[str]) -> Dict[str, Any]:
"""Return a new dict with specified keys excluded from the original dict
Args:
d (dict): original dict
exclude (list): The keys that are excluded
"""
result: Dict[str, Any] = {}
for key, value in d.items():
if key not in exclude:
result.update({key: value})
return result
|
py | 1a3c8f589a4ff067bd5b38cf6872bd87d7fd2869 | #!/usr/bin/env python3
# Coded by CyberCommands
import os
import optparse
from pexpect import pxssh
os.system('cls' if os.name == 'nt' else 'clear')
print('''
======================================
THIS IS A SIMPLE SSH BOT CONTROL UNIT.
--------------------------------------
Coded by CyberCommands
======================================''')
class Client:
def __init__(self, host, user, password):
self.host = host
self.user = user
self.password = password
self.session = self.connect()
def connect(self):
try:
s = pxssh.pxssh()
s.login(self.host, self.user, self.password)
return s
except Exception as e:
print(e)
print('\033[91m[-] Error Connecting \033[0m')
def send_command(self, cmd):
self.session.sendline(cmd)
self.session.prompt()
return self.session.before
def botnet_command(command):
for client in Botnet:
output = client.send_command(command)
print('[*] Output from ' + client.host)
print('\033[32m[+] \033[0m' +str(output, encoding='utf-8')+ '\n')
def add_client(host, user, password):
client = Client(host, user, password)
Botnet.append(client)
order = input("Command >> ")
Botnet = []
add_client('host', 'username', 'password')
add_client('host', 'username', 'password')
botnet_command(order)
 |
py | 1a3c8f70daa53f4bdfd8e068b799318693ec3943 | #!/usr/bin/env python3
# from __future__ import print_function
"""
@summary: Timing transactions that are getting into the chain
@version: v46 (03/January/2019)
@since: 17/April/2018
@organization:
@author: https://github.com/drandreaskrueger
@see: https://github.com/drandreaskrueger/chainhammer for updates
"""
import time, timeit, sys, os, json
from web3 import Web3, HTTPProvider
# extend path for imports:
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from hammer.config import RPCaddress2, FILE_LAST_EXPERIMENT, AUTOSTOP_TPS, EMPTY_BLOCKS_AT_END
from hammer.deploy import loadFromDisk, FILE_CONTRACT_ADDRESS
from hammer.clienttools import web3connection, getBlockTransactionCount
def loopUntil_NewContract(query_intervall = 0.1):
"""
Wait for new smart contract to be deployed.
Continuously polls file "FILE_CONTRACT_ADDRESS".
Returns when overwritten file has different address or new filedate.
N.B.: It actually happens that same Ethereum contract address is created again,
if blockchain is deleted, and everything restarted. So: Check filedate too.
"""
address, _ = loadFromDisk()
when = os.path.getmtime(FILE_CONTRACT_ADDRESS)
print ("(filedate %d) last contract address: %s" %(when, address))
while(True):
time.sleep(query_intervall)
# checks whether a new contract has been deployed
# because then a new address has been saved to file:
newAddress, _ = loadFromDisk()
newWhen = os.path.getmtime(FILE_CONTRACT_ADDRESS)
if (newAddress != address or newWhen != when):
print ("(filedate %d) new contract address: %s" %(newWhen, newAddress))
break
return
def timestampToSeconds(timestamp, NODENAME, CONSENSUS):
"""
turn timestamp into (float of) seconds
as a separate function so that it can be recycled in blocksDB_create.py
"""
# most ethereum clients return block timestamps as whole seconds:
timeunits = 1.0
# quorum raft consensus ... returns not seconds but nanoseconds?
if CONSENSUS=="raft": timeunits = 1000000000.0
# testrpc-py has odd timestamp units ;-)
# do check for updates: https://github.com/pipermerriam/eth-testrpc/issues/117
if NODENAME=="TestRPC": timeunits = 205.0
return timestamp / timeunits
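# Example: with CONSENSUS == "raft" a block-timestamp difference of
# 2_000_000_000 (nanoseconds) becomes 2.0 seconds; for most other clients the
# value is already whole seconds and is returned unchanged.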
def analyzeNewBlocks(blockNumber, newBlockNumber, txCount, start_time, peakTpsAv):
"""
iterate through all new blocks, add up number of transactions
print status line
"""
txCount_new = 0
for bl in range(blockNumber+1, newBlockNumber+1): # TODO check range again - shift by one?
# txCount_new += w3.eth.getBlockTransactionCount(bl)
blktx = getBlockTransactionCount(w3, bl)
txCount_new += blktx # TODO
ts_blockNumber = w3.eth.getBlock( blockNumber).timestamp
ts_newBlockNumber = w3.eth.getBlock(newBlockNumber).timestamp
ts_diff = ts_newBlockNumber - ts_blockNumber
blocktimeSeconds = timestampToSeconds(ts_diff, NODENAME, CONSENSUS)
try:
tps_current = txCount_new / blocktimeSeconds
except ZeroDivisionError:
# Odd: Parity seems to have a blocktime resolution of whole seconds??
# So if blocks come much faster (e.g. with instantseal),
# then they end up having a blocktime of zero lol.
# Then, set TPS_CURRENT to something wrong but syntactically correct.
tps_current = 0
txCount += txCount_new
elapsed = timeit.default_timer() - start_time
tpsAv = txCount / elapsed
if tpsAv > peakTpsAv:
peakTpsAv = tpsAv
verb = " is" if peakTpsAv==tpsAv else "was"
line = "block %d | new #TX %3d / %4.0f ms = " \
"%5.1f TPS_current | total: #TX %4d / %4.1f s = %5.1f TPS_average " \
"(peak %s %5.1f TPS_average)"
line = line % ( newBlockNumber, txCount_new, blocktimeSeconds * 1000,
tps_current, txCount, elapsed, tpsAv, verb, peakTpsAv)
print (line)
return txCount, peakTpsAv, tpsAv
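# Example of the per-block figures above: 120 new transactions in a block that
# arrived 2.0 s after the previous one give tps_current = 60.0, while the
# average is the running total txCount divided by elapsed wall-clock seconds.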
def sendingEndedFiledate():
try:
when = os.path.getmtime(FILE_LAST_EXPERIMENT)
except FileNotFoundError:
when = 0
return when
def readInfofile(fn=FILE_LAST_EXPERIMENT):
with open(fn, "r") as f:
data = json.load(f)
return data
class CodingError(Exception):
pass
def getNearestEntry(myDict, myIndex):
"""
because
finalTpsAv = tpsAv[block_last]
    cannot always be resolved; in that case choose
    finalTpsAv = tpsAv[block_last+i]
    instead, testing with increasing i, then with decreasing i
"""
answer = myDict.get(myIndex, None)
if answer:
return answer
maxIndex,minIndex = max(myDict.keys()), min(myDict.keys())
# first look later:
i = myIndex
while not answer:
i += +1
if i>maxIndex:
break
answer = myDict.get(i, None)
# then look earlier:
i=myIndex
while not answer:
i += -1
if i<minIndex:
raise CodingError("Ouch, this should never happen. Info: len(myDict)=%d myIndex=%d" %(len(myDict), myIndex))
answer = myDict.get(i, None)
return answer
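# Examples of the nearest-key lookup above (illustrative values):
#   getNearestEntry({10: 5.0, 12: 7.5}, 11) -> 7.5  (first hit when probing later keys)
#   getNearestEntry({10: 5.0, 12: 7.5}, 14) -> 7.5  (no later key, so it probes earlier ones)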
def measurement(blockNumber, pauseBetweenQueries=0.3,
RELAXATION_ROUNDS=3, empty_blocks_at_end=EMPTY_BLOCKS_AT_END):
"""
when a (or more) new block appeared,
add them to the total, and print a line.
"""
whenBefore = sendingEndedFiledate()
# the block we had been waiting for already contains the first transaction/s
    # N.B.: slight inaccuracy of time measurement, because it is not measured how long those took
# txCount=w3.eth.getBlockTransactionCount(blockNumber)
txCount=getBlockTransactionCount(w3, blockNumber)
start_time = timeit.default_timer()
start_epochtime = time.time()
# TODO: perhaps additional to elapsed system time, show blocktime?
print('starting timer, at block', blockNumber, 'which has ',
txCount,' transactions; at epochtime', start_epochtime)
peakTpsAv = 0
counterStart, blocknumberEnd = 0, -1
tpsAv = {} # memorize all of them, so we can return value at 'block_last'
while(True):
newBlockNumber=w3.eth.blockNumber
if(blockNumber!=newBlockNumber): # when a new block appears:
args = (blockNumber, newBlockNumber, txCount, start_time, peakTpsAv)
txCount, peakTpsAv, tpsAv[newBlockNumber] = analyzeNewBlocks(*args)
blockNumber = newBlockNumber
# for the first 3 rounds, always reset the peakTpsAv again!
if counterStart < RELAXATION_ROUNDS:
peakTpsAv=0
counterStart += 1
# send.py --> store_experiment_data() is called AFTER last tx was mined.
# THEN do another 10 empty blocks ...
# only THEN end this:
# if AUTOSTOP_TPS and blocknumberEnd==-1 and sendingEndedFiledate()!=whenBefore:
if AUTOSTOP_TPS and sendingEndedFiledate()!=whenBefore:
print ("Received signal from send.py = updated INFOFILE.")
block_last = readInfofile()['send']['block_last']
# finalTpsAv = tpsAv[block_last]
finalTpsAv = getNearestEntry(myDict=tpsAv, myIndex=block_last)
break
# finalTpsAv = tpsAv
# blocknumberEnd = newBlockNumber + empty_blocks_at_end
# print ("The end is nigh ... after blocknumber", blocknumberEnd)
# if NODETYPE=="TestRPC":
# break # no empty blocks in TestRPC
# if blocknumberEnd>0 and newBlockNumber > blocknumberEnd:
# break
time.sleep(pauseBetweenQueries) # do not query too often; as little side effect on node as possible
# print ("end") # N.B.: it never gets here !
txt = "Experiment ended! Current blocknumber = %d"
txt = txt % (w3.eth.blockNumber)
print (txt)
return peakTpsAv, finalTpsAv, start_epochtime
def addMeasurementToFile(peakTpsAv, finalTpsAv, start_epochtime, fn=FILE_LAST_EXPERIMENT):
with open(fn, "r") as f:
data = json.load(f)
data["tps"]={}
data["tps"]["peakTpsAv"] = peakTpsAv
data["tps"]["finalTpsAv"] = finalTpsAv
data["tps"]["start_epochtime"] = start_epochtime
with open(fn, "w") as f:
json.dump(data, f)
if __name__ == '__main__':
global w3, NODENAME, NODETYPE, NODEVERSION, CONSENSUS, NETWORKID, CHAINNAME, CHAINID
w3, chainInfos = web3connection(RPCaddress=RPCaddress2, account=None)
NODENAME, NODETYPE, NODEVERSION, CONSENSUS, NETWORKID, CHAINNAME, CHAINID = chainInfos
blockNumber_before = w3.eth.blockNumber
print ("\nBlock ",blockNumber_before," - waiting for something to happen")
loopUntil_NewContract()
blocknumber_start_here = w3.eth.blockNumber
print ("\nblocknumber_start_here =", blocknumber_start_here)
peakTpsAv, finalTpsAv, start_epochtime = measurement( blocknumber_start_here )
addMeasurementToFile(peakTpsAv, finalTpsAv, start_epochtime, FILE_LAST_EXPERIMENT)
print ("Updated info file:", FILE_LAST_EXPERIMENT, "THE END.")
|
py | 1a3c8fe86d11c2f91561e2876d1295050f5ff037 | from egnn_pytorch.egnn_pytorch import EGNN, EGNN_sparse, EGNN_Network
|
py | 1a3c903cffb1ab350266a37df0865284c10233e5 | from yt.fields.field_info_container import FieldInfoContainer
from yt.fields.magnetic_field import setup_magnetic_field_aliases
from yt.fields.species_fields import add_species_field_by_density, setup_species_fields
from yt.frontends.gadget.fields import GadgetFieldInfo
from yt.frontends.sph.fields import SPHFieldInfo
metal_elements = ["He", "C", "N", "O", "Ne", "Mg", "Si", "S", "Ca", "Fe"]
class GizmoFieldInfo(GadgetFieldInfo):
# The known fields list is according to the GIZMO User Guide. See
# http://www.tapir.caltech.edu/~phopkins/Site/GIZMO_files/gizmo_documentation.html#snaps-reading
known_particle_fields = (
("Coordinates", ("code_length", ["particle_position"], None)),
("Velocities", ("code_velocity", ["particle_velocity"], None)),
("ParticleIDs", ("", ["particle_index"], None)),
("Masses", ("code_mass", ["particle_mass"], None)),
("InternalEnergy", ("code_specific_energy", ["specific_thermal_energy"], None)),
("Density", ("code_mass / code_length**3", ["density"], None)),
("SmoothingLength", ("code_length", ["smoothing_length"], None)),
("ElectronAbundance", ("", [], None)),
("NeutralHydrogenAbundance", ("", [], None)),
("StarFormationRate", ("Msun / yr", [], None)),
("Metallicity", ("code_metallicity", ["metallicity"], None)),
("Metallicity_00", ("", ["metallicity"], None)),
("Metallicity_01", ("", ["He_metallicity"], None)),
("Metallicity_02", ("", ["C_metallicity"], None)),
("Metallicity_03", ("", ["N_metallicity"], None)),
("Metallicity_04", ("", ["O_metallicity"], None)),
("Metallicity_05", ("", ["Ne_metallicity"], None)),
("Metallicity_06", ("", ["Mg_metallicity"], None)),
("Metallicity_07", ("", ["Si_metallicity"], None)),
("Metallicity_08", ("", ["S_metallicity"], None)),
("Metallicity_09", ("", ["Ca_metallicity"], None)),
("Metallicity_10", ("", ["Fe_metallicity"], None)),
("ArtificialViscosity", ("", [], None)),
("MagneticField", ("code_magnetic", ["particle_magnetic_field"], None)),
("DivergenceOfMagneticField", ("code_magnetic / code_length", [], None)),
("StellarFormationTime", ("", [], None)),
# "StellarFormationTime" has different meanings in (non-)cosmological
# runs, so units are left blank here.
("BH_Mass", ("code_mass", [], None)),
("BH_Mdot", ("code_mass / code_time", [], None)),
("BH_Mass_AlphaDisk", ("code_mass", [], None)),
)
def __init__(self, *args, **kwargs):
super(SPHFieldInfo, self).__init__(*args, **kwargs)
if ("PartType0", "Metallicity_00") in self.field_list:
self.nuclei_names = metal_elements
self.species_names = ["H_p0", "H_p1"] + metal_elements
def setup_particle_fields(self, ptype):
FieldInfoContainer.setup_particle_fields(self, ptype)
if ptype in ("PartType0",):
self.setup_gas_particle_fields(ptype)
setup_species_fields(self, ptype)
if ptype in ("PartType4",):
self.setup_star_particle_fields(ptype)
def setup_gas_particle_fields(self, ptype):
super().setup_gas_particle_fields(ptype)
def _h_p0_density(field, data):
x_H = 1.0 - data[(ptype, "He_metallicity")] - data[(ptype, "metallicity")]
return (
x_H
* data[(ptype, "density")]
* data[(ptype, "NeutralHydrogenAbundance")]
)
self.add_field(
(ptype, "H_p0_density"),
sampling_type="particle",
function=_h_p0_density,
units=self.ds.unit_system["density"],
)
add_species_field_by_density(self, ptype, "H")
def _h_p1_density(field, data):
x_H = 1.0 - data[(ptype, "He_metallicity")] - data[(ptype, "metallicity")]
return (
x_H
* data[(ptype, "density")]
* (1.0 - data[(ptype, "NeutralHydrogenAbundance")])
)
self.add_field(
(ptype, "H_p1_density"),
sampling_type="particle",
function=_h_p1_density,
units=self.ds.unit_system["density"],
)
add_species_field_by_density(self, ptype, "H_p1")
def _nuclei_mass_density_field(field, data):
species = field.name[1][: field.name[1].find("_")]
return data[ptype, "density"] * data[ptype, f"{species}_metallicity"]
for species in ["H", "H_p0", "H_p1"]:
for suf in ["_density", "_number_density"]:
field = f"{species}{suf}"
self.alias(("gas", field), (ptype, field))
if (ptype, "ElectronAbundance") in self.field_list:
def _el_number_density(field, data):
return (
data[ptype, "ElectronAbundance"] * data[ptype, "H_number_density"]
)
self.add_field(
(ptype, "El_number_density"),
sampling_type="particle",
function=_el_number_density,
units=self.ds.unit_system["number_density"],
)
self.alias(("gas", "El_number_density"), (ptype, "El_number_density"))
for species in self.nuclei_names:
self.add_field(
(ptype, f"{species}_nuclei_mass_density"),
sampling_type="particle",
function=_nuclei_mass_density_field,
units=self.ds.unit_system["density"],
)
for suf in ["_nuclei_mass_density", "_metallicity"]:
field = f"{species}{suf}"
self.alias(("gas", field), (ptype, field))
def _metal_density_field(field, data):
return data[ptype, "metallicity"] * data[ptype, "density"]
self.add_field(
(ptype, "metal_density"),
sampling_type="local",
function=_metal_density_field,
units=self.ds.unit_system["density"],
)
self.alias(("gas", "metal_density"), (ptype, "metal_density"))
magnetic_field = "MagneticField"
if (ptype, magnetic_field) in self.field_list:
setup_magnetic_field_aliases(self, ptype, magnetic_field)
def setup_star_particle_fields(self, ptype):
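        # In cosmological runs StellarFormationTime holds the scale factor at
        # formation, converted here to a redshift and then to a cosmic time;
        # otherwise it is already a formation time in code units.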
def _creation_time(field, data):
if data.ds.cosmological_simulation:
a_form = data["StellarFormationTime"]
z_form = 1 / a_form - 1
creation_time = data.ds.cosmology.t_from_z(z_form)
else:
t_form = data["StellarFormationTime"]
creation_time = data.ds.arr(t_form, "code_time")
return creation_time
self.add_field(
(ptype, "creation_time"),
sampling_type="particle",
function=_creation_time,
units=self.ds.unit_system["time"],
)
def _age(field, data):
return data.ds.current_time - data["creation_time"]
self.add_field(
(ptype, "age"),
sampling_type="particle",
function=_age,
units=self.ds.unit_system["time"],
)
|
py | 1a3c91a12161395ea3a8a329ea95d57c03ea92c2 | #!/usr/bin/env python
import logging
from hummingbot.connector.exchange.hitbtc.hitbtc_constants import Constants
from sqlalchemy.engine import RowProxy
from typing import (
    Any,
    Dict,
    List,
    Optional,
)
from hummingbot.logger import HummingbotLogger
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import (
OrderBookMessage, OrderBookMessageType
)
from hummingbot.connector.exchange.hitbtc.hitbtc_order_book_message import HitbtcOrderBookMessage
_logger = None
class HitbtcOrderBook(OrderBook):
@classmethod
def logger(cls) -> HummingbotLogger:
global _logger
if _logger is None:
_logger = logging.getLogger(__name__)
return _logger
@classmethod
def snapshot_message_from_exchange(cls,
                                       msg: Dict[str, Any],
timestamp: float,
metadata: Optional[Dict] = None):
"""
Convert json snapshot data into standard OrderBookMessage format
:param msg: json snapshot data from live web socket stream
:param timestamp: timestamp attached to incoming data
:return: HitbtcOrderBookMessage
"""
if metadata:
msg.update(metadata)
return HitbtcOrderBookMessage(
message_type=OrderBookMessageType.SNAPSHOT,
content=msg,
timestamp=timestamp
)
@classmethod
def snapshot_message_from_db(cls, record: RowProxy, metadata: Optional[Dict] = None):
"""
*used for backtesting
Convert a row of snapshot data into standard OrderBookMessage format
:param record: a row of snapshot data from the database
:return: HitbtcOrderBookMessage
"""
return HitbtcOrderBookMessage(
message_type=OrderBookMessageType.SNAPSHOT,
content=record.json,
timestamp=record.timestamp
)
@classmethod
def diff_message_from_exchange(cls,
                                   msg: Dict[str, Any],
timestamp: Optional[float] = None,
metadata: Optional[Dict] = None):
"""
Convert json diff data into standard OrderBookMessage format
:param msg: json diff data from live web socket stream
:param timestamp: timestamp attached to incoming data
:return: HitbtcOrderBookMessage
"""
if metadata:
msg.update(metadata)
return HitbtcOrderBookMessage(
message_type=OrderBookMessageType.DIFF,
content=msg,
timestamp=timestamp
)
@classmethod
def diff_message_from_db(cls, record: RowProxy, metadata: Optional[Dict] = None):
"""
*used for backtesting
Convert a row of diff data into standard OrderBookMessage format
:param record: a row of diff data from the database
:return: HitbtcOrderBookMessage
"""
return HitbtcOrderBookMessage(
message_type=OrderBookMessageType.DIFF,
content=record.json,
timestamp=record.timestamp
)
@classmethod
def trade_message_from_exchange(cls,
msg: Dict[str, Any],
timestamp: Optional[float] = None,
metadata: Optional[Dict] = None):
"""
Convert a trade data into standard OrderBookMessage format
:param record: a trade data from the database
:return: HitbtcOrderBookMessage
"""
if metadata:
msg.update(metadata)
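        # Map the exchange trade payload keys (id, side, price, quantity) onto
        # the standard OrderBookMessage field names used downstream.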
msg.update({
"exchange_order_id": msg.get("id"),
"trade_type": msg.get("side"),
"price": msg.get("price"),
"amount": msg.get("quantity"),
})
return HitbtcOrderBookMessage(
message_type=OrderBookMessageType.TRADE,
content=msg,
timestamp=timestamp
)
@classmethod
def trade_message_from_db(cls, record: RowProxy, metadata: Optional[Dict] = None):
"""
*used for backtesting
Convert a row of trade data into standard OrderBookMessage format
:param record: a row of trade data from the database
:return: HitbtcOrderBookMessage
"""
return HitbtcOrderBookMessage(
message_type=OrderBookMessageType.TRADE,
content=record.json,
timestamp=record.timestamp
)
@classmethod
def from_snapshot(cls, snapshot: OrderBookMessage):
raise NotImplementedError(Constants.EXCHANGE_NAME + " order book needs to retain individual order data.")
@classmethod
    def restore_from_snapshot_and_diffs(cls, snapshot: OrderBookMessage, diffs: List[OrderBookMessage]):
raise NotImplementedError(Constants.EXCHANGE_NAME + " order book needs to retain individual order data.")
|
py | 1a3c91dd60fbfa49a0e558a87eefc18a38639afb | """
Storage exception classes
"""
class FileTransferError(Exception):
pass
|
py | 1a3c921509d5e533a2e8598b150a8d19bd5e3631 | import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
import os
import weakref
import gc
from weakref import proxy
import contextlib
from test.support import import_helper
from test.support import threading_helper
from test.support.script_helper import assert_python_ok
import functools
py_functools = import_helper.import_fresh_module('functools',
blocked=['_functools'])
c_functools = import_helper.import_fresh_module('functools')
decimal = import_helper.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
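# A minimal illustrative sketch (not part of the original test module; the
# _sketch_* helper and its names are hypothetical and never invoked): partial()
# freezes positional and keyword arguments and merges them with call-time
# arguments, which is the behaviour TestPartial below exercises in detail.
def _sketch_partial_basics():
    from functools import partial
    def record(*args, **kwargs):
        return args, kwargs
    p = partial(record, 1, 2, a=10)
    args, kwargs = p(3, b=20)
    assert args == (1, 2, 3)
    assert kwargs == {"a": 10, "b": 20}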
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
support.gc_collect() # For PyPy or other GCs.
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
spec_keywords = functools.partialmethod(capture, self=1, func=2)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod()
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod(func=capture, a=1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
def test_positional_only(self):
def f(a, b, /):
return a + b
p = functools.partial(f, 1)
self.assertEqual(p(2), f(1, 2))
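# A minimal illustrative sketch (not part of the original test module; the helper
# below is hypothetical and never invoked): partialmethod() pre-binds arguments of
# a method while the instance is still passed implicitly, mirroring what
# TestPartialMethod above verifies.
def _sketch_partialmethod_basics():
    class Cell:
        def set_state(self, state):
            self.state = state
        set_alive = functools.partialmethod(set_state, True)
    c = Cell()
    c.set_alive()
    assert c.state is True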
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
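# A minimal illustrative sketch (not part of the original test module; the helper
# below is hypothetical and never invoked): functools.wraps() copies metadata from
# the wrapped callable and records it on __wrapped__, which TestUpdateWrapper and
# TestWraps above check attribute by attribute.
def _sketch_wraps_basics():
    def greet():
        """Say hello."""
        return "hello"
    @functools.wraps(greet)
    def wrapper():
        return greet()
    assert wrapper.__name__ == "greet"
    assert wrapper.__doc__ == "Say hello."
    assert wrapper.__wrapped__ is greet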
class TestReduce:
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.reduce(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.reduce(add, Squares(10)), 285)
self.assertEqual(self.reduce(add, Squares(10), 0), 285)
self.assertEqual(self.reduce(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.reduce)
self.assertRaises(TypeError, self.reduce, 42, 42)
self.assertRaises(TypeError, self.reduce, 42, 42, 42)
self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.reduce, 42, (42, 42))
self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.reduce, add, "")
self.assertRaises(TypeError, self.reduce, add, ())
self.assertRaises(TypeError, self.reduce, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())
self.assertEqual(self.reduce(add, [], None), None)
self.assertEqual(self.reduce(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.reduce, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.reduce(add, d), "".join(d.keys()))
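# A minimal illustrative sketch (not part of the original test module; the helper
# below is hypothetical and never invoked): reduce() folds an iterable left to
# right and accepts an optional initial value, as the TestReduce cases above
# exercise more thoroughly.
def _sketch_reduce_basics():
    from functools import reduce
    assert reduce(lambda acc, x: acc + x, [1, 2, 3, 4]) == 10
    assert reduce(lambda acc, x: acc + x, [], 42) == 42
    assert reduce(lambda acc, x: acc * x, range(1, 6), 1) == 120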
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
if c_functools:
reduce = c_functools.reduce
class TestReducePy(TestReduce, unittest.TestCase):
reduce = staticmethod(py_functools.reduce)
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
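# A minimal illustrative sketch (not part of the original test module; the helper
# below is hypothetical and never invoked): cmp_to_key() adapts an old-style
# two-argument comparison function into a key object usable with sorted(), min()
# and max().
def _sketch_cmp_to_key_basics():
    from functools import cmp_to_key
    def by_length(a, b):
        return len(a) - len(b)
    words = sorted(["pear", "fig", "apples"], key=cmp_to_key(by_length))
    assert words == ["fig", "pear", "apples"]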
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
@support.cpython_only
def test_disallow_instantiation(self):
# Ensure that the type disallows instantiation (bpo-43916)
support.check_disallow_instantiation(
self, type(c_functools.cmp_to_key(None))
)
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
def test_total_ordering_for_metaclasses_issue_44605(self):
@functools.total_ordering
class SortableMeta(type):
def __new__(cls, name, bases, ns):
return super().__new__(cls, name, bases, ns)
def __lt__(self, other):
if not isinstance(other, SortableMeta):
pass
return self.__name__ < other.__name__
def __eq__(self, other):
if not isinstance(other, SortableMeta):
pass
return self.__name__ == other.__name__
class B(metaclass=SortableMeta):
pass
class A(metaclass=SortableMeta):
pass
self.assertTrue(A < B)
self.assertFalse(A > B)
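# A minimal illustrative sketch (not part of the original test module; the helper
# below is hypothetical and never invoked): total_ordering() derives the missing
# rich comparisons from __eq__ plus a single ordering method, the behaviour
# TestTotalOrdering above covers case by case.
def _sketch_total_ordering_basics():
    @functools.total_ordering
    class Version:
        def __init__(self, value):
            self.value = value
        def __eq__(self, other):
            return self.value == other.value
        def __lt__(self, other):
            return self.value < other.value
    assert Version(1) <= Version(2)   # __le__ derived from __lt__ and __eq__
    assert Version(3) > Version(2)    # __gt__ derived as well
    assert Version(2) >= Version(2)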
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
class TestCache:
# This tests that the pass-through is working as designed.
# The underlying functionality is tested in TestLRU.
def test_cache(self):
@self.module.cache
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
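# A minimal illustrative sketch (not part of the original test module; the helper
# below is hypothetical and never invoked): lru_cache() memoizes calls and exposes
# hit/miss statistics through cache_info(), the machinery TestLRU below exercises
# exhaustively.
def _sketch_lru_cache_basics():
    @functools.lru_cache(maxsize=2)
    def square(x):
        return x * x
    assert [square(n) for n in (2, 3, 2)] == [4, 9, 4]
    hits, misses, maxsize, currsize = square.cache_info()
    assert (hits, misses, maxsize, currsize) == (1, 2, 2, 2)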
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
            # exactly four calls miss the cache: the first 7, the first 9,
            # the first 8 (which evicts 7), and the final 7
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_no_args(self):
@self.module.lru_cache
def square(x):
return x ** 2
self.assertEqual(list(map(square, [10, 20, 10])),
[100, 400, 100])
self.assertEqual(square.cache_info().hits, 1)
self.assertEqual(square.cache_info().misses, 2)
self.assertEqual(square.cache_info().maxsize, 128)
self.assertEqual(square.cache_info().currsize, 2)
def test_lru_bug_35780(self):
# C version of the lru_cache was not checking to see if
# the user function call has already modified the cache
# (this arises in recursive calls and in multi-threading).
        # This caused the cache to have orphan links not referenced
# by the cache dictionary.
once = True # Modified by f(x) below
@self.module.lru_cache(maxsize=10)
def f(x):
nonlocal once
rv = f'.{x}.'
if x == 20 and once:
once = False
rv = f(x)
return rv
# Fill the cache
for x in range(15):
self.assertEqual(f(x), f'.{x}.')
self.assertEqual(f.cache_info().currsize, 10)
# Make a recursive call and make sure the cache remains full
self.assertEqual(f(20), '.20.')
self.assertEqual(f.cache_info().currsize, 10)
def test_lru_bug_36650(self):
# C version of lru_cache was treating a call with an empty **kwargs
# dictionary as being distinct from a call with no keywords at all.
# This did not result in an incorrect answer, but it did trigger
# an unexpected cache miss.
@self.module.lru_cache()
def f(x):
pass
f(0)
f(0, **{})
self.assertEqual(f.cache_info().hits, 1)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
        # LRU cache guarantees that it will call __hash__
        # only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown-off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with threading_helper.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
                    # XXX: Why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with threading_helper.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with threading_helper.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with threading_helper.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
def test_lru_cache_parameters(self):
@self.module.lru_cache(maxsize=2)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 2, "typed": False})
@self.module.lru_cache(maxsize=1000, typed=True)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 1000, "typed": True})
def test_lru_cache_weakrefable(self):
@self.module.lru_cache
def test_function(x):
return x
class A:
@self.module.lru_cache
def test_method(self, x):
return (self, x)
@staticmethod
@self.module.lru_cache
def test_staticmethod(x):
return (self, x)
refs = [weakref.ref(test_function),
weakref.ref(A.test_method),
weakref.ref(A.test_staticmethod)]
for ref in refs:
self.assertIsNotNone(ref())
del A
del test_function
gc.collect()
for ref in refs:
self.assertIsNone(ref())
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
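# A minimal illustrative sketch (not part of the original test module; the helper
# below is hypothetical and never invoked): singledispatch() selects an
# implementation based on the type of the first positional argument, the
# mechanism TestSingleDispatch below probes in depth.
def _sketch_singledispatch_basics():
    @functools.singledispatch
    def describe(obj):
        return "object"
    @describe.register(int)
    def _(obj):
        return "int"
    assert describe(3) == "int"
    assert describe("text") == "object"
    assert describe(True) == "int"    # bool dispatches to the int implementation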
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
# Registering classes as callables doesn't work with annotations,
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_method_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
self.arg = "base"
@t.register(int)
def _(self, arg):
self.arg = "int"
@t.register(str)
def _(self, arg):
self.arg = "str"
a = A()
a.t(0)
self.assertEqual(a.arg, "int")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t('')
self.assertEqual(a.arg, "str")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t(0.0)
self.assertEqual(a.arg, "base")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
def test_staticmethod_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register(int)
@staticmethod
def _(arg):
return isinstance(arg, int)
@t.register(str)
@staticmethod
def _(arg):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_callable_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@A.t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@A.t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_abstractmethod_register(self):
class Abstract(metaclass=abc.ABCMeta):
@functools.singledispatchmethod
@abc.abstractmethod
def add(self, x, y):
pass
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.__dict__['add'].__isabstractmethod__)
with self.assertRaises(TypeError):
Abstract()
def test_type_ann_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
return "base"
@t.register
def _(self, arg: int):
return "int"
@t.register
def _(self, arg: str):
return "str"
a = A()
self.assertEqual(a.t(0), "int")
self.assertEqual(a.t(''), "str")
self.assertEqual(a.t(0.0), "base")
def test_staticmethod_type_ann_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register
@staticmethod
def _(arg: int):
return isinstance(arg, int)
@t.register
@staticmethod
def _(arg: str):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_type_ann_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register
@classmethod
def _(cls, arg: int):
return cls("int")
@t.register
@classmethod
def _(cls, arg: str):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_method_wrapping_attributes(self):
class A:
@functools.singledispatchmethod
def func(self, arg: int) -> str:
"""My function docstring"""
return str(arg)
@functools.singledispatchmethod
@classmethod
def cls_func(cls, arg: int) -> str:
"""My function docstring"""
return str(arg)
@functools.singledispatchmethod
@staticmethod
def static_func(arg: int) -> str:
"""My function docstring"""
return str(arg)
for meth in (
A.func,
A().func,
A.cls_func,
A().cls_func,
A.static_func,
A().static_func
):
with self.subTest(meth=meth):
self.assertEqual(meth.__doc__, 'My function docstring')
self.assertEqual(meth.__annotations__['arg'], int)
self.assertEqual(A.func.__name__, 'func')
self.assertEqual(A().func.__name__, 'func')
self.assertEqual(A.cls_func.__name__, 'cls_func')
self.assertEqual(A().cls_func.__name__, 'cls_func')
self.assertEqual(A.static_func.__name__, 'static_func')
self.assertEqual(A().static_func.__name__, 'static_func')
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(
"Invalid annotation for 'arg'."
))
self.assertTrue(str(exc.exception).endswith(
'typing.Iterable[str] is not a class.'
))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
class CachedCostItem:
_cost = 1
def __init__(self):
self.lock = py_functools.RLock()
@py_functools.cached_property
def cost(self):
"""The cost of the item."""
with self.lock:
self._cost += 1
return self._cost
class OptionallyCachedCostItem:
_cost = 1
def get_cost(self):
"""The cost of the item."""
self._cost += 1
return self._cost
cached_cost = py_functools.cached_property(get_cost)
class CachedCostItemWait:
def __init__(self, event):
self._cost = 1
self.lock = py_functools.RLock()
self.event = event
@py_functools.cached_property
def cost(self):
self.event.wait(1)
with self.lock:
self._cost += 1
return self._cost
class CachedCostItemWithSlots:
__slots__ = ('_cost')
def __init__(self):
self._cost = 1
@py_functools.cached_property
def cost(self):
raise RuntimeError('never called, slots not supported')
class TestCachedProperty(unittest.TestCase):
def test_cached(self):
item = CachedCostItem()
self.assertEqual(item.cost, 2)
self.assertEqual(item.cost, 2) # not 3
def test_cached_attribute_name_differs_from_func_name(self):
item = OptionallyCachedCostItem()
self.assertEqual(item.get_cost(), 2)
self.assertEqual(item.cached_cost, 3)
self.assertEqual(item.get_cost(), 4)
self.assertEqual(item.cached_cost, 3)
def test_threaded(self):
go = threading.Event()
item = CachedCostItemWait(go)
num_threads = 3
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
threads = [
threading.Thread(target=lambda: item.cost)
for k in range(num_threads)
]
with threading_helper.start_threads(threads):
go.set()
finally:
sys.setswitchinterval(orig_si)
self.assertEqual(item.cost, 2)
def test_object_with_slots(self):
item = CachedCostItemWithSlots()
with self.assertRaisesRegex(
TypeError,
"No '__dict__' attribute on 'CachedCostItemWithSlots' instance to cache 'cost' property.",
):
item.cost
def test_immutable_dict(self):
class MyMeta(type):
@py_functools.cached_property
def prop(self):
return True
class MyClass(metaclass=MyMeta):
pass
with self.assertRaisesRegex(
TypeError,
"The '__dict__' attribute on 'MyMeta' instance does not support item assignment for caching 'prop' property.",
):
MyClass.prop
def test_reuse_different_names(self):
"""Disallow this case because decorated function a would not be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@py_functools.cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError("Cannot assign the same cached_property to two different names ('a' and 'b')."))
)
def test_reuse_same_name(self):
"""Reusing a cached_property on different classes under the same name is OK."""
counter = 0
@py_functools.cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_set_name_not_called(self):
cp = py_functools.cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
with self.assertRaisesRegex(
TypeError,
"Cannot use cached_property instance without calling __set_name__ on it.",
):
Foo().cp
def test_access_from_class(self):
self.assertIsInstance(CachedCostItem.cost, py_functools.cached_property)
def test_doc(self):
self.assertEqual(CachedCostItem.cost.__doc__, "The cost of the item.")
if __name__ == '__main__':
unittest.main()
|
py | 1a3c927b0172cd273f08a77032c10d0b737301db | import PIL
import numpy as np
from datetime import datetime
from django.conf import settings
import anodos.tools
import swarm.models
import pflops.models
import distributors.models
import swarm.workers.worker
class Worker(swarm.workers.worker.Worker):
name = 'Service'
def __init__(self):
self.count_of_products = 0
self.count_of_parties = 0
self.count_of_parameters = 0
self.count_of_images = 0
self.count_of_urls = 0
self.message = None
super().__init__()
def run(self):
if self.command == 'info':
print('Products in PFLOPS:',
pflops.models.Product.objects.all().count())
print('Products not yet transferred from distributors:',
distributors.models.Product.objects.filter(to_pflops__isnull=True).count())
print('Products transferred from distributors:',
distributors.models.Product.objects.filter(to_pflops__isnull=False).count())
elif self.command == 'update_products':
# Update the products
self.update_products()
# Update prices and stock levels
self.update_prices_and_quantities()
# Prepare the notification
self.message = f'- products: {self.count_of_products};\n' \
f'- parties: {self.count_of_parties}.'
elif self.command == 'update_parameters':
# Parameters
self.update_parameters()
# Prepare the notification
self.message = f'- parameters: {self.count_of_parameters}.'
elif self.command == 'update_images':
# Images
self.update_images()
# Prepare the notification
self.message = f'- images: {self.count_of_images}.'
elif self.command == 'rewrite_products':
ids_ = pflops.models.Product.objects.all().values('id')
for n, id_ in enumerate(ids_):
product = pflops.models.Product.objects.get(id=id_['id'])
print(f'{n + 1} of {len(ids_)} {product}')
product.save()
elif self.command == 'rewrite_parameter_values':
ids_ = pflops.models.ParameterValue.objects.all().values('id')
for n, id_ in enumerate(ids_):
value = pflops.models.ParameterValue.objects.get(id=id_['id'])
print(f'{n + 1} of {len(ids_)} {value}')
value.save()
elif self.command == 'del_all_images':
pflops.models.ProductImage.objects.all().delete()
elif self.command == 'fix':
bad_name = 'Schneder Electric'
print(bad_name)
try:
distributor = distributors.models.Distributor.objects.get(name=bad_name)
print(distributor)
products = distributors.models.Product.objects.filter(distributor=distributor)
for product in products:
print(product)
product.delete()
vendors = distributors.models.Vendor.objects.filter(distributor=distributor)
for vendor in vendors:
print(vendor)
vendor.delete()
distributor.delete()
except distributors.models.Distributor.DoesNotExist:
print('Nothing to clean up!')
try:
vendor = pflops.models.Vendor.objects.get(name=bad_name)
print(vendor)
products = pflops.models.Product.objects.filter(vendor=vendor)
for product in products:
print(product)
product.delete()
vendor.delete()
except pflops.models.Vendor.DoesNotExist:
print('Nothing to clean up!')
elif self.command == 'update_sitemap':
self.update_sitemap()
# Prepare the notification
self.message = f'- URLs: {self.count_of_urls}.'
else:
print('Unknown command!')
if self.message:
anodos.tools.send(content=f'{self.name}: {self.command} finish at {self.delta()}:\n'
f'{self.message}')
else:
anodos.tools.send(content=f'{self.name}: {self.command} finish at {self.delta()}.\n')
def update_products(self):
""" Переносит сущность продукт в чистовик """
ids_ = distributors.models.Product.objects.filter(vendor__to_pflops__isnull=False).values('id')
for n, id_ in enumerate(ids_):
product_ = distributors.models.Product.objects.get(id=id_['id'])
if product_.category is not None:
category = product_.category.to_pflops
else:
category = None
if product_.unit is not None:
unit = product_.unit.to_pflops
else:
unit = None
product = pflops.models.Product.objects.take(vendor=product_.vendor.to_pflops,
part_number=product_.part_number,
category=category,
name=product_.name,
short_name=product_.short_name,
name_rus=product_.name_rus,
name_other=product_.name_other,
description=product_.description,
warranty=product_.warranty,
ean_128=product_.ean_128,
upc=product_.upc,
pnc=product_.pnc,
hs_code=product_.hs_code,
gtin=product_.gtin,
tnved=product_.tnved,
traceable=product_.traceable,
weight=product_.weight,
width=product_.width,
height=product_.height,
depth=product_.depth,
volume=product_.volume,
multiplicity=product_.multiplicity,
unit=unit,
content=product_.content)
if product_.to_pflops != product:
product_.to_pflops = product
product_.save()
self.count_of_products += 1
print(f'{n + 1} of {len(ids_)} {product}')
def update_prices_and_quantities(self):
rub_ = distributors.models.Currency.objects.take(key="RUB")
rub = pflops.models.Currency.objects.take(key="RUB")
ids_ = pflops.models.Product.objects.all().values('id')
for n, id_ in enumerate(ids_):
product = pflops.models.Product.objects.get(id=id_['id'])
parties = distributors.models.Party.objects.filter(product__to_pflops=product)
price = None
quantity = 0
quantity_great_than = False
# Prices
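# Pricing rule used below: the first open outbound price found among in-stock
# parties wins; otherwise the cheapest purchase price multiplied by
# settings.MARGIN is used. Non-RUB prices are converted to RUB via
# currency.rate / currency.quantity.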
for party in parties:
if party.quantity:
if party.price_out_open:
if party.price_out_open.currency == rub_:
price = party.price_out_open.value
else:
price = float(party.price_out_open.value) * float(party.price_out_open.currency.rate) / \
float(party.price_out_open.currency.quantity)
break
elif party.price_in:
if party.price_in.currency == rub_:
price_ = float(party.price_in.value) * settings.MARGIN
else:
price_ = float(party.price_in.value) * float(party.price_in.currency.rate) / \
float(party.price_in.currency.quantity) * settings.MARGIN
if price is None or price_ < price:
price = price_
# Quantity
for party in parties:
if party.quantity:
quantity += party.quantity
if party.quantity_great_than:
quantity_great_than = True
if price is not None:
price = pflops.models.Price.objects.create(value=price, currency=rub)
product.price = price
product.quantity = quantity
product.quantity_great_than = quantity_great_than
product.save()
print(f'{n + 1} of {len(ids_)} {product} | {product.quantity} | {product.price}')
self.count_of_parties += 1
def update_parameters(self):
# Remove garbage records
distributors.models.Parameter.objects.filter(distributor__isnull=True).delete()
# Remove duplicates and malformed text
parameters = distributors.models.Parameter.objects.all()
for n, parameter in enumerate(parameters):
print(f'{n+1} of {len(parameters)} {parameter}')
if anodos.tools.fix_text(parameter.name) != parameter.name:
parameter.delete()
continue
parameters_ = distributors.models.Parameter.objects.filter(distributor=parameter.distributor,
name=parameter.name)
for m, parameter_ in enumerate(parameters_):
if m > 0:
parameter_.delete()
# Iterate over all products
ids_ = pflops.models.Product.objects.all().values('id')
for n, id_ in enumerate(ids_):
product = pflops.models.Product.objects.get(id=id_['id'])
print(f'{n + 1} of {len(ids_)} {product}')
# Choose the source product whose parameters will be copied to the clean catalogue
max_parameters_count = -1
product_ = None
for p_ in distributors.models.Product.objects.filter(to_pflops=product):
parameters_count = distributors.models.ParameterValue.objects.filter(product=p_).count()
if parameters_count > max_parameters_count:
max_parameters_count = parameters_count
product_ = p_
# Collect the IDs of the current parameter values (so stale ones can be removed later)
parameter_values_ids_ = pflops.models.ParameterValue.objects.filter(product=product).values('id')
parameter_values_ids = set()
for parameter_values_id_ in parameter_values_ids_:
parameter_values_ids.add(str(parameter_values_id_['id']))
# Copy the parameters
parameter_values_ = distributors.models.ParameterValue.objects.filter(product=product_)
for parameter_value_ in parameter_values_:
if parameter_value_.parameter is not None:
parameter = parameter_value_.parameter.to_pflops
else:
continue
if parameter_value_.unit is not None:
unit = parameter_value_.unit.to_pflops
else:
unit = None
# TODO Add extra handling when choosing the unit of measurement
value = parameter_value_.value
parameter_value = pflops.models.ParameterValue.objects.take(product=product,
parameter=parameter,
value=value,
unit=unit)
if parameter_value is not None:
if str(parameter_value.id) in parameter_values_ids:
parameter_values_ids.remove(str(parameter_value.id))
self.count_of_parameters += 1
# Remove stale parameter values
for parameter_values_id in parameter_values_ids:
pflops.models.ParameterValue.objects.filter(id=parameter_values_id).delete()
def update_images(self):
# Iterate over all products
ids_ = pflops.models.Product.objects.all().values('id')
# ids_ = pflops.models.Product.objects.filter(images_loaded__isnull=True).values('id')
for n, id_ in enumerate(ids_):
product = pflops.models.Product.objects.get(id=id_['id'])
print(f'{n + 1} of {len(ids_)} {product}')
self.update_images_of_product(product)
def update_images_of_product(self, product):
# Build comparison vectors from the images already stored in the database
vs = []
images = pflops.models.ProductImage.objects.filter(product=product)
for image in images:
# If the image file already exists
if image.file_name:
# Load the image
try:
im = PIL.Image.open(image.file_name)
except FileNotFoundError:
continue
except PIL.UnidentifiedImageError:
continue
# Compare the image with the existing ones
copy = False
thumbnail_ = im.resize((42, 42))
v_ = np.array(thumbnail_).reshape(42 * 42 * 4)
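# r below is the cosine similarity between the flattened 42x42 RGBA thumbnail
# vectors kept in vs; it is used to detect duplicates among the stored images.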
for v in vs:
r = np.dot(v, v_) / (np.linalg.norm(v) * np.linalg.norm(v_))
if r < 1.0e-11:
copy = True
# If it is a duplicate
if copy is True:
image.delete()
else:
vs.append(v_)
# Iterate over all source products from the distributors
for product_ in distributors.models.Product.objects.filter(to_pflops=product):
# Iterate over all images
images_ = distributors.models.ProductImage.objects.filter(product=product_)
for image_ in images_:
self.count_of_images += 1
# Fetch the entity from the database
image = pflops.models.ProductImage.objects.take(product=product,
source_url=image_.source_url)
if image.file_name:
continue
# Open the source image and check whether it is large enough
try:
im = PIL.Image.open(image_.file_name)
except ValueError:
continue
except AttributeError:
continue
except PIL.UnidentifiedImageError:
continue
if im.size[0] < 450 and im.size[1] < 450:
im.close()
continue
# Compute the size and paste coordinates
size = max(im.size[0], im.size[1])
dx = (size - im.size[0]) // 2
dy = (size - im.size[1]) // 2
# Create a new square image and scale it
try:
im_new = PIL.Image.new('RGBA', (size, size), '#00000000')
im_new.paste(im, (dx, dy))
im_new = im_new.resize((600, 600))
except SyntaxError:
im.close()
im_new.close()
image.delete()
continue
except OSError:
im.close()
im_new.close()
image.delete()
continue
# Compare the image with the existing ones
copy = False
thumbnail_ = im_new.resize((42, 42))
v_ = np.array(thumbnail_).reshape(42*42*4)
for v in vs:
r = np.dot(v, v_) / (np.linalg.norm(v) * np.linalg.norm(v_))
if r < 1.0e-12:
copy = True
if copy is True:
im.close()
im_new.close()
image.delete()
else:
vs.append(v_)
image.file_name = f'{settings.MEDIA_ROOT}products/photos/{image.id}.png'
anodos.tools.create_directory_for_file(image.file_name)
im_new.save(image.file_name, "PNG")
image.save()
print(image)
im.close()
im_new.close()
def update_sitemap(self):
print('update_sitemap')
count_of_urls = 0
count_of_urlsets = 0
urls_in_urlset = 25000
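# Write the sitemap as chunks of 25,000 URLs plus an index file; this stays well
# below the 50,000-URL-per-file limit of the sitemaps.org protocol.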
urlsets_str = ''
urlset_str = ''
products = pflops.models.Product.objects.all()
for n, product in enumerate(products):
if product.url_xml:
urlset_str = f'{urlset_str}{product.url_xml}'
count_of_urls += 1
if (count_of_urls and count_of_urls % urls_in_urlset == 0) or n + 1 == len(products):
urlset_filename = f'{settings.STATIC_ROOT}sitemap/sitemap-{count_of_urlsets}.xml'
urlset_url = f'{settings.HOST}{settings.STATIC_URL}sitemap/sitemap-{count_of_urlsets}.xml'
urlsets_str = f'{urlsets_str}\n' \
f' <sitemap>\n' \
f' <loc>{urlset_url}</loc>\n' \
f' <lastmod>{str(datetime.now())}</lastmod>\n' \
f' </sitemap>\n'
count_of_urlsets += 1
urlset_str = f'<?xml version="1.0" encoding="UTF-8"?>\n' \
f'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n' \
f'{urlset_str}' \
f'</urlset>\n'
anodos.tools.create_directory_for_file(urlset_filename)
urlset_file = open(urlset_filename, 'w')
urlset_file.write(urlset_str)
urlset_file.close()
print(urlset_filename)
urlset_str = ''
urlsets_str = f'<?xml version="1.0" encoding="UTF-8"?>\n' \
f'<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n' \
f'{urlsets_str}' \
f'</sitemapindex>\n'
urlsets_filename = f'{settings.STATIC_ROOT}sitemap/sitemap.xml'
anodos.tools.create_directory_for_file(urlsets_filename)
urlset_files = open(urlsets_filename, 'w')
urlset_files.write(urlsets_str)
urlset_files.close()
print(urlsets_filename)
self.count_of_urls = count_of_urls
|
py | 1a3c9345f840297619c04041ecd989b3a6f6deef | """
Item Exporters are used to export/serialize items into different formats.
"""
import csv
import io
import pprint
import marshal
import warnings
import pickle
from xml.sax.saxutils import XMLGenerator
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.utils.python import to_bytes, to_unicode, is_listlike
from scrapy.item import BaseItem
from scrapy.exceptions import ScrapyDeprecationWarning
__all__ = ['BaseItemExporter', 'PprintItemExporter', 'PickleItemExporter',
'CsvItemExporter', 'XmlItemExporter', 'JsonLinesItemExporter',
'JsonItemExporter', 'MarshalItemExporter']
class BaseItemExporter(object):
def __init__(self, dont_fail=False, **kwargs):
self._kwargs = kwargs
self._configure(kwargs, dont_fail=dont_fail)
def _configure(self, options, dont_fail=False):
"""Configure the exporter by poping options from the ``options`` dict.
If dont_fail is set, it won't raise an exception on unexpected options
(useful for using with keyword arguments in subclasses ``__init__`` methods)
"""
self.encoding = options.pop('encoding', None)
self.fields_to_export = options.pop('fields_to_export', None)
self.export_empty_fields = options.pop('export_empty_fields', False)
self.indent = options.pop('indent', None)
if not dont_fail and options:
raise TypeError("Unexpected options: %s" % ', '.join(options.keys()))
def export_item(self, item):
raise NotImplementedError
def serialize_field(self, field, name, value):
serializer = field.get('serializer', lambda x: x)
return serializer(value)
def start_exporting(self):
pass
def finish_exporting(self):
pass
def _get_serialized_fields(self, item, default_value=None, include_empty=None):
"""Return the fields to export as an iterable of tuples
(name, serialized_value)
"""
if include_empty is None:
include_empty = self.export_empty_fields
if self.fields_to_export is None:
if include_empty and not isinstance(item, dict):
field_iter = item.fields.keys()
else:
field_iter = item.keys()
else:
if include_empty:
field_iter = self.fields_to_export
else:
field_iter = (x for x in self.fields_to_export if x in item)
for field_name in field_iter:
if field_name in item:
field = {} if isinstance(item, dict) else item.fields[field_name]
value = self.serialize_field(field, field_name, item[field_name])
else:
value = default_value
yield field_name, value
class JsonLinesItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
super().__init__(dont_fail=True, **kwargs)
self.file = file
self._kwargs.setdefault('ensure_ascii', not self.encoding)
self.encoder = ScrapyJSONEncoder(**self._kwargs)
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
data = self.encoder.encode(itemdict) + '\n'
self.file.write(to_bytes(data, self.encoding))
class JsonItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
super().__init__(dont_fail=True, **kwargs)
self.file = file
# there is a small difference between the behaviour of JsonItemExporter.indent
# and ScrapyJSONEncoder.indent. ScrapyJSONEncoder.indent=None is needed to prevent
# the addition of newlines everywhere
json_indent = self.indent if self.indent is not None and self.indent > 0 else None
self._kwargs.setdefault('indent', json_indent)
self._kwargs.setdefault('ensure_ascii', not self.encoding)
self.encoder = ScrapyJSONEncoder(**self._kwargs)
self.first_item = True
def _beautify_newline(self):
if self.indent is not None:
self.file.write(b'\n')
def start_exporting(self):
self.file.write(b"[")
self._beautify_newline()
def finish_exporting(self):
self._beautify_newline()
self.file.write(b"]")
def export_item(self, item):
if self.first_item:
self.first_item = False
else:
self.file.write(b',')
self._beautify_newline()
itemdict = dict(self._get_serialized_fields(item))
data = self.encoder.encode(itemdict)
self.file.write(to_bytes(data, self.encoding))
class XmlItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self.item_element = kwargs.pop('item_element', 'item')
self.root_element = kwargs.pop('root_element', 'items')
super().__init__(**kwargs)
if not self.encoding:
self.encoding = 'utf-8'
self.xg = XMLGenerator(file, encoding=self.encoding)
def _beautify_newline(self, new_item=False):
if self.indent is not None and (self.indent > 0 or new_item):
self.xg.characters('\n')
def _beautify_indent(self, depth=1):
if self.indent:
self.xg.characters(' ' * self.indent * depth)
def start_exporting(self):
self.xg.startDocument()
self.xg.startElement(self.root_element, {})
self._beautify_newline(new_item=True)
def export_item(self, item):
self._beautify_indent(depth=1)
self.xg.startElement(self.item_element, {})
self._beautify_newline()
for name, value in self._get_serialized_fields(item, default_value=''):
self._export_xml_field(name, value, depth=2)
self._beautify_indent(depth=1)
self.xg.endElement(self.item_element)
self._beautify_newline(new_item=True)
def finish_exporting(self):
self.xg.endElement(self.root_element)
self.xg.endDocument()
def _export_xml_field(self, name, serialized_value, depth):
self._beautify_indent(depth=depth)
self.xg.startElement(name, {})
if hasattr(serialized_value, 'items'):
self._beautify_newline()
for subname, value in serialized_value.items():
self._export_xml_field(subname, value, depth=depth+1)
self._beautify_indent(depth=depth)
elif is_listlike(serialized_value):
self._beautify_newline()
for value in serialized_value:
self._export_xml_field('value', value, depth=depth+1)
self._beautify_indent(depth=depth)
elif isinstance(serialized_value, str):
self.xg.characters(serialized_value)
else:
self.xg.characters(str(serialized_value))
self.xg.endElement(name)
self._beautify_newline()
class CsvItemExporter(BaseItemExporter):
def __init__(self, file, include_headers_line=True, join_multivalued=',', **kwargs):
super().__init__(dont_fail=True, **kwargs)
if not self.encoding:
self.encoding = 'utf-8'
self.include_headers_line = include_headers_line
self.stream = io.TextIOWrapper(
file,
line_buffering=False,
write_through=True,
encoding=self.encoding,
newline='' # Windows needs this https://github.com/scrapy/scrapy/issues/3034
)
self.csv_writer = csv.writer(self.stream, **self._kwargs)
self._headers_not_written = True
self._join_multivalued = join_multivalued
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._join_if_needed)
return serializer(value)
def _join_if_needed(self, value):
if isinstance(value, (list, tuple)):
try:
return self._join_multivalued.join(value)
except TypeError: # list in value may not contain strings
pass
return value
def export_item(self, item):
if self._headers_not_written:
self._headers_not_written = False
self._write_headers_and_set_fields_to_export(item)
fields = self._get_serialized_fields(item, default_value='',
include_empty=True)
values = list(self._build_row(x for _, x in fields))
self.csv_writer.writerow(values)
def _build_row(self, values):
for s in values:
try:
yield to_unicode(s, self.encoding)
except TypeError:
yield s
def _write_headers_and_set_fields_to_export(self, item):
if self.include_headers_line:
if not self.fields_to_export:
if isinstance(item, dict):
# for dicts try using fields of the first item
self.fields_to_export = list(item.keys())
else:
# use fields declared in Item
self.fields_to_export = list(item.fields.keys())
row = list(self._build_row(self.fields_to_export))
self.csv_writer.writerow(row)
class PickleItemExporter(BaseItemExporter):
def __init__(self, file, protocol=2, **kwargs):
super().__init__(**kwargs)
self.file = file
self.protocol = protocol
def export_item(self, item):
d = dict(self._get_serialized_fields(item))
pickle.dump(d, self.file, self.protocol)
class MarshalItemExporter(BaseItemExporter):
"""Exports items in a Python-specific binary format (see
:mod:`marshal`).
:param file: The file-like object to use for exporting the data. Its
``write`` method should accept :class:`bytes` (a disk file
opened in binary mode, a :class:`~io.BytesIO` object, etc)
"""
def __init__(self, file, **kwargs):
super().__init__(**kwargs)
self.file = file
def export_item(self, item):
marshal.dump(dict(self._get_serialized_fields(item)), self.file)
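# Usage sketch (not part of the original module; assumes a dict-like item):
#
#     import io
#     buf = io.BytesIO()
#     exporter = MarshalItemExporter(buf)
#     exporter.start_exporting()
#     exporter.export_item({'name': 'value'})
#     exporter.finish_exporting()
#     data = marshal.loads(buf.getvalue())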
class PprintItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
super().__init__(**kwargs)
self.file = file
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(pprint.pformat(itemdict) + '\n'))
class PythonItemExporter(BaseItemExporter):
"""This is a base class for item exporters that extends
:class:`BaseItemExporter` with support for nested items.
It serializes items to built-in Python types, so that any serialization
library (e.g. :mod:`json` or msgpack_) can be used on top of it.
.. _msgpack: https://pypi.org/project/msgpack/
"""
def _configure(self, options, dont_fail=False):
self.binary = options.pop('binary', True)
super(PythonItemExporter, self)._configure(options, dont_fail)
if self.binary:
warnings.warn(
"PythonItemExporter will drop support for binary export in the future",
ScrapyDeprecationWarning)
if not self.encoding:
self.encoding = 'utf-8'
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._serialize_value)
return serializer(value)
def _serialize_value(self, value):
if isinstance(value, BaseItem):
return self.export_item(value)
if isinstance(value, dict):
return dict(self._serialize_dict(value))
if is_listlike(value):
return [self._serialize_value(v) for v in value]
encode_func = to_bytes if self.binary else to_unicode
if isinstance(value, (str, bytes)):
return encode_func(value, encoding=self.encoding)
return value
def _serialize_dict(self, value):
for key, val in value.items():
key = to_bytes(key) if self.binary else key
yield key, self._serialize_value(val)
def export_item(self, item):
result = dict(self._get_serialized_fields(item))
if self.binary:
result = dict(self._serialize_dict(result))
return result
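# Usage sketch (not part of the original module): the exporter produces plain
# Python objects, so any serialization library can be layered on top, e.g.:
#
#     import json
#     exporter = PythonItemExporter(binary=False)
#     payload = json.dumps(exporter.export_item({'name': 'value'}))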
|
py | 1a3c934c554f63bb420777c96f1d1eec2c955564 | from chocs import HttpRequest, HttpResponse, HttpStatus
from tests.fixtures.app_fixture import app
@app.get("/test")
def test_get(request: HttpRequest) -> HttpResponse:
return HttpResponse(status=HttpStatus.OK, body="test get")
@app.post("/test")
def test_post(request: HttpRequest) -> HttpResponse:
return HttpResponse(status=HttpStatus.OK, body="test post")
@app.patch("/test")
def test_patch(request: HttpRequest) -> HttpResponse:
return HttpResponse(status=HttpStatus.OK, body="test patch")
@app.put("/test")
def test_put(request: HttpRequest) -> HttpResponse:
return HttpResponse(status=HttpStatus.OK, body="test put")
@app.delete("/test")
def test_delete(request: HttpRequest) -> HttpResponse:
return HttpResponse(status=HttpStatus.OK, body="test delete")
@app.options("/test")
def test_options(request: HttpRequest) -> HttpResponse:
return HttpResponse(status=HttpStatus.OK, body="test options")
@app.head("/test")
def test_head(request: HttpRequest) -> HttpResponse:
return HttpResponse(status=HttpStatus.OK, body="test head")
|
py | 1a3c935ffb55ad5f1478b4dc1aed364c20bff6f2 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ResourcePolicyArgs', 'ResourcePolicy']
@pulumi.input_type
class ResourcePolicyArgs:
def __init__(__self__, *,
policy_document: pulumi.Input[str],
policy_name: pulumi.Input[str]):
"""
The set of arguments for constructing a ResourcePolicy resource.
:param pulumi.Input[str] policy_document: The policy document
:param pulumi.Input[str] policy_name: A name for resource policy
"""
pulumi.set(__self__, "policy_document", policy_document)
pulumi.set(__self__, "policy_name", policy_name)
@property
@pulumi.getter(name="policyDocument")
def policy_document(self) -> pulumi.Input[str]:
"""
The policy document
"""
return pulumi.get(self, "policy_document")
@policy_document.setter
def policy_document(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_document", value)
@property
@pulumi.getter(name="policyName")
def policy_name(self) -> pulumi.Input[str]:
"""
A name for resource policy
"""
return pulumi.get(self, "policy_name")
@policy_name.setter
def policy_name(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_name", value)
class ResourcePolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
policy_document: Optional[pulumi.Input[str]] = None,
policy_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The resource schema for AWSLogs ResourcePolicy
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] policy_document: The policy document
:param pulumi.Input[str] policy_name: A name for resource policy
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ResourcePolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The resource schema for AWSLogs ResourcePolicy
:param str resource_name: The name of the resource.
:param ResourcePolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ResourcePolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
policy_document: Optional[pulumi.Input[str]] = None,
policy_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ResourcePolicyArgs.__new__(ResourcePolicyArgs)
if policy_document is None and not opts.urn:
raise TypeError("Missing required property 'policy_document'")
__props__.__dict__["policy_document"] = policy_document
if policy_name is None and not opts.urn:
raise TypeError("Missing required property 'policy_name'")
__props__.__dict__["policy_name"] = policy_name
super(ResourcePolicy, __self__).__init__(
'aws-native:logs:ResourcePolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ResourcePolicy':
"""
Get an existing ResourcePolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ResourcePolicyArgs.__new__(ResourcePolicyArgs)
__props__.__dict__["policy_document"] = None
__props__.__dict__["policy_name"] = None
return ResourcePolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="policyDocument")
def policy_document(self) -> pulumi.Output[str]:
"""
The policy document
"""
return pulumi.get(self, "policy_document")
@property
@pulumi.getter(name="policyName")
def policy_name(self) -> pulumi.Output[str]:
"""
A name for resource policy
"""
return pulumi.get(self, "policy_name")
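# Usage sketch (not part of the generated file; the resource name and policy
# document below are illustrative only):
#
#     import json
#     policy = ResourcePolicy("examplePolicy",
#                             policy_name="examplePolicy",
#                             policy_document=json.dumps({"Version": "2012-10-17",
#                                                         "Statement": []}))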
|
py | 1a3c94f656ed72278ad12978b66baf4a40a5d70a | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import copy
import numpy as np
from scipy.constants import h, k
from pymatgen.core.structure import Molecule
from mrnet.core.mol_entry import MoleculeEntry
from mrnet.core.rates import (
ReactionRateCalculator,
BEPRateCalculator,
ExpandedBEPRateCalculator,
RedoxRateCalculator,
)
try:
import openbabel as ob
except ImportError:
ob = None
__author__ = "Evan Spotte-Smith"
__version__ = "0.1"
__maintainer__ = "Evan Spotte-Smith"
__email__ = "[email protected]"
__status__ = "Alpha"
__date__ = "September 2019"
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
# Not real molecules; just place-holders
# We're only interested in the math
mol_placeholder = Molecule(["H"], [[0.0, 0.0, 0.0]])
class ReactionRateCalculatorTest(unittest.TestCase):
def setUp(self) -> None:
if ob:
self.energies = [-271.553636516598, -78.5918513462683, -350.105998350078]
self.enthalpies = [13.917, 34.596, 49.515]
self.entropies = [67.357, 55.047, 84.265]
self.rct_1 = MoleculeEntry(
mol_placeholder,
self.energies[0],
enthalpy=self.enthalpies[0],
entropy=self.entropies[0],
)
self.rct_2 = MoleculeEntry(
mol_placeholder,
self.energies[1],
enthalpy=self.enthalpies[1],
entropy=self.entropies[1],
)
self.pro = MoleculeEntry(
mol_placeholder,
self.energies[2],
enthalpy=self.enthalpies[2],
entropy=self.entropies[2],
)
self.ts = MoleculeEntry(
mol_placeholder, -350.099875862606, enthalpy=48.560, entropy=83.607
)
self.calc = ReactionRateCalculator(
[self.rct_1, self.rct_2], [self.pro], self.ts
)
def tearDown(self) -> None:
if ob:
del self.calc
del self.ts
del self.pro
del self.rct_2
del self.rct_1
del self.entropies
del self.enthalpies
del self.energies
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_net_properties(self):
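# Unit conversions behind the expected values: 27.2116 eV per Hartree for
# energies, 0.0433641 eV per kcal/mol for enthalpies, and 0.0000433641 eV/K
# per cal/(mol*K) for entropies.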
self.assertAlmostEqual(
self.calc.net_energy,
(self.energies[2] - (self.energies[0] + self.energies[1])) * 27.2116,
6,
)
self.assertEqual(
self.calc.net_enthalpy,
(self.enthalpies[2] - (self.enthalpies[0] + self.enthalpies[1]))
* 0.0433641,
)
self.assertEqual(
self.calc.net_entropy,
(self.entropies[2] - (self.entropies[0] + self.entropies[1]))
* 0.0000433641,
)
gibbs_300 = self.pro.get_free_energy(300) - (
self.rct_1.get_free_energy(300) + self.rct_2.get_free_energy(300)
)
self.assertEqual(self.calc.calculate_net_gibbs(300), gibbs_300)
gibbs_100 = self.pro.get_free_energy(100) - (
self.rct_1.get_free_energy(100) + self.rct_2.get_free_energy(100)
)
self.assertEqual(self.calc.calculate_net_gibbs(100.00), gibbs_100)
self.assertDictEqual(
self.calc.calculate_net_thermo(),
{
"energy": self.calc.net_energy,
"enthalpy": self.calc.net_enthalpy,
"entropy": self.calc.net_entropy,
"gibbs": self.calc.calculate_net_gibbs(),
},
)
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_act_properties(self):
trans_energy = self.ts.energy
trans_enthalpy = self.ts.enthalpy
trans_entropy = self.ts.entropy
pro_energies = [p.energy for p in self.calc.products]
rct_energies = [r.energy for r in self.calc.reactants]
pro_enthalpies = [p.enthalpy for p in self.calc.products]
rct_enthalpies = [r.enthalpy for r in self.calc.reactants]
pro_entropies = [p.entropy for p in self.calc.products]
rct_entropies = [r.entropy for r in self.calc.reactants]
self.assertAlmostEqual(
self.calc.calculate_act_energy(),
(trans_energy - sum(rct_energies)) * 27.2116,
6,
)
self.assertAlmostEqual(
self.calc.calculate_act_energy(reverse=True),
(trans_energy - sum(pro_energies)) * 27.2116,
6,
)
self.assertEqual(
self.calc.calculate_act_enthalpy(),
(trans_enthalpy - sum(rct_enthalpies)) * 0.0433641,
)
self.assertEqual(
self.calc.calculate_act_enthalpy(reverse=True),
(trans_enthalpy - sum(pro_enthalpies)) * 0.0433641,
)
self.assertEqual(
self.calc.calculate_act_entropy(),
(trans_entropy - sum(rct_entropies)) * 0.0000433641,
)
self.assertEqual(
self.calc.calculate_act_entropy(reverse=True),
(trans_entropy - sum(pro_entropies)) * 0.0000433641,
)
gibbs_300 = self.calc.calculate_act_energy() + (
self.calc.calculate_act_enthalpy() - 300 * self.calc.calculate_act_entropy()
)
gibbs_300_rev = self.calc.calculate_act_energy(reverse=True) + (
self.calc.calculate_act_enthalpy(reverse=True)
- 300 * self.calc.calculate_act_entropy(reverse=True)
)
gibbs_100 = (
self.calc.calculate_act_energy()
+ self.calc.calculate_act_enthalpy()
- 100 * self.calc.calculate_act_entropy()
)
self.assertEqual(self.calc.calculate_act_gibbs(300), gibbs_300)
self.assertEqual(
self.calc.calculate_act_gibbs(300, reverse=True), gibbs_300_rev
)
self.assertEqual(self.calc.calculate_act_gibbs(100), gibbs_100)
self.assertEqual(
self.calc.calculate_act_thermo(temperature=300.00),
{
"energy": self.calc.calculate_act_energy(),
"enthalpy": self.calc.calculate_act_enthalpy(),
"entropy": self.calc.calculate_act_entropy(),
"gibbs": self.calc.calculate_act_gibbs(300),
},
)
self.assertEqual(
self.calc.calculate_act_thermo(temperature=300.00, reverse=True),
{
"energy": self.calc.calculate_act_energy(reverse=True),
"enthalpy": self.calc.calculate_act_enthalpy(reverse=True),
"entropy": self.calc.calculate_act_entropy(reverse=True),
"gibbs": self.calc.calculate_act_gibbs(300, reverse=True),
},
)
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_rate_constant(self):
gibbs_300 = self.calc.calculate_act_gibbs(300)
gibbs_300_rev = self.calc.calculate_act_gibbs(300, reverse=True)
gibbs_600 = self.calc.calculate_act_gibbs(600)
# Test normal forwards and reverse behavior
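        # The expected values below follow the Eyring equation,
        #   k_rate = kappa * (k_B * T / h) * exp(-dG_act / (k_B * T)),
        # with free energies in eV (8.617333262e-5 eV/K is the Boltzmann constant).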
self.assertEqual(
self.calc.calculate_rate_constant(temperature=300.0),
k * 300 / h * np.exp(-gibbs_300 / (8.617333262 * 10 ** -5 * 300)),
)
self.assertEqual(
self.calc.calculate_rate_constant(temperature=600),
k * 600 / h * np.exp(-gibbs_600 / (8.617333262 * 10 ** -5 * 600)),
)
self.assertEqual(
self.calc.calculate_rate_constant(temperature=300.0, reverse=True),
k * 300 / h * np.exp(-gibbs_300_rev / (8.617333262 * 10 ** -5 * 300)),
)
# Test effect of kappa
self.assertEqual(
self.calc.calculate_rate_constant(),
self.calc.calculate_rate_constant(kappa=0.5) * 2,
)
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_rates(self):
rate_constant = self.calc.calculate_rate_constant()
rate_constant_600 = self.calc.calculate_rate_constant(temperature=600)
rate_constant_rev = self.calc.calculate_rate_constant(reverse=True)
base_rate = rate_constant
self.assertAlmostEqual(self.calc.calculate_rate([1, 1]), base_rate)
self.assertAlmostEqual(self.calc.calculate_rate([1, 0.5]), base_rate / 2, 8)
self.assertAlmostEqual(self.calc.calculate_rate([0.5, 1]), base_rate / 2, 8)
self.assertAlmostEqual(self.calc.calculate_rate([0.5, 0.5]), base_rate / 4, 8)
self.assertAlmostEqual(
self.calc.calculate_rate([1], reverse=True), rate_constant_rev, 8
)
self.assertAlmostEqual(
self.calc.calculate_rate([1, 1], temperature=600), rate_constant_600, 8
)
class BEPReactionRateCalculatorTest(unittest.TestCase):
def setUp(self) -> None:
if ob:
self.energies = [-271.553636516598, -78.5918513462683, -350.105998350078]
self.enthalpies = [13.917, 34.596, 49.515]
self.entropies = [67.357, 55.047, 84.265]
self.rct_1 = MoleculeEntry(
mol_placeholder,
self.energies[0],
enthalpy=self.enthalpies[0],
entropy=self.entropies[0],
)
self.rct_2 = MoleculeEntry(
mol_placeholder,
self.energies[1],
enthalpy=self.enthalpies[1],
entropy=self.entropies[1],
)
self.pro = MoleculeEntry(
mol_placeholder,
self.energies[2],
enthalpy=self.enthalpies[2],
entropy=self.entropies[2],
)
self.calc = BEPRateCalculator(
[self.rct_1, self.rct_2], [self.pro], 1.718386088799889, 1.722
)
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_act_properties(self):
self.assertAlmostEqual(
self.calc.calculate_act_energy(),
self.calc.ea_reference
+ 0.5 * (self.calc.net_enthalpy - self.calc.delta_h_reference),
6,
)
self.assertAlmostEqual(
self.calc.calculate_act_energy(reverse=True),
self.calc.ea_reference
+ 0.5 * (-1 * self.calc.net_enthalpy - self.calc.delta_h_reference),
6,
)
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_enthalpy()
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_entropy()
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_gibbs(300)
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_thermo(temperature=300.00)
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_rate_constant(self):
rate_constant = np.exp(
-self.calc.calculate_act_energy() / (8.617333262 * 10 ** -5 * 300)
)
rate_constant_600 = np.exp(
-self.calc.calculate_act_energy() / (8.617333262 * 10 ** -5 * 600)
)
self.assertEqual(
self.calc.calculate_rate_constant(temperature=300), rate_constant
)
self.assertEqual(
self.calc.calculate_rate_constant(temperature=600), rate_constant_600
)
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_rates(self):
base_rate = self.calc.calculate_rate([1, 1])
rate_600 = self.calc.calculate_rate([1, 1], temperature=600)
self.assertAlmostEqual(self.calc.calculate_rate([1, 1]) / base_rate, 1, 6)
self.assertAlmostEqual(
self.calc.calculate_rate([1, 0.5]) / (base_rate / 2), 1, 6
)
self.assertAlmostEqual(
self.calc.calculate_rate([0.5, 1]) / (base_rate / 2), 1, 6
)
self.assertAlmostEqual(
self.calc.calculate_rate([0.5, 0.5]) / (base_rate / 4), 1, 6
)
self.assertAlmostEqual(
self.calc.calculate_rate([1, 1], kappa=0.5) / (base_rate / 2), 1, 6
)
self.assertAlmostEqual(
self.calc.calculate_rate([1, 1], temperature=600) / rate_600, 1, 6
)
class ExpandedBEPReactionRateCalculatorTest(unittest.TestCase):
def setUp(self) -> None:
if ob:
self.energies = [-271.553636516598, -78.5918513462683, -350.105998350078]
self.enthalpies = [13.917, 34.596, 49.515]
self.entropies = [67.357, 55.047, 84.265]
self.rct_1 = MoleculeEntry(
mol_placeholder,
self.energies[0],
enthalpy=self.enthalpies[0],
entropy=self.entropies[0],
)
self.rct_2 = MoleculeEntry(
mol_placeholder,
self.energies[1],
enthalpy=self.enthalpies[1],
entropy=self.entropies[1],
)
self.pro = MoleculeEntry(
mol_placeholder,
self.energies[2],
enthalpy=self.enthalpies[2],
entropy=self.entropies[2],
)
self.calc = ExpandedBEPRateCalculator(
[self.rct_1, self.rct_2], [self.pro], 1.71, 0.1, -0.05, 1.8, 0.1, 0.05
)
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_act_properties(self):
delta_g_ref = (
self.calc.delta_e_reference
+ self.calc.delta_h_reference
- 300 * self.calc.delta_s_reference
)
delta_g = self.calc.calculate_net_gibbs(300)
delta_g_rev = -delta_g
delta_g_ref_600 = (
self.calc.delta_e_reference
+ self.calc.delta_h_reference
- 600 * self.calc.delta_s_reference
)
delta_g_600 = self.calc.calculate_net_gibbs(600)
delta_ga_ref_300 = self.calc.delta_ea_reference + (
self.calc.delta_ha_reference - 300 * self.calc.delta_sa_reference
)
delta_ga_ref_600 = self.calc.delta_ea_reference + (
self.calc.delta_ha_reference - 600 * self.calc.delta_sa_reference
)
self.assertAlmostEqual(
self.calc.calculate_act_gibbs(300),
delta_ga_ref_300 + self.calc.alpha * (delta_g - delta_g_ref),
)
self.assertAlmostEqual(
self.calc.calculate_act_gibbs(300, reverse=True),
delta_ga_ref_300 + self.calc.alpha * (delta_g_rev - delta_g_ref),
)
self.assertAlmostEqual(
self.calc.calculate_act_gibbs(600),
delta_ga_ref_600 + self.calc.alpha * (delta_g_600 - delta_g_ref_600),
)
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_energy()
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_enthalpy()
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_entropy()
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_thermo(temperature=300.00)
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_rate_constant(self):
gibbs_300 = self.calc.calculate_act_gibbs(300)
gibbs_600 = self.calc.calculate_act_gibbs(600)
self.assertEqual(
self.calc.calculate_rate_constant(temperature=300),
k * 300 / h * np.exp(-gibbs_300 / (8.617333262 * 10 ** -5 * 300)),
)
self.assertEqual(
self.calc.calculate_rate_constant(temperature=600),
k * 600 / h * np.exp(-gibbs_600 / (8.617333262 * 10 ** -5 * 600)),
)
# Test effect of kappa
self.assertEqual(
self.calc.calculate_rate_constant(),
self.calc.calculate_rate_constant(kappa=0.5) * 2,
)
class RedoxRateCalculatorTest(unittest.TestCase):
def setUp(self) -> None:
if ob:
self.energies = [-349.88738062842, -349.955817900195]
self.enthalpies = [53.623, 51.853]
self.entropies = [82.846, 79.595]
rct_mol = copy.deepcopy(mol_placeholder)
rct_mol.set_charge_and_spin(charge=1)
pro_mol = copy.deepcopy(mol_placeholder)
pro_mol.set_charge_and_spin(charge=0)
self.rct = MoleculeEntry(
rct_mol,
self.energies[0],
enthalpy=self.enthalpies[0],
entropy=self.entropies[0],
)
self.pro = MoleculeEntry(
pro_mol,
self.energies[1],
enthalpy=self.enthalpies[1],
entropy=self.entropies[1],
)
self.calc = RedoxRateCalculator(
[self.rct], [self.pro], 1.031373321805404, 18.5, 1.415, -1.897, 7.5, 5
)
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_act_properties(self):
self.assertAlmostEqual(
self.calc.calculate_act_gibbs(temperature=300), 0.284698735, 9
)
self.assertAlmostEqual(
self.calc.calculate_act_gibbs(temperature=300, reverse=True), 0.284433478, 9
)
self.assertAlmostEqual(
self.calc.calculate_act_gibbs(temperature=600), 0.306243023, 9
)
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_energy()
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_enthalpy()
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_entropy()
with self.assertRaises(NotImplementedError):
self.calc.calculate_act_thermo(temperature=300.00)
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_rate_constant(self):
# self.assertAlmostEqual(
# self.calc.calculate_rate_constant(temperature=300), 255536.74880926133, 4
# )
# self.assertAlmostEqual(
# self.calc.calculate_rate_constant(temperature=300, reverse=True),
# 258172.2056825794,
# 4,
# )
self.assertAlmostEqual(
self.calc.calculate_rate_constant(temperature=600), 82962806.19389883, 4
)
if __name__ == "__main__":
unittest.main()
|
py | 1a3c964b89fd7f1cbabe0e1789dab5c5bb816c6c | __all__ = [
"BeginningAuction",
"BeginningAuctionInputDto",
"EndingAuction",
"EndingAuctionInputDto",
"PlacingBid",
"PlacingBidInputDto",
"PlacingBidOutputBoundary",
"PlacingBidOutputDto",
"WithdrawingBids",
"WithdrawingBidsInputDto",
]
from auctions.application.use_cases.beginning_auction import BeginningAuction, BeginningAuctionInputDto
from auctions.application.use_cases.ending_auction import EndingAuction, EndingAuctionInputDto
from auctions.application.use_cases.placing_bid import (
PlacingBid,
PlacingBidInputDto,
PlacingBidOutputBoundary,
PlacingBidOutputDto,
)
from auctions.application.use_cases.withdrawing_bids import WithdrawingBids, WithdrawingBidsInputDto
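# Usage note (sketch): assuming this module is the package's __init__, the
# re-exports above let client code import the use cases from the package root,
# e.g.
#
#     from auctions.application.use_cases import PlacingBid, PlacingBidInputDto
#
# instead of reaching into the individual submodules.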
|
py | 1a3c966f4827e874453cb286f888eb8e5a7cbe6e | from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import Resource
from .serializers import ResourceSerializer
class ResourceView(APIView):
"""
Create a single link resource
"""
def post(self, request):
serializer = ResourceSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
"""
Get all link resources
"""
def get(self, request):
resources = Resource.objects.all()
serializer = ResourceSerializer(resources, many=True)
return Response(serializer.data, status=status.HTTP_200_OK) |
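# A minimal sketch (not part of this file) of how ResourceView might be wired
# into a Django URL conf; the module path and route name are assumptions.
#
#     # urls.py
#     from django.urls import path
#     from .views import ResourceView
#
#     urlpatterns = [
#         path("resources/", ResourceView.as_view(), name="resource-list"),
#     ]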
py | 1a3c9690c825c8784edc2722c7eab8bd37d3c92b | __version_info__ = {
'major': 2,
'minor': 1,
'micro': 0,
'releaselevel': 'final',
'serial': 4
}
def get_version(short=False):
assert __version_info__['releaselevel'] in ('alpha', 'beta', 'final')
vers = ["%(major)i.%(minor)i" % __version_info__, ]
if __version_info__['micro']:
vers.append(".%(micro)i" % __version_info__)
if __version_info__['releaselevel'] != 'final' and not short:
vers.append('%s%i' % (__version_info__['releaselevel'][0], __version_info__['serial']))
return ''.join(vers)
__version__ = get_version()
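# Worked example (sketch): with the __version_info__ above, micro is 0 and
# releaselevel is 'final', so get_version() and get_version(short=True) both
# return "2.1". A hypothetical pre-release with micro=3, releaselevel='beta',
# serial=4 would yield "2.1.3b4" instead.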
|
py | 1a3c973460090891fab19c917d5d9c0e345b40a3 | from __future__ import print_function
from PIL import Image
from os.path import join
import os
import torch.utils.data as data
from utils import download_url, check_integrity, list_dir, list_files
import torch
import torchvision
from torchvision import transforms
from sampler import RandSubClassSampler
class Omniglot(data.Dataset):
"""`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``omniglot-py`` exists.
background (bool, optional): If True, creates dataset from the "background" set, otherwise
creates from the "evaluation" set. This terminology is defined by the authors.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset zip files from the internet and
puts it in root directory. If the zip files are already downloaded, they are not
downloaded again.
"""
folder = 'omniglot-py'
download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python'
zips_md5 = {
'images_background': '68d2efa1b9178cc56df9314c21c6e718',
'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811'
}
def __init__(self, root, background=True,
transform=None, target_transform=None,
download=False, deform=None):
self.root = join(os.path.expanduser(root), self.folder)
self.background = background
self.transform = transform
self.deform = deform
self.target_transform = target_transform
if download:
self.download()
self.target_folder = join(self.root, self._get_target_folder())
self._alphabets = list_dir(self.target_folder)
self._characters = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))]
for a in self._alphabets], [])
self._character_images = [[(image, idx) for image in list_files(join(self.target_folder, character), '.png')]
for idx, character in enumerate(self._characters)]
self._flat_character_images = sum(self._character_images, [])
def __len__(self):
return len(self._flat_character_images)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target character class.
"""
image_name, character_class = self._flat_character_images[index]
image_path = join(self.target_folder, self._characters[character_class], image_name)
image = Image.open(image_path, mode='r').convert('L')
image_d = image
if self.deform is not None:
image_d = self.deform(image_d)
if self.transform:
image = self.transform(image)
image_d = self.transform(image_d)
if self.target_transform:
character_class = self.target_transform(character_class)
return image, image_d, character_class, index
def _check_integrity(self):
zip_filename = self._get_target_folder()
if not check_integrity(join(self.root, zip_filename + '.zip'), self.zips_md5[zip_filename]):
return False
return True
def download(self):
import zipfile
if self._check_integrity():
print('Files already downloaded and verified')
return
filename = self._get_target_folder()
zip_filename = filename + '.zip'
url = self.download_url_prefix + '/' + zip_filename
download_url(url, self.root, zip_filename, self.zips_md5[filename])
print('Extracting downloaded file: ' + join(self.root, zip_filename))
with zipfile.ZipFile(join(self.root, zip_filename), 'r') as zip_file:
zip_file.extractall(self.root)
def _get_target_folder(self):
if self.background == 'images_background_train' or self.background == 'images_background_val':
return self.background
return 'images_background' if self.background else 'images_evaluation'
def Omniglot_loader(batch_size, num_workers=2, root='../data'):
binary_flip = transforms.Lambda(lambda x: 1 - x)
normalize = transforms.Normalize((0.086,), (0.235,))
train_dataset = Omniglot(
root=root, download=True, background=True,
transform=transforms.Compose(
[transforms.RandomResizedCrop(32, (0.85, 1.)),
transforms.ToTensor(),
binary_flip,
normalize]
))
train_length = len(train_dataset)
    train_imgid2cid = [train_dataset[i][2] for i in range(train_length)]  # train_dataset[i] returns (img, img_d, cid, idx)
    # Randomly select 20 characters out of 964. With the default setting (batch_size=100), each character has 5 images in a mini-batch.
train_sampler = RandSubClassSampler(
inds=range(train_length),
labels=train_imgid2cid,
cls_per_batch=20,
batch_size=batch_size,
num_batch=train_length//batch_size)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers, sampler=train_sampler)
train_loader.num_classes = 964
test_dataset = Omniglot(
root=root, download=True, background=False,
transform=transforms.Compose(
[transforms.Resize(32),
transforms.ToTensor(),
binary_flip,
normalize]
))
eval_length = len(test_dataset)
eval_imgid2cid = [test_dataset[i][2] for i in range(eval_length)]
eval_sampler = RandSubClassSampler(
inds=range(eval_length),
labels=eval_imgid2cid,
cls_per_batch=20,
batch_size=batch_size,
num_batch=eval_length // batch_size)
eval_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers, sampler=eval_sampler)
eval_loader.num_classes = 659
return train_loader, eval_loader
def Omniglot_bg_loader(batch_size, num_workers=2, train_cls_per_batch=20, test_cls_per_batch=20, root='../data'):
binary_flip = transforms.Lambda(lambda x: 1 - x)
normalize = transforms.Normalize((0.086,), (0.235,))
train_dataset = Omniglot(
root=root, download=False, background='images_background_train',
transform=transforms.Compose(
[transforms.RandomResizedCrop(32, (0.85, 1.)),
transforms.ToTensor(),
binary_flip,
normalize]
))
if train_cls_per_batch is not None:
train_length = len(train_dataset)
train_imgid2cid = [train_dataset[i][2] for i in range(train_length)]
train_sampler = RandSubClassSampler(
inds=range(train_length),
labels=train_imgid2cid,
cls_per_batch=train_cls_per_batch,
batch_size=batch_size,
num_batch=train_length//batch_size)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers, sampler=train_sampler)
else:
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
train_loader.num_classes = 964 - 169
test_dataset = Omniglot(
root=root, download=False, background='images_background_val',
transform=transforms.Compose(
[transforms.Resize(32),
transforms.ToTensor(),
binary_flip,
normalize]
))
if test_cls_per_batch is not None:
eval_length = len(test_dataset)
eval_imgid2cid = [test_dataset[i][2] for i in range(eval_length)]
eval_sampler = RandSubClassSampler(
inds=range(eval_length),
labels=eval_imgid2cid,
cls_per_batch=test_cls_per_batch,
batch_size=batch_size,
num_batch=eval_length // batch_size)
eval_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers, sampler=eval_sampler)
else:
eval_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers)
eval_loader.num_classes = 169
return train_loader, eval_loader
def omniglot_alphabet_func(alphabet, background, root='../data'):
def create_alphabet_dataset(batch_size, num_workers=2):
# This dataset is only for unsupervised clustering
# train_dataset (with data augmentation) is used during the optimization of clustering criteria
# test_dataset (without data augmentation) is used after the clustering is converged
binary_flip = transforms.Lambda(lambda x: 1 - x)
normalize = transforms.Normalize((0.086,), (0.235,))
train_dataset = Omniglot(
root=root, download=True, background=background,
transform=transforms.Compose(
[transforms.Resize(32),
transforms.ToTensor(),
binary_flip,
normalize]
),
deform=transforms.Compose([
transforms.RandomAffine(
degrees = (-5, 5),
translate = (0.1, 0.1),
scale = (0.8, 1.2),
shear = (-10, 10),
fillcolor = 255)
])
)
        # The following part depends on the internal implementation of the official Omniglot dataset loader
        # Only use the images which have the alphabet name in their path (_characters[cid])
valid_flat_character_images = [(imgname,cid) for imgname,cid in train_dataset._flat_character_images if alphabet in train_dataset._characters[cid]]
        ndata = len(valid_flat_character_images)  # the number of samples after filtering
        train_imgid2cid = [valid_flat_character_images[i][1] for i in range(ndata)]  # each tuple valid_flat_character_images[i] is (image_name, cid)
cid_set = set(train_imgid2cid) # The labels are not 0..c-1 here.
cid2ncid = {cid:ncid for ncid,cid in enumerate(cid_set)} # Create the mapping table for New cid (ncid)
valid_characters = {cid2ncid[cid]:train_dataset._characters[cid] for cid in cid_set}
for i in range(ndata): # Convert the labels to make sure it has the value {0..c-1}
valid_flat_character_images[i] = (valid_flat_character_images[i][0],cid2ncid[valid_flat_character_images[i][1]])
# Apply surgery to the dataset
train_dataset._flat_character_images = valid_flat_character_images
train_dataset._characters = valid_characters
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
num_workers=num_workers)
train_loader.num_classes = len(cid_set)
test_dataset = Omniglot(
root=root, download=True, background=background,
transform=transforms.Compose(
[transforms.Resize(32),
transforms.ToTensor(),
binary_flip,
normalize]
))
# Apply surgery to the dataset
test_dataset._flat_character_images = valid_flat_character_images # Set the new list to the dataset
test_dataset._characters = valid_characters
eval_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers)
eval_loader.num_classes = train_loader.num_classes
print('=> Alphabet %s has %d characters and %d images.'%(alphabet, train_loader.num_classes, len(train_dataset)))
return train_loader, eval_loader
return create_alphabet_dataset
omniglot_background_alphabets=[
'Alphabet_of_the_Magi',
'Gujarati',
'Anglo-Saxon_Futhorc',
'Hebrew',
'Arcadian',
'Inuktitut_(Canadian_Aboriginal_Syllabics)',
'Armenian',
'Japanese_(hiragana)',
'Asomtavruli_(Georgian)',
'Japanese_(katakana)',
'Balinese',
'Korean',
'Bengali',
'Latin',
'Blackfoot_(Canadian_Aboriginal_Syllabics)',
'Malay_(Jawi_-_Arabic)',
'Braille',
'Mkhedruli_(Georgian)',
'Burmese_(Myanmar)',
'N_Ko',
'Cyrillic',
'Ojibwe_(Canadian_Aboriginal_Syllabics)',
'Early_Aramaic',
'Sanskrit',
'Futurama',
'Syriac_(Estrangelo)',
'Grantha',
'Tagalog',
'Greek',
'Tifinagh'
]
omniglot_background_val_alphabets=[
'Alphabet_of_the_Magi',
'Japanese_(katakana)',
'Latin',
'Cyrillic',
'Grantha'
]
omniglot_evaluation_alphabets_mapping = {
'Malayalam':'Malayalam',
'Kannada':'Kannada',
'Syriac':'Syriac_(Serto)',
'Atemayar_Qelisayer':'Atemayar_Qelisayer',
'Gurmukhi':'Gurmukhi',
'Old_Church_Slavonic':'Old_Church_Slavonic_(Cyrillic)',
'Manipuri':'Manipuri',
'Atlantean':'Atlantean',
'Sylheti':'Sylheti',
'Mongolian':'Mongolian',
'Aurek':'Aurek-Besh',
'Angelic':'Angelic',
'ULOG':'ULOG',
'Oriya':'Oriya',
'Avesta':'Avesta',
'Tibetan':'Tibetan',
'Tengwar':'Tengwar',
'Keble':'Keble',
'Ge_ez':'Ge_ez',
'Glagolitic':'Glagolitic'
}
# Create the functions to access the individual alphabet dataset in Omniglot
for funcName, alphabetStr in omniglot_evaluation_alphabets_mapping.items():
locals()['Omniglot_eval_' + funcName] = omniglot_alphabet_func(alphabet=alphabetStr, background=False)
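# Example (sketch): the loop above generates one loader factory per evaluation
# alphabet at module level, e.g.
#
#     train_loader, eval_loader = Omniglot_eval_Tengwar(batch_size=100)
#
# which builds loaders restricted to the characters of the Tengwar alphabet.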
def show_batch(inp, title=None):
    """Show a batch of images."""
    import matplotlib
    matplotlib.use("Qt5Agg")  # must be selected before pyplot is imported to take effect
    import matplotlib.pyplot as plt
    inp = inp.numpy().transpose((1, 2, 0))
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.show()
    input()  # pause so the plot window stays open (raw_input() exists only in Python 2)
if __name__ == '__main__':
import numpy as np
train_loader, eval_loader = Omniglot_loader(batch_size=10, num_workers=2, root='./data_shallow14/datasets')
print('len', len(train_loader.dataset), len(eval_loader.dataset))
img, img_d, target, idx = next(iter(train_loader))
print(target, idx)
print(len(np.unique(target)))
out = torchvision.utils.make_grid(img_d)
show_batch(out, title=target)
|
py | 1a3c97ca0b1a8591b868b409c5f9faaef3e84d3f | # -*- coding: utf-8 -*-
"""Cisco DNA Center ComplianceDetailsOfDevice data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorB70E1B6A2F51A59690669A4B2Fd3F0(object):
"""ComplianceDetailsOfDevice request schema definition."""
def __init__(self):
super(JSONSchemaValidatorB70E1B6A2F51A59690669A4B2Fd3F0, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"deviceUuid": {
"type": "string"
},
"response": {
"items": {
"properties": {
"additionalDataURL": {
"type": "string"
},
"category": {
"type": "string"
},
"complianceType": {
"type": "string"
},
"deviceUuid": {
"type": "string"
},
"displayName": {
"type": "string"
},
"lastSyncTime": {
"type": "string"
},
"lastUpdateTime": {
"type": "string"
},
"message": {
"type": "string"
},
"sourceInfoList": {
"items": {
"properties": {
"appName": {
"type": "string"
},
"businessKey": {
"properties": {
"businessKeyAttributes": {
"type": "string"
},
"otherAttributes": {
"properties": {
"cfsAttributes": {
"type": "string"
},
"name": {
"type": "string"
}
},
"type": "object"
},
"resourceName": {
"type": "string"
}
},
"type": "object"
},
"count": {
"type": "number"
},
"diffList": {
"items": {
"properties": {
"businessKey": {
"type": "string"
},
"configuredValue": {
"type": "string"
},
"displayName": {
"type": "string"
},
"extendedAttributes": {
"type": "string"
},
"intendedValue": {
"type": "string"
},
"moveFromPath": {
"type": "string"
},
"op": {
"type": "string"
},
"path": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"displayName": {
"type": "string"
},
"licenseAppName": {
"type": "string"
},
"name": {
"type": "string"
},
"nameWithBusinessKey": {
"type": "string"
},
"networkProfileName": {
"type": "string"
},
"provisioningArea": {
"type": "string"
},
"sourceEnum": {
"type": "string"
},
"type": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"state": {
"type": "string"
},
"status": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"version": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
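# Example (sketch, not part of the generated SDK): validating a payload against
# this schema; the field values below are placeholders.
#
#     validator = JSONSchemaValidatorB70E1B6A2F51A59690669A4B2Fd3F0()
#     validator.validate({
#         "deviceUuid": "placeholder-uuid",
#         "response": [],
#         "version": "1.0",
#     })  # raises MalformedRequest if the payload violates the schema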
|
py | 1a3c98452e86564107b63e7acb0bddcdca024f12 | import objects.utils as utils
import discord
import random
import time
from discord.ext import commands
class MessagesEditCog(commands.Cog, name="Messages Cog"):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_raw_message_edit(self, payload):
answer = await self.bot.get_answer(payload.message_id)
if answer is not None:
message = await answer.channel.fetch_message(payload.message_id)
elif "guild_id" in payload.data and payload.data["guild_id"] is not None:
guild = self.bot.get_guild(int(payload.data["guild_id"]))
if guild is not None:
channel = guild.get_channel(int(payload.data["channel_id"]))
message = await channel.fetch_message(payload.message_id)
else:
return
else:
return
if answer is not None:
try:
await answer.delete()
except:
pass
await self.bot.process_commands(message)
@commands.Cog.listener()
async def on_raw_message_delete(self, payload):
answer = await self.bot.get_answer(payload.message_id)
if answer is not None:
try:
await answer.delete()
except:
pass
@commands.Cog.listener()
async def on_raw_bulk_message_delete(self, payload):
answers = []
for message_id in payload.message_ids:
answer = await self.bot.get_answer(message_id)
if answer is not None:
answers.append(answer)
if len(answers) > 0:
try:
await answers[0].channel.delete_messages(answers)
except:
pass
def setup(bot):
bot.add_cog(MessagesEditCog(bot)) |
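# Note (assumption): this cog relies on a custom coroutine on the bot,
# `bot.get_answer(message_id)`, expected to return the bot's earlier reply
# (a discord.Message) to that command message, or None. A minimal sketch of
# such a bot:
#
#     class MyBot(commands.Bot):
#         def __init__(self, *args, **kwargs):
#             super().__init__(*args, **kwargs)
#             self._answers = {}  # command message id -> reply message
#
#         async def get_answer(self, message_id):
#             return self._answers.get(message_id)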
py | 1a3c991adf231bd04fd6f35229d3c7cb72a4ebb9 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=arguments-differ
"""Contains a (slow) Python simulator.
It simulates a qasm quantum circuit (an experiment) that has been compiled
to run on the simulator. It is exponential in the number of qubits.
The simulator is run using
.. code-block:: python
QasmSimulatorPy().run(qobj)
Where the input is a Qobj object and the output is a BasicAerJob object, which can
later be queried for the Result object. The result will contain a 'memory' data
field, which is a result of measurements for each shot.
"""
import uuid
import time
import logging
from math import log2
from collections import Counter
import numpy as np
from qiskit.util import local_hardware_info
from qiskit.providers.models import QasmBackendConfiguration
from qiskit.result import Result
from qiskit.providers import BaseBackend
from qiskit.providers.basicaer.basicaerjob import BasicAerJob
from .exceptions import BasicAerError
from .basicaertools import single_gate_matrix
from .basicaertools import cx_gate_matrix
from .basicaertools import einsum_vecmul_index
logger = logging.getLogger(__name__)
class QasmSimulatorPy(BaseBackend):
"""Python implementation of a qasm simulator."""
MAX_QUBITS_MEMORY = int(log2(local_hardware_info()['memory'] * (1024 ** 3) / 16))
DEFAULT_CONFIGURATION = {
'backend_name': 'qasm_simulator',
'backend_version': '2.0.0',
'n_qubits': min(24, MAX_QUBITS_MEMORY),
'url': 'https://github.com/Qiskit/qiskit-terra',
'simulator': True,
'local': True,
'conditional': True,
'open_pulse': False,
'memory': True,
'max_shots': 65536,
'coupling_map': None,
'description': 'A python simulator for qasm experiments',
'basis_gates': ['u1', 'u2', 'u3', 'cx', 'id', 'unitary'],
'gates': [
{
'name': 'u1',
'parameters': ['lambda'],
'qasm_def': 'gate u1(lambda) q { U(0,0,lambda) q; }'
},
{
'name': 'u2',
'parameters': ['phi', 'lambda'],
'qasm_def': 'gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }'
},
{
'name': 'u3',
'parameters': ['theta', 'phi', 'lambda'],
'qasm_def': 'gate u3(theta,phi,lambda) q { U(theta,phi,lambda) q; }'
},
{
'name': 'cx',
'parameters': ['c', 't'],
'qasm_def': 'gate cx c,t { CX c,t; }'
},
{
'name': 'id',
'parameters': ['a'],
'qasm_def': 'gate id a { U(0,0,0) a; }'
},
{
'name': 'unitary',
'parameters': ['matrix'],
'qasm_def': 'unitary(matrix) q1, q2,...'
}
]
}
DEFAULT_OPTIONS = {
"initial_statevector": None,
"chop_threshold": 1e-15
}
# Class level variable to return the final state at the end of simulation
# This should be set to True for the statevector simulator
SHOW_FINAL_STATE = False
def __init__(self, configuration=None, provider=None):
super().__init__(configuration=(
configuration or QasmBackendConfiguration.from_dict(self.DEFAULT_CONFIGURATION)),
provider=provider)
# Define attributes in __init__.
self._local_random = np.random.RandomState()
self._classical_memory = 0
self._classical_register = 0
self._statevector = 0
self._number_of_cmembits = 0
self._number_of_qubits = 0
self._shots = 0
self._memory = False
self._initial_statevector = self.DEFAULT_OPTIONS["initial_statevector"]
self._chop_threshold = self.DEFAULT_OPTIONS["chop_threshold"]
self._qobj_config = None
# TEMP
self._sample_measure = False
def _add_unitary(self, gate, qubits):
"""Apply an N-qubit unitary matrix.
Args:
gate (matrix_like): an N-qubit unitary matrix
qubits (list): the list of N-qubits.
"""
# Get the number of qubits
num_qubits = len(qubits)
# Compute einsum index string for 1-qubit matrix multiplication
indexes = einsum_vecmul_index(qubits, self._number_of_qubits)
# Convert to complex rank-2N tensor
gate_tensor = np.reshape(np.array(gate, dtype=complex),
num_qubits * [2, 2])
# Apply matrix multiplication
self._statevector = np.einsum(indexes, gate_tensor, self._statevector,
dtype=complex, casting='no')
def _get_measure_outcome(self, qubit):
"""Simulate the outcome of measurement of a qubit.
Args:
qubit (int): the qubit to measure
Return:
tuple: pair (outcome, probability) where outcome is '0' or '1' and
probability is the probability of the returned outcome.
"""
# Axis for numpy.sum to compute probabilities
axis = list(range(self._number_of_qubits))
axis.remove(self._number_of_qubits - 1 - qubit)
probabilities = np.sum(np.abs(self._statevector) ** 2, axis=tuple(axis))
# Compute einsum index string for 1-qubit matrix multiplication
random_number = self._local_random.rand()
if random_number < probabilities[0]:
return '0', probabilities[0]
# Else outcome was '1'
return '1', probabilities[1]
def _add_sample_measure(self, measure_params, num_samples):
"""Generate memory samples from current statevector.
Args:
measure_params (list): List of (qubit, cmembit) values for
measure instructions to sample.
num_samples (int): The number of memory samples to generate.
Returns:
list: A list of memory values in hex format.
"""
# Get unique qubits that are actually measured and sort in
# ascending order
measured_qubits = sorted(list({qubit for qubit, cmembit in measure_params}))
num_measured = len(measured_qubits)
# We use the axis kwarg for numpy.sum to compute probabilities
# this sums over all non-measured qubits to return a vector
# of measure probabilities for the measured qubits
axis = list(range(self._number_of_qubits))
for qubit in reversed(measured_qubits):
# Remove from largest qubit to smallest so list position is correct
# with respect to position from end of the list
axis.remove(self._number_of_qubits - 1 - qubit)
probabilities = np.reshape(np.sum(np.abs(self._statevector) ** 2,
axis=tuple(axis)),
2 ** num_measured)
# Generate samples on measured qubits as ints with qubit
# position in the bit-string for each int given by the qubit
# position in the sorted measured_qubits list
samples = self._local_random.choice(range(2 ** num_measured),
num_samples, p=probabilities)
# Convert the ints to bitstrings
memory = []
for sample in samples:
classical_memory = self._classical_memory
for qubit, cmembit in measure_params:
pos = measured_qubits.index(qubit)
qubit_outcome = int((sample & (1 << pos)) >> pos)
membit = 1 << cmembit
classical_memory = (classical_memory & (~membit)) | (qubit_outcome << cmembit)
value = bin(classical_memory)[2:]
memory.append(hex(int(value, 2)))
return memory
def _add_qasm_measure(self, qubit, cmembit, cregbit=None):
"""Apply a measure instruction to a qubit.
Args:
qubit (int): qubit is the qubit measured.
cmembit (int): is the classical memory bit to store outcome in.
cregbit (int, optional): is the classical register bit to store outcome in.
"""
# get measure outcome
outcome, probability = self._get_measure_outcome(qubit)
# update classical state
membit = 1 << cmembit
self._classical_memory = (self._classical_memory & (~membit)) | (int(outcome) << cmembit)
if cregbit is not None:
regbit = 1 << cregbit
self._classical_register = \
(self._classical_register & (~regbit)) | (int(outcome) << cregbit)
# update quantum state
if outcome == '0':
update_diag = [[1 / np.sqrt(probability), 0], [0, 0]]
else:
update_diag = [[0, 0], [0, 1 / np.sqrt(probability)]]
        # project the quantum state onto the measured outcome and renormalize
self._add_unitary(update_diag, [qubit])
def _add_qasm_reset(self, qubit):
"""Apply a reset instruction to a qubit.
Args:
qubit (int): the qubit being rest
This is done by doing a simulating a measurement
outcome and projecting onto the outcome state while
renormalizing.
"""
# get measure outcome
outcome, probability = self._get_measure_outcome(qubit)
# update quantum state
if outcome == '0':
update = [[1 / np.sqrt(probability), 0], [0, 0]]
self._add_unitary(update, [qubit])
else:
update = [[0, 1 / np.sqrt(probability)], [0, 0]]
self._add_unitary(update, [qubit])
def _validate_initial_statevector(self):
"""Validate an initial statevector"""
# If initial statevector isn't set we don't need to validate
if self._initial_statevector is None:
return
# Check statevector is correct length for number of qubits
length = len(self._initial_statevector)
required_dim = 2 ** self._number_of_qubits
if length != required_dim:
raise BasicAerError('initial statevector is incorrect length: ' +
'{} != {}'.format(length, required_dim))
def _set_options(self, qobj_config=None, backend_options=None):
"""Set the backend options for all experiments in a qobj"""
# Reset default options
self._initial_statevector = self.DEFAULT_OPTIONS["initial_statevector"]
self._chop_threshold = self.DEFAULT_OPTIONS["chop_threshold"]
if backend_options is None:
backend_options = {}
# Check for custom initial statevector in backend_options first,
# then config second
if 'initial_statevector' in backend_options:
self._initial_statevector = np.array(backend_options['initial_statevector'],
dtype=complex)
elif hasattr(qobj_config, 'initial_statevector'):
self._initial_statevector = np.array(qobj_config.initial_statevector,
dtype=complex)
if self._initial_statevector is not None:
# Check the initial statevector is normalized
norm = np.linalg.norm(self._initial_statevector)
if round(norm, 12) != 1:
raise BasicAerError('initial statevector is not normalized: ' +
'norm {} != 1'.format(norm))
# Check for custom chop threshold
# Replace with custom options
if 'chop_threshold' in backend_options:
self._chop_threshold = backend_options['chop_threshold']
elif hasattr(qobj_config, 'chop_threshold'):
self._chop_threshold = qobj_config.chop_threshold
def _initialize_statevector(self):
"""Set the initial statevector for simulation"""
if self._initial_statevector is None:
# Set to default state of all qubits in |0>
self._statevector = np.zeros(2 ** self._number_of_qubits,
dtype=complex)
self._statevector[0] = 1
else:
self._statevector = self._initial_statevector.copy()
# Reshape to rank-N tensor
self._statevector = np.reshape(self._statevector,
self._number_of_qubits * [2])
def _get_statevector(self):
"""Return the current statevector"""
vec = np.reshape(self._statevector, 2 ** self._number_of_qubits)
vec[abs(vec) < self._chop_threshold] = 0.0
return vec
def _validate_measure_sampling(self, experiment):
"""Determine if measure sampling is allowed for an experiment
Args:
experiment (QobjExperiment): a qobj experiment.
"""
# If shots=1 we should disable measure sampling.
# This is also required for statevector simulator to return the
# correct final statevector without silently dropping final measurements.
if self._shots <= 1:
self._sample_measure = False
return
# Check for config flag
if hasattr(experiment.config, 'allows_measure_sampling'):
self._sample_measure = experiment.config.allows_measure_sampling
# If flag isn't found do a simple test to see if a circuit contains
# no reset instructions, and no gates instructions after
# the first measure.
else:
measure_flag = False
for instruction in experiment.instructions:
# If circuit contains reset operations we cannot sample
if instruction.name == "reset":
self._sample_measure = False
return
# If circuit contains a measure option then we can
# sample only if all following operations are measures
if measure_flag:
# If we find a non-measure instruction
# we cannot do measure sampling
if instruction.name not in ["measure", "barrier", "id", "u0"]:
self._sample_measure = False
return
elif instruction.name == "measure":
measure_flag = True
# If we made it to the end of the circuit without returning
# measure sampling is allowed
self._sample_measure = True
def run(self, qobj, backend_options=None):
"""Run qobj asynchronously.
Args:
qobj (Qobj): payload of the experiment
backend_options (dict): backend options
Returns:
BasicAerJob: derived from BaseJob
Additional Information:
backend_options: Is a dict of options for the backend. It may contain
* "initial_statevector": vector_like
The "initial_statevector" option specifies a custom initial
initial statevector for the simulator to be used instead of the all
zero state. This size of this vector must be correct for the number
of qubits in all experiments in the qobj.
Example::
backend_options = {
"initial_statevector": np.array([1, 0, 0, 1j]) / np.sqrt(2),
}
"""
self._set_options(qobj_config=qobj.config,
backend_options=backend_options)
job_id = str(uuid.uuid4())
job = BasicAerJob(self, job_id, self._run_job, qobj)
job.submit()
return job
def _run_job(self, job_id, qobj):
"""Run experiments in qobj
Args:
job_id (str): unique id for the job.
qobj (Qobj): job description
Returns:
Result: Result object
"""
self._validate(qobj)
result_list = []
self._shots = qobj.config.shots
self._memory = getattr(qobj.config, 'memory', False)
self._qobj_config = qobj.config
start = time.time()
for experiment in qobj.experiments:
result_list.append(self.run_experiment(experiment))
end = time.time()
result = {'backend_name': self.name(),
'backend_version': self._configuration.backend_version,
'qobj_id': qobj.qobj_id,
'job_id': job_id,
'results': result_list,
'status': 'COMPLETED',
'success': True,
'time_taken': (end - start),
'header': qobj.header.to_dict()}
return Result.from_dict(result)
def run_experiment(self, experiment):
"""Run an experiment (circuit) and return a single experiment result.
Args:
experiment (QobjExperiment): experiment from qobj experiments list
Returns:
dict: A result dictionary which looks something like::
{
"name": name of this experiment (obtained from qobj.experiment header)
"seed": random seed used for simulation
"shots": number of shots used in the simulation
"data":
{
"counts": {'0x9: 5, ...},
"memory": ['0x9', '0xF', '0x1D', ..., '0x9']
},
"status": status string for the simulation
"success": boolean
"time_taken": simulation time of this single experiment
}
Raises:
BasicAerError: if an error occurred.
"""
start = time.time()
self._number_of_qubits = experiment.config.n_qubits
self._number_of_cmembits = experiment.config.memory_slots
self._statevector = 0
self._classical_memory = 0
self._classical_register = 0
self._sample_measure = False
# Validate the dimension of initial statevector if set
self._validate_initial_statevector()
# Get the seed looking in circuit, qobj, and then random.
if hasattr(experiment.config, 'seed_simulator'):
seed_simulator = experiment.config.seed_simulator
elif hasattr(self._qobj_config, 'seed_simulator'):
seed_simulator = self._qobj_config.seed_simulator
else:
            # For compatibility on Windows force dtype to be int32
# and set the maximum value to be (2 ** 31) - 1
seed_simulator = np.random.randint(2147483647, dtype='int32')
self._local_random.seed(seed=seed_simulator)
# Check if measure sampling is supported for current circuit
self._validate_measure_sampling(experiment)
# List of final counts for all shots
memory = []
# Check if we can sample measurements, if so we only perform 1 shot
# and sample all outcomes from the final state vector
if self._sample_measure:
shots = 1
# Store (qubit, cmembit) pairs for all measure ops in circuit to
# be sampled
measure_sample_ops = []
else:
shots = self._shots
for _ in range(shots):
self._initialize_statevector()
# Initialize classical memory to all 0
self._classical_memory = 0
self._classical_register = 0
for operation in experiment.instructions:
conditional = getattr(operation, 'conditional', None)
if isinstance(conditional, int):
conditional_bit_set = (self._classical_register >> conditional) & 1
if not conditional_bit_set:
continue
elif conditional is not None:
mask = int(operation.conditional.mask, 16)
if mask > 0:
value = self._classical_memory & mask
while (mask & 0x1) == 0:
mask >>= 1
value >>= 1
if value != int(operation.conditional.val, 16):
continue
# Check if single gate
if operation.name == 'unitary':
qubits = operation.qubits
gate = operation.params[0]
self._add_unitary(gate, qubits)
elif operation.name in ('U', 'u1', 'u2', 'u3'):
params = getattr(operation, 'params', None)
qubit = operation.qubits[0]
gate = single_gate_matrix(operation.name, params)
self._add_unitary(gate, [qubit])
                # Check if identity gate
                elif operation.name in ('id', 'u0'):
                    pass
                # Check if CX gate
                elif operation.name in ('CX', 'cx'):
qubit0 = operation.qubits[0]
qubit1 = operation.qubits[1]
gate = cx_gate_matrix()
self._add_unitary(gate, [qubit0, qubit1])
# Check if reset
elif operation.name == 'reset':
qubit = operation.qubits[0]
self._add_qasm_reset(qubit)
# Check if barrier
elif operation.name == 'barrier':
pass
# Check if measure
elif operation.name == 'measure':
qubit = operation.qubits[0]
cmembit = operation.memory[0]
cregbit = operation.register[0] if hasattr(operation, 'register') else None
if self._sample_measure:
# If sampling measurements record the qubit and cmembit
# for this measurement for later sampling
measure_sample_ops.append((qubit, cmembit))
else:
# If not sampling perform measurement as normal
self._add_qasm_measure(qubit, cmembit, cregbit)
elif operation.name == 'bfunc':
mask = int(operation.mask, 16)
relation = operation.relation
val = int(operation.val, 16)
cregbit = operation.register
cmembit = operation.memory if hasattr(operation, 'memory') else None
compared = (self._classical_register & mask) - val
if relation == '==':
outcome = (compared == 0)
elif relation == '!=':
outcome = (compared != 0)
elif relation == '<':
outcome = (compared < 0)
elif relation == '<=':
outcome = (compared <= 0)
elif relation == '>':
outcome = (compared > 0)
elif relation == '>=':
outcome = (compared >= 0)
else:
raise BasicAerError('Invalid boolean function relation.')
# Store outcome in register and optionally memory slot
regbit = 1 << cregbit
self._classical_register = \
(self._classical_register & (~regbit)) | (int(outcome) << cregbit)
if cmembit is not None:
membit = 1 << cmembit
self._classical_memory = \
(self._classical_memory & (~membit)) | (int(outcome) << cmembit)
else:
backend = self.name()
err_msg = '{0} encountered unrecognized operation "{1}"'
raise BasicAerError(err_msg.format(backend, operation.name))
# Add final creg data to memory list
if self._number_of_cmembits > 0:
if self._sample_measure:
# If sampling we generate all shot samples from the final statevector
memory = self._add_sample_measure(measure_sample_ops, self._shots)
else:
# Turn classical_memory (int) into bit string and pad zero for unused cmembits
outcome = bin(self._classical_memory)[2:]
memory.append(hex(int(outcome, 2)))
# Add data
data = {'counts': dict(Counter(memory))}
# Optionally add memory list
if self._memory:
data['memory'] = memory
# Optionally add final statevector
if self.SHOW_FINAL_STATE:
data['statevector'] = self._get_statevector()
# Remove empty counts and memory for statevector simulator
if not data['counts']:
data.pop('counts')
if 'memory' in data and not data['memory']:
data.pop('memory')
end = time.time()
return {'name': experiment.header.name,
'seed_simulator': seed_simulator,
'shots': self._shots,
'data': data,
'status': 'DONE',
'success': True,
'time_taken': (end - start),
'header': experiment.header.to_dict()}
def _validate(self, qobj):
"""Semantic validations of the qobj which cannot be done via schemas."""
n_qubits = qobj.config.n_qubits
max_qubits = self.configuration().n_qubits
if n_qubits > max_qubits:
raise BasicAerError('Number of qubits {} '.format(n_qubits) +
'is greater than maximum ({}) '.format(max_qubits) +
'for "{}".'.format(self.name()))
for experiment in qobj.experiments:
name = experiment.header.name
if experiment.config.memory_slots == 0:
logger.warning('No classical registers in circuit "%s", '
'counts will be empty.', name)
elif 'measure' not in [op.name for op in experiment.instructions]:
logger.warning('No measurements in circuit "%s", '
'classical register will remain all zeros.', name)
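# Example usage (sketch; assumes a qiskit-terra release of the same era, where
# QuantumCircuit and execute are available at the package root):
#
#     from qiskit import QuantumCircuit, execute
#
#     qc = QuantumCircuit(2, 2)
#     qc.h(0)
#     qc.cx(0, 1)
#     qc.measure([0, 1], [0, 1])
#     counts = execute(qc, QasmSimulatorPy(), shots=1024).result().get_counts()
#     print(counts)  # roughly {'00': 512, '11': 512}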
|
py | 1a3c999841966f75b32cebfb7f84c1763301c1da | # THIS IS PART 2 SINCE I SKIPPED BADLIBS!
from random import randint
guess_this_number = randint(1,10)
guess = 0
guesses = 0
clue = ""
first_round = True
while guess != guess_this_number:
if first_round == True:
guess = int(input("Enter an integer number: "))
        first_round = False
else:
print("- - - - - - - - - - - - - - - -")
print(guess, clue)
guess = int(input("Guess again: "))
if (guess < guess_this_number):
clue = "is too low!"
else:
clue = "is too high!"
guesses += 1
print("- - - - - - - - - - - - - - - - - - - - - - - -")
print(guess_this_number, "is correct and you made it in ", guesses, " guesses!")
print("- - - - - - - - - - - - - - - - - - - - - - - -")
|
py | 1a3c9ad20b9f8ed09246fa0ac9d235431e3c05f5 | import os
import subprocess
import sys
from functools import partial
from sofa_config import *
from sofa_print import *
def sofa_viz(cfg):
sofa_home = os.path.dirname(os.path.realpath(__file__))
subprocess.Popen(
['bash', '-c', 'cp %s/../sofaboard/* %s;' % (sofa_home, cfg.logdir)])
subprocess.Popen(['sleep', '2'])
print_warning(
'If your rendering timeline is slow, please try \033[4msofa report --plot_ratio=10\033[24m to downsample scatter points,')
print_warning('and then \033[4msofa viz\033[24m to see the downsampled results.')
    print_hint('SOFA Visualization is listening on port \033[4m\033[97mhttp://localhost:%d\033[24m\033[0m\033[24m' % (cfg.viz_port))
print_hint('To change port, please run command: \033[4msofa viz --viz_port=PortNumber\033[24m')
print_hint('Please open your browser to start profiling.')
print_hint('After profiling, please enter Ctrl+C to exit.')
os.system(
'cd %s && python3.6 -m http.server %d 2>&1 1> /dev/null; cd -' %
(cfg.logdir,cfg.viz_port))
|
py | 1a3c9b1c55b27b4735741d6a6174af9ac71ee8f4 | import traceback
from queue import Empty
from queue import Queue
from threading import RLock, Thread
from .promise import Promise
class Task(object):
"""
Task runs a python function `target` when called.
"""
def __init__(self, target, *args, **kwargs):
"""Initialize the Task object."""
self.target = target
self.args = args
self.kwargs = kwargs
def run(self):
self.target(*self.args, **self.kwargs)
class TaskQueue(Thread):
"""
A background thread to start all queued processes one after another.
"""
def __init__(self):
super().__init__(daemon=True)
self.queue = Queue()
self.active_task = None
        self.running = False
        # threading.Thread no longer defines a `_block` lock in Python 3.3+,
        # so create one explicitly to guard access to `active_task`.
        self._block = RLock()
def __del__(self):
self.running = False
def execute(self, task):
self.queue.put(task)
def cancel_all(self):
try:
            while not self.queue.empty():
self.queue.get_nowait()
self.queue.task_done()
except Empty:
pass
def busy(self):
result = False
with self._block:
result = self.active_task is not None
return result
def run(self):
self.running = True
while self.running:
task = self.queue.get()
with self._block:
self.active_task = task
try:
task.run()
except:
traceback.print_exc()
finally:
self.queue.task_done()
with self._block:
self.active_task = None
_tasks = TaskQueue()
_tasks.start()
def busy():
return _tasks.busy()
def execute_async(func, *args, **kwargs):
return Promise(lambda resolve_fn: _tasks.execute(
Task(func, resolve_fn, *args, **kwargs)))
def cancel_all():
_tasks.cancel_all()
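# Example (sketch): the callable handed to execute_async receives the promise's
# resolve function as its first argument and must call it with the result;
# `do_work` is a hypothetical helper.
#
#     def fetch(resolve, url):
#         resolve(do_work(url))
#
#     promise = execute_async(fetch, "https://example.org")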
|
py | 1a3c9bc35541a126814dfc28f8f40a8699956def | import unittest
import cupy
from cupy import cuda
from cupy import testing
import numpy
@testing.gpu
class TestFromData(unittest.TestCase):
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array(self, xp, dtype, order):
return xp.array([[1, 2, 3], [2, 3, 4]], dtype=dtype, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_from_numpy(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), numpy, dtype)
return xp.array(a, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.with_requires('numpy>=1.10')
@testing.numpy_cupy_array_equal()
def test_array_from_numpy_broad_cast(self, xp, dtype, order):
a = testing.shaped_arange((2, 1, 4), numpy, dtype)
a = numpy.broadcast_to(a, (2, 3, 4))
return xp.array(a, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_copy(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.array(a, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_copy_is_copied(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.array(a, order=order)
a.fill(0)
return b
@testing.for_orders('CFAK')
@testing.for_all_dtypes(name='dtype1', no_complex=True)
@testing.for_all_dtypes(name='dtype2')
@testing.numpy_cupy_array_equal()
def test_array_copy_with_dtype(self, xp, dtype1, dtype2, order):
# complex to real makes no sense
a = testing.shaped_arange((2, 3, 4), xp, dtype1)
return xp.array(a, dtype=dtype2, order=order)
@testing.for_orders('CFAK')
@testing.numpy_cupy_array_equal()
def test_array_copy_with_dtype_being_none(self, xp, order):
a = testing.shaped_arange((2, 3, 4), xp)
return xp.array(a, dtype=None, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_no_copy(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.array(a, copy=False, order=order)
a.fill(0)
return b
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_f_contiguous_input(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype, order='F')
b = xp.array(a, copy=False, order=order)
return b
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_f_contiguous_output(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.array(a, copy=False, order='F')
self.assertTrue(b.flags.f_contiguous)
return b
@testing.multi_gpu(2)
def test_array_multi_device(self):
with cuda.Device(0):
x = testing.shaped_arange((2, 3, 4), cupy, dtype='f')
with cuda.Device(1):
y = cupy.array(x)
self.assertIsInstance(y, cupy.ndarray)
self.assertIsNot(x, y) # Do copy
self.assertEqual(int(x.device), 0)
self.assertEqual(int(y.device), 1)
testing.assert_array_equal(x, y)
@testing.multi_gpu(2)
def test_array_multi_device_zero_size(self):
with cuda.Device(0):
x = testing.shaped_arange((0,), cupy, dtype='f')
with cuda.Device(1):
y = cupy.array(x)
self.assertIsInstance(y, cupy.ndarray)
self.assertIsNot(x, y) # Do copy
assert x.device.id == 0
assert y.device.id == 1
testing.assert_array_equal(x, y)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_no_copy_ndmin(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.array(a, copy=False, ndmin=5)
self.assertEqual(a.shape, (2, 3, 4))
a.fill(0)
return b
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.asarray(a)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray_is_not_copied(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.asarray(a)
a.fill(0)
return b
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray_with_order(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.asarray(a, order=order)
if order in ['F', 'f']:
self.assertTrue(b.flags.f_contiguous)
else:
self.assertTrue(b.flags.c_contiguous)
return b
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray_preserves_numpy_array_order(self, xp, dtype, order):
a_numpy = testing.shaped_arange((2, 3, 4), numpy, dtype, order)
b = xp.asarray(a_numpy)
assert b.flags.f_contiguous == a_numpy.flags.f_contiguous
assert b.flags.c_contiguous == a_numpy.flags.c_contiguous
return b
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asanyarray_with_order(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.asanyarray(a, order=order)
if order in ['F', 'f']:
self.assertTrue(b.flags.f_contiguous)
else:
self.assertTrue(b.flags.c_contiguous)
return b
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray_from_numpy(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), numpy, dtype)
b = xp.asarray(a, order=order)
if order in ['F', 'f']:
self.assertTrue(b.flags.f_contiguous)
else:
self.assertTrue(b.flags.c_contiguous)
return b
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray_with_order_copy_behavior(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.asarray(a, order=order)
a.fill(0)
return b
@testing.for_all_dtypes()
def test_asarray_cuda_array_interface(self, dtype):
a = testing.shaped_arange((2, 3, 4), cupy, dtype)
b = cupy.asarray(DummyObjectWithCudaArrayInterface(a))
testing.assert_array_equal(a, b)
@testing.for_all_dtypes()
def test_asarray_cuda_array_interface_is_not_copied(self, dtype):
a = testing.shaped_arange((2, 3, 4), cupy, dtype)
b = cupy.asarray(DummyObjectWithCudaArrayInterface(a))
a.fill(0)
testing.assert_array_equal(a, b)
@testing.for_all_dtypes()
def test_asarray_cuda_array_interface_ignores_order(self, dtype):
a = testing.shaped_arange((2, 3, 4), cupy, dtype)
b = cupy.asarray(DummyObjectWithCudaArrayInterface(a), order='F')
self.assertTrue(b.flags.c_contiguous)
testing.assert_array_equal(a, b)
def test_ascontiguousarray_on_noncontiguous_array(self):
a = testing.shaped_arange((2, 3, 4))
b = a.transpose(2, 0, 1)
c = cupy.ascontiguousarray(b)
self.assertTrue(c.flags.c_contiguous)
testing.assert_array_equal(b, c)
def test_ascontiguousarray_on_contiguous_array(self):
a = testing.shaped_arange((2, 3, 4))
b = cupy.ascontiguousarray(a)
self.assertIs(a, b)
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_copy(self, xp, dtype, order):
a = xp.zeros((2, 3, 4), dtype=dtype)
b = xp.copy(a, order=order)
a[1] = 1
return b
@testing.multi_gpu(2)
@testing.for_CF_orders()
@testing.for_all_dtypes()
def test_copy_multigpu(self, dtype, order):
with cuda.Device(0):
src = cupy.random.uniform(-1, 1, (2, 3)).astype(dtype)
with cuda.Device(1):
dst = cupy.copy(src, order)
testing.assert_allclose(src, dst, rtol=0, atol=0)
@testing.for_CF_orders()
@testing.numpy_cupy_equal()
def test_copy_order(self, xp, order):
a = xp.zeros((2, 3, 4), order=order)
b = xp.copy(a)
return (b.flags.c_contiguous, b.flags.f_contiguous)
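# Minimal helper used by the tests above: it exposes the CUDA Array Interface protocol
# (shape, typestr, descr and a raw device pointer), which is what allows cupy.asarray()
# to wrap external GPU memory without copying it.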
class DummyObjectWithCudaArrayInterface():
def __init__(self, a):
self.a = a
@property
def __cuda_array_interface__(self):
desc = {
'shape': self.a.shape,
'typestr': self.a.dtype.str,
'descr': self.a.dtype.descr,
'data': (self.a.data.mem.ptr, False),
'version': 0,
}
return desc
@testing.parameterize(
*testing.product({
'ndmin': [0, 1, 2, 3],
'copy': [True, False],
'xp': [numpy, cupy]
})
)
class TestArrayPreservationOfShape(unittest.TestCase):
@testing.for_all_dtypes()
def test_cupy_array(self, dtype):
shape = 2, 3
a = testing.shaped_arange(shape, self.xp, dtype)
cupy.array(a, copy=self.copy, ndmin=self.ndmin)
        # Check that cupy.array does not alter
        # the shape of the original array.
self.assertEqual(a.shape, shape)
@testing.parameterize(
*testing.product({
'ndmin': [0, 1, 2, 3],
'copy': [True, False],
'xp': [numpy, cupy]
})
)
class TestArrayCopy(unittest.TestCase):
@testing.for_all_dtypes()
def test_cupy_array(self, dtype):
a = testing.shaped_arange((2, 3), self.xp, dtype)
actual = cupy.array(a, copy=self.copy, ndmin=self.ndmin)
should_copy = (self.xp is numpy) or self.copy
# TODO(Kenta Oono): Better determination of copy.
is_copied = not ((actual is a) or (actual.base is a) or
(actual.base is a.base and a.base is not None))
self.assertEqual(should_copy, is_copied)
class TestArrayInvalidObject(unittest.TestCase):
def test_invalid_type(self):
a = numpy.array([1, 2, 3], dtype=object)
with self.assertRaises(ValueError):
cupy.array(a)
|
py | 1a3c9cf6333e0f939f9ea20f8ecb5b03b7a7fa1c | #coding:utf-8
import pyglet
window = pyglet.window.Window()
label = pyglet.text.Label('Hello, world',
font_name='Times New Roman',
font_size=36,
x=window.width//2, y=window.height//2,
anchor_x='center', anchor_y='center')
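# anchor_x/anchor_y make the (x, y) coordinates refer to the centre of the text,
# so the label is drawn in the middle of the window.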
@window.event
def on_draw():
window.clear()
label.draw()
pyglet.app.run() |
py | 1a3c9cffc0a7fb9c08c78976a4628d74e3e951e9 |
from env import *
from replayBuffer import *
from params import *
# Explicit imports for the TensorFlow / TF-Agents names used below; the local wildcard
# imports above may already provide some of them (re-importing is harmless).
# train_metrics, ShowProgress and replay_buffer_capacity are assumed to come from the
# local env / replayBuffer / params modules.
import tensorflow as tf
from tf_agents.agents.dqn.dqn_agent import DqnAgent
from tf_agents.drivers.dynamic_episode_driver import DynamicEpisodeDriver
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from tf_agents.eval.metric_utils import log_metrics
from tf_agents.networks.q_network import QNetwork
from tf_agents.networks.q_rnn_network import QRnnNetwork
from tf_agents.policies.random_tf_policy import RandomTFPolicy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories.trajectory import to_transition
from tf_agents.utils.common import function
env = HyperGraphEnv()
tf_env = TFPyEnvironment(env)
# hyperparameters
fc_layer_params=[64,64,64,64,64,64]
q_net = QRnnNetwork(tf_env.observation_spec(), tf_env.action_spec(), lstm_size=(16,))
# Note: the recurrent Q-network above is defined but unused; the agent below is built
# on the plain feed-forward QNetwork (q_net_2).
q_net_2 = QNetwork(
    tf_env.observation_spec(),
    tf_env.action_spec(),
    fc_layer_params=fc_layer_params)
#agent
train_step = tf.Variable(0)
#optimizer = tf.keras.optimizers.RMSprop(learning_rate=2.5e-4, rho=0.95, momentum=0.0, epsilon=0.00001, centered= True)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)
decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate = 1.0,
decay_steps = 25000,
end_learning_rate = 0.03
)
tf_agent = DqnAgent(tf_env.time_step_spec(),
tf_env.action_spec(),
q_network=q_net_2,
optimizer = optimizer,
td_errors_loss_fn = tf.keras.losses.Huber(reduction="none"),
train_step_counter = train_step,
target_update_period = 100,
epsilon_greedy = lambda : decay_fn(train_step))
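# epsilon_greedy is driven by the PolynomialDecay schedule defined above: exploration
# starts fully random (epsilon = 1.0) and anneals to 0.03 over the first 25,000 train steps.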
tf_agent.initialize()
#replay buffer
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
#data_spec = agent.collect_data_spec,
data_spec = tf_agent.collect_data_spec,
batch_size = tf_env.batch_size,
max_length = replay_buffer_capacity
)
replay_buffer_observer = replay_buffer.add_batch
collect_driver = DynamicEpisodeDriver(
tf_env,
tf_agent.collect_policy,
observers=[replay_buffer_observer] + train_metrics,
num_episodes=2)
initial_collect_policy = RandomTFPolicy(tf_env.time_step_spec(),
tf_env.action_spec())
init_driver = DynamicEpisodeDriver(
tf_env,
initial_collect_policy,
observers=[replay_buffer.add_batch, ShowProgress(20000)],
num_episodes=1000)
final_time_step, final_policy_state = init_driver.run()
tf.random.set_seed(9) # chosen to show an example of trajectory at the end of an episode
trajectories, buffer_info = next(iter(replay_buffer.as_dataset(
sample_batch_size=2,
num_steps= 2,
single_deterministic_pass=False)))
time_steps, action_steps, next_time_steps = to_transition(trajectories)
time_steps.observation.shape
dataset = replay_buffer.as_dataset(
sample_batch_size=64,
num_steps= 2,
num_parallel_calls=3).prefetch(3)
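# num_steps=2 samples pairs of consecutive time steps, which is what DqnAgent.train()
# needs in order to form (observation, action, reward, next observation) transitions.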
collect_driver.run = function(collect_driver.run)
tf_agent.train = function(tf_agent.train)
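# Assuming function is tf_agents.utils.common.function (imported above), these lines wrap
# the collect and train calls in tf.function so they run as compiled graphs instead of eagerly.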
def train_agent(n_iterations):
time_step = None
policy_state = tf_agent.collect_policy.get_initial_state(tf_env.batch_size)
iterator = iter(dataset)
for iteration in range(n_iterations):
time_step, policy_state = collect_driver.run(time_step, policy_state)
trajectories, buffer_info = next(iterator)
train_loss = tf_agent.train(trajectories)
print("\r{} loss:{:.5f}".format(
iteration, train_loss.loss.numpy()), end="")
if iteration % 100 == 0:
log_metrics(train_metrics)
train_agent(n_iterations=500000)
|
py | 1a3ca0001ba4f602cc4638d528355e06350bf046 | from mandrake.bot.commands import CommandContext
from mandrake import errors, database
async def ping(ctx: CommandContext):
await ctx.send(f":ping_pong: Pong! \nPing time: {int(ctx.client.latency*1000)} ms")
async def emby(ctx: CommandContext):
    if ctx.remaining() is None:
await ctx.error_react()
return
try:
if not await database.check_emby_user(ctx):
await database.add_emby_user(ctx, ctx.remaining())
await ctx.ok_react()
else:
await ctx.error_react()
await ctx.dm("You already have an Emby username registered with the bot!")
except Exception as e:
await errors.send_error(ctx, e)
await ctx.error_react()
|
py | 1a3ca11c960196d4aae9c68445e42628a5212d3b | #!/usr/bin/env python
'''
some index fastq's have a weird number of quality line characters. some have an extra
character; others seem to have a single character.
this script truncates quality lines longer than the sequence line and pads quality
lines that are shorter than the sequence line.
author : scott w olesen <[email protected]>
'''
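# Illustration (hypothetical read): for a 4-base sequence line 'ACGT', a quality line
# 'FFFFF' gets truncated to 'FFFF', while 'FF' gets padded to 'FFFF' using --fill_char.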
import argparse, sys, os, itertools
sys.path.append(os.path.normpath(os.path.abspath(__file__) + '/../..'))
if __name__ == '__main__':
# parse command line arguments
parser = argparse.ArgumentParser(description='correct quality line length')
parser.add_argument('fastq', help='input barcode fastq')
parser.add_argument('-z', '--fill_char', default='F', help='fill character (default: F)')
parser.add_argument('-o', '--output', default=sys.stdout, type=argparse.FileType('w'), help='output fastq (default: stdout)')
args = parser.parse_args()
with open(args.fastq) as f:
        # itertools.izip is Python 2-only; zip gives the same 4-line grouping on both versions.
        for four_lines in zip(*[iter(f)]*4):
at_line, seq_line, plus_line, quality_line = [l.rstrip() for l in four_lines]
ls = len(seq_line)
lq = len(quality_line)
if lq < ls:
quality_line = quality_line.ljust(len(seq_line), args.fill_char)
elif lq > ls:
quality_line = quality_line[0: ls]
args.output.write("\n".join([at_line, seq_line, plus_line, quality_line]) + "\n") |
py | 1a3ca13f1ff68629815cf24f839d0f4e1f71c1f3 | from typing import Optional
import os
from fastapi import FastAPI
app = FastAPI()
# multiple path parameters.
@app.get("/users/{user_id}/items/{item_id}")
async def read_user_item(
user_id: int,
item_id: str,
q: Optional[str] = None,
short: bool = False
):
# http://127.0.0.1:11111/users/1/items/bladeoftheruinedking?q=lifesteal&short=no
item = {"item_id": item_id, "owner_id": user_id}
if q:
item.update({"q": q})
if not short:
item.update(
{"description": "This is an amazing item that has a long description"}
)
return item
# required query parameter
@app.get("/items/{item_id}")
async def read_item(item_id: str, needy: str):
# this throws an error saying that the needy query parameter is required
# http://127.0.0.1:11111/items/1
# after filling the needy parameter it works.
# http://127.0.0.1:11111/items/1?needy=filled
item = {"item_id": item_id, "needy": needy}
return item
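# FastAPI treats query parameters declared without a default (like `needy` above) as
# required and returns a 422 validation error when they are missing.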
if __name__ == '__main__':
print(f'INFO: Starting the FASTAPI server...')
print(f'INFO: DOCS on: http://127.0.0.1:11111/docs')
os.system(f"uvicorn {(__file__.split('/')[-1]).split('.')[0]}:app --host 127.0.0.1 --port 11111")
|