ext | sha | content |
---|---|---|
py | 1a41f4495abe766e1231192ea705ac9e3a51b27e | # -*- coding: utf-8 -*-
import pickle
from os import path
import jieba
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
comment = []
with open('quan.txt', mode = 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
arr = line.split(',')
if len(arr) == 5:
comment.append(arr[4].replace('\n',''))
# comment_after_split = jieba.cut(str(comment),cut_all=False)
# wl_space_split = ''.join(comment_after_split)
# wordcloud=WordCloud(font_path="fyuan.ttf",background_color="black",width=600,height=300,max_words=50).generate(wl_space_split)
# # 3. generate the image
# image=wordcloud.to_image()
# # 4. show the image
# image.show()
comment_after_split = jieba.cut(str(comment),cut_all=False)
wl_space_split = ' '.join(comment_after_split)
# # print(wl_space_split)
background_image = plt.imread('IMG_3246.JPG')
# set stopwords (words excluded from the cloud)
stopwords = STOPWORDS.copy()
stopwords.add('电影')    # "movie"
stopwords.add('一部')    # "a/one (film)"
stopwords.add('里面')    # "inside"
stopwords.add('讲')      # "tells"
stopwords.add('是')      # "is"
stopwords.add('有点')    # "a bit"
stopwords.add('还是')    # "still"
stopwords.add('这部')    # "this (film)"
stopwords.add('真的')    # "really"
stopwords.add('也许')    # "perhaps"
stopwords.add('可能')    # "maybe"
stopwords.add('之后')    # "afterwards"
# set the word cloud font, background color, maximum word size and background image
wc = WordCloud(width=1024, height=768,
background_color='white',
               mask=background_image,
font_path='fyuan.ttf',
stopwords=stopwords,
max_font_size = 400,
random_state = 50
)
wc.generate_from_text(wl_space_split)
img_colors = ImageColorGenerator(background_image)
wc.recolor(color_func=img_colors)
plt.imshow(wc)
plt.axis('off')
plt.show()
wc.to_file('./image.jpg')
|
py | 1a41f46c4340a4e667a97e7ae098d8fc9215326a | from setuptools import find_packages, setup
def readme():
with open("README.md") as f:
return f.read()
# read version file
exec(open("alibi_detect/version.py").read())
extras_require = {"examples": ["seaborn>=0.9.0", "tqdm>=4.28.1", "nlp>=0.3.0"],
"prophet": ["fbprophet>=0.5, <0.7", "holidays==0.9.11", "pystan<3.0"],
"torch": ["torch>=1.0"]}
setup(
name="alibi-detect",
author="Seldon Technologies Ltd.",
author_email="[email protected]",
version=__version__, # type: ignore # noqa F821
description="Algorithms for outlier detection, concept drift and metrics.",
long_description=readme(),
long_description_content_type="text/markdown",
url="https://github.com/SeldonIO/alibi-detect",
license="Apache 2.0",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.6",
# lower bounds based on Debian Stable versions where available
install_requires=[
"matplotlib>=3.0.0, <4.0.0",
"numpy>=1.16.2, <2.0.0",
"pandas>=0.23.3, <2.0.0",
"Pillow>=5.4.1, <9.0.0",
"opencv-python>=3.2.0, <5.0.0",
"scipy>=1.3.0, <2.0.0",
'scikit-image>=0.14.2, !=0.17.1, <0.19', # https://github.com/SeldonIO/alibi/issues/215
"scikit-learn>=0.20.2, <0.25.0",
"tensorflow>=2.0.0, <2.5.0",
"tensorflow_probability>=0.8.0, <0.13.0",
"transformers>=2.10.0, <5.0.0"
],
extras_require=extras_require,
test_suite="tests",
zip_safe=False,
)
|
py | 1a41f472f34819c86205a8b5fefb2fa8e3b91850 | from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import cv2
import os
prototxtPath = os.path.sep.join(["Res10Face_Detector", "deploy.prototxt"])
weightsPath = os.path.sep.join(["Res10Face_Detector",
"res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNet(prototxtPath, weightsPath)
model = load_model("Models/mask_detector.model")
def classify(image):
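    # Detect faces with the Res10 SSD, classify each face crop with the
    # MobileNetV2-based mask model, and annotate the image in place.
    # Returns (number of faces detected, per-face labels, annotated image).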
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
(104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()
count= 0
labels = []
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > 0.5:
count+=1
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
face = image[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
face = np.expand_dims(face, axis=0)
(mask, withoutMask) = model.predict(face)[0]
label = "Mask" if mask > withoutMask else "No Mask"
labels.append(label)
color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
cv2.putText(image, label, (startX, startY - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
return (count, labels, image) |
py | 1a41f486e3c616aec8d4fc934d792cd3e7933f0a | import sys
import numpy as np
def coadd_cameras(flux_cam, wave_cam, ivar_cam, mask_cam=None):
"""Adds spectra from the three cameras as long as they have the same number of wavelength bins.
This is not a replacement for desispec.coaddition.coadd_cameras,
but a simpler (versatile and faster) implementation which uses only numpy.
This also assumes the input spectra grid are already aligned
(i.e. same wavelength grid in the overlapping regions),
This is likely the case if the spectra are from the official data releases.
Parameters
----------
flux_cam : dict
Dictionary containing the flux values from the three cameras
wave_cam : dict
Dictionary containing the wavelength values from the three cameras
ivar_cam : dict
Dictionary containing the inverse variance values from the three cameras
mask_cam : dict, optional
Dictionary containing the mask values from the three cameras
Returns
-------
    tuple
        The combined flux, wavelength and inverse variance grids,
        plus the combined mask when mask_cam is given.
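
    Examples
    --------
    A minimal sketch with toy 3-bin cameras (hypothetical values; real
    spectra have far more bins per camera):

    >>> wave_cam = {"b": np.array([1., 2., 3.]),
    ...             "r": np.array([3., 4., 5.]),
    ...             "z": np.array([5., 6., 7.])}
    >>> flux_cam = {b: np.ones((2, 3)) for b in "brz"}
    >>> ivar_cam = {b: np.ones((2, 3)) for b in "brz"}
    >>> flux, wave, ivar = coadd_cameras(flux_cam, wave_cam, ivar_cam)
    >>> wave
    array([1., 2., 3., 4., 5., 6., 7.])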
"""
sbands = np.array(["b", "r", "z"]) # bands sorted by inc. wavelength
# create wavelength array
wave = None
    tolerance = 0.0001  # tolerance in Angstrom (A)
shifts = {}
for b in sbands:
wave_camera = np.atleast_2d(wave_cam[b].copy())
if wave is None:
wave = wave_camera
else:
shifts[b] = np.sum(
np.all((wave + tolerance) < wave_camera[:, 0][:, None], axis=0)
)
wave = np.append(
wave,
wave_camera[
:, np.all(wave_camera > (wave[:, -1][:, None] + tolerance), axis=0)
],
axis=1,
)
nwave = wave.shape[1]
blue = sbands[0]
ntarget = len(flux_cam[blue])
flux = None
ivar = None
mask = None
for b in sbands:
flux_camera = np.atleast_2d(flux_cam[b].copy())
ivar_camera = np.atleast_2d(ivar_cam[b].copy())
ivar_camera[ivar_camera <= 0] = 0
if mask_cam is not None:
mask_camera = np.atleast_2d(mask_cam[b].astype(bool))
ivar_camera[mask_camera] = 0
if flux is None:
flux = np.zeros((ntarget, nwave), dtype=flux_cam[blue].dtype)
flux[:, : flux_camera.shape[1]] += flux_camera * ivar_camera
ivar = np.zeros((ntarget, nwave), dtype=flux_cam[blue].dtype)
ivar[:, : ivar_camera.shape[1]] += ivar_camera
            if mask_cam is not None:
mask = np.ones((ntarget, nwave), dtype=mask_cam[blue].dtype)
mask[:, : mask_camera.shape[1]] &= mask_camera
else:
flux[:, shifts[b] : (shifts[b] + flux_camera.shape[1])] += (
flux_camera * ivar_camera
)
ivar[:, shifts[b] : (shifts[b] + ivar_camera.shape[1])] += ivar_camera
if mask is not None:
mask[:, shifts[b] : (shifts[b] + mask_camera.shape[1])] &= mask_camera
flux = flux / ivar
flux[~np.isfinite(flux)] = 0
ivar[~np.isfinite(ivar)] = 0
if wave_cam[blue].ndim == 1:
wave = np.squeeze(wave)
if mask_cam is not None:
return flux, wave, ivar, mask
else:
return flux, wave, ivar |
py | 1a41f5382f42566b24bf3ed02a86f3b66ee8b04a | #!/usr/bin/env python3
"""Make rhyming words"""
import argparse
import re
import string
# --------------------------------------------------
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Make rhyming "words"',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('word', metavar='str', help='A word to rhyme')
return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
prefixes = list('bcdfghjklmnpqrstvwxyz') + (
'bl br ch cl cr dr fl fr gl gr pl pr sc '
'sh sk sl sm sn sp st sw th tr tw thw wh wr '
'sch scr shr sph spl spr squ str thr').split()
start, rest = stemmer(args.word)
if rest:
print('\n'.join(sorted([p + rest for p in prefixes if p != start])))
else:
print(f'Cannot rhyme "{args.word}"')
# --------------------------------------------------
def stemmer(word):
"""Return leading consonants (if any), and 'stem' of word"""
vowels = 'aeiou'
consonants = ''.join(
[c for c in string.ascii_lowercase if c not in vowels])
pattern = (
'([' + consonants + ']+)?' # capture one or more, optional
'(' # start capture
'[' + vowels + ']' # at least one vowel
'.*' # zero or more of anything else
')?') # end capture, optional group
match = re.match(pattern, word.lower())
return (match.group(1) or '', match.group(2) or '') if match else ('', '')
# --------------------------------------------------
def test_stemmer():
"""test the stemmer"""
assert stemmer('') == ('', '')
assert stemmer('cake') == ('c', 'ake')
assert stemmer('chair') == ('ch', 'air')
assert stemmer('APPLE') == ('', 'apple')
assert stemmer('RDNZL') == ('rdnzl', '')
# --------------------------------------------------
if __name__ == '__main__':
main()
|
py | 1a41f5440b12539ba1382982c0f80e6e57f232ed | from sys import argv
from Bio import SeqIO, Seq, AlignIO
import pandas as pd
# user input:
aligned_fasta_path = argv[1]
outfile_path = argv[2]
regions_table_path = argv[3] # tables of regions of the genome, to determine translation reading frame in translation.
excel_mutations_table_path = argv[4] # TODO: pipeline - add as argument
def highlight_row(row):
"""
Highlight the mutations cells in excel, row by row.
:param row: row to return it's colors
:return: colors list matching row indices.
"""
colors_list = [""] * 7 + ["background-color: silver"] * 2 # color of the fixed part of the table
# (the mutation table part, first 8 columns)
mut = row["mut"]
ref = row["REF"]
for samp in row[9:]: # now color all other cells (each column belongs to a different sample)
if samp != ref: # highlight cell if it has a mutation.
if samp == mut: # highlight only if the mutation is matching the mutation in table
color = "background-color: yellow"
else: # sample == 'X' -> do not color.
color = ''
else:
color = ''
colors_list.append(color)
return colors_list
codon_map = {
"TTT": "F", "TTC": "F", "TTA": "L", "TTG": "L",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S",
"TAT": "Y", "TAC": "Y", "TAA": "*", "TAG": "*",
"TGT": "C", "TGC": "C", "TGA": "*", "TGG": "W",
"CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"CAT": "H", "CAC": "H", "CAA": "Q", "CAG": "Q",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R",
"ATT": "I", "ATC": "I", "ATA": "I", "ATG": "M",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"AAT": "N", "AAC": "N", "AAA": "K", "AAG": "K",
"AGT": "S", "AGC": "S", "AGA": "R", "AGG": "R",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"GAT": "D", "GAC": "D", "GAA": "E", "GAG": "E",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G"
}
def full_codon_gaps(sequence, start, end, gap='-'):
"""
    Avoid partial gaps in a codon by converting them to whole-gap codons.
    Example: from 'A--' to '---'.
    :param sequence: Seq object (Biopython)
    :param start: start of reading frame
    :param end: end of reading frame
    :param gap: gap character. default: '-'
    :return: new sequence, with full codons
    """
old_seq = str(sequence)
new_seq = old_seq[:start]
for i in range(start-1, end, 3):
codon = old_seq[i: i+3]
if '-' in codon:
codon = '---'
new_seq += codon
new_seq += old_seq[end:]
return Seq.Seq(new_seq)
def translate(sequence, start, end, codon_table):
"""
    Translate a nucleotide sequence in the given region to an amino-acid sequence, codon by codon, from start to end.
    :param sequence: nucleotide sequence as str
:param start: position of first nucleotide
:param end: position of last nucleotide
:param codon_table: dictionary of codons as keys and AA name as values, for translation.
:return: translated sequence (aa seq)
"""
    translated = []
for i in range(start, end, 3):
codon = sequence[i:i+3]
if codon in codon_table:
aa = codon_table[codon] # get the codon's matching amino acid by codon table dictionary
else:
aa = 'X' # ignore frameshifts
        translated.append(aa)
    return translated
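# Quick sanity check (hypothetical input): the first two codons of "ATGGCTTAA"
# translate to M and A, e.g. translate("ATGGCTTAA", 0, 6, codon_map) -> ['M', 'A']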
# 1. load sequences and tables
regionsTable = pd.read_csv(regions_table_path)
multifasta = SeqIO.to_dict(SeqIO.parse(aligned_fasta_path, 'fasta'))
multifasta.pop('NC_045512.2', None) # remove refseq sequence from alignment file if exists.
multifasta.pop('REF_NC_045512.2', None)
mutTable_excel = pd.read_excel(excel_mutations_table_path, sheet_name=None, engine='openpyxl')
for name in mutTable_excel:
mutTable_excel[name]['lineage'] = name # add a lineage column to all variant's tables
mutTable = pd.concat(mutTable_excel.values(), ignore_index=True)
# select only part of the columns:
mutTable = mutTable[['Position', 'Reference', 'Mutation', 'protein',
'variant', 'Mutation type', 'lineage', 'annotation']]
# compress identical mutations into one line and concat lineage names in the lineage column:
# mutTable = mutTable.groupby( # to create compressed table:
# ['Position', 'Reference', 'Mutation', 'protein', 'variant', 'Mutation type', 'annotation'], as_index=False).agg(
# {'lineage': ';'.join}
# )
# 2. keep only non-synonymous mutations
# comparing in lower case to avoid mistakes such as SNP_Stop != SNP_stop. to catch all cases.
mutTable = mutTable[(mutTable['Mutation type'].str.lower() == 'snp') | (mutTable['Mutation type'].str.lower() == 'snp_stop')]
finalTable = mutTable
# 3. iterate over mutations and create final table.
for sample, record in multifasta.items():
# each sample found in fasta will create a column of final table next to mutations info.
seq = record.seq # 1 fasta sequence as string
sample_muts = [] # will aggregate translated value of each mutation to this list that will be added as column.
for mut in mutTable.iterrows():
pos = mut[1][0]
gene = mut[1][3]
aa = mut[1][4]
        aa_number = int(aa[1:-1])  # strip the letters from both sides (SNP mutation example: Q57H -> 57)
aa_from = aa[0] # original AA
aa_to = aa[-1] # mutated AA
# get start and end regions sequence location from regions table.
region_start = int(regionsTable[regionsTable.id == gene].start.values[0])
region_end = int(regionsTable[regionsTable.id == gene].end.values[0])
# translate region (with translate() function in the top of the page)
region_translated = translate(str(seq), region_start-1, region_end, codon_map)
alt = region_translated[aa_number-1] # get the specific aa by its number (in its name)
sample_muts.append(alt) # add to mutations list of the specific mutation to create a column to the final table
finalTable[sample] = sample_muts # add the column of the sample.
varcol = finalTable.apply(lambda row: row[8:].unique(), axis=1) # add a 'var' column to list unique values of row
finalTable.insert(6, 'var', varcol) # insert var column
finalTable = finalTable.sort_values(by=["lineage", "protein"], ascending=[True, False]) # sort by lineage and then gene
finalTable = finalTable.rename(columns={'Position': 'nuc pos', 'Mutation type': 'type', 'protein': 'gene',
'variant': 'name', 'Reference': 'REF', 'Mutation': 'mut'}) # rename columns (old: new)
sorted_cols = ['nuc pos', 'type', 'gene', 'var', 'name', 'lineage', 'annotation', 'REF', 'mut'] # re-order columns
finalTable = finalTable[sorted_cols + [col for col in finalTable.columns if col not in sorted_cols]] # re-order columns
# write to file
# add highlights with designated function in the top of the page
finalTable.style.apply(lambda row: highlight_row(row), axis=1).to_excel(outfile_path, index=False) # add highlights
|
py | 1a41f54ae4b7b88af95b10655b202a07f7fa07fe | # -*- coding: utf-8 -*-
import pandas as pd
import os
file_path = os.path.dirname(os.path.realpath(__file__))
# File uploads - Extended Data Figure 5
other = pd.read_excel(file_path + "/../../data/other_category.xlsx")
# Plot colors
c = ['#725843', '#9f7f65', '#7c7b78', '#bbbbbb', '#90b493']
# Plotting
ax = other.plot(x='Year', kind='area', color = c, legend='reverse', xlim=(1900, 2014),ylim=(0, 22), xticks=[1910, 1930, 1950, 1970, 1990, 2010], yticks=[0, 4, 8, 12, 16, 20], lw=0)
ax.set_xticklabels([1910, 1930, 1950, 1970, 1990, 2010], rotation=0, fontsize=6)
ax.set_yticklabels([0, 4, 8, 12, 16, 20], rotation=0, fontsize=6)
ax.set_xlabel('year', fontsize=7)
ax.set_ylabel('dry weight (Gigatonnes)', fontsize=7)
handles, labels = ax.get_legend_handles_labels()
ax.legend(reversed(handles), reversed(labels), prop={'size': 6}, bbox_to_anchor=(0, 1.680/1.750), loc="upper left",frameon=False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.figure.set_figheight(2.3)
ax.figure.set_figwidth(3.5)
file_out_name = file_path + '/../output/extended_data_figure5'
ax.figure.savefig(file_out_name+'.png', bbox_inches='tight', pad_inches = 0.05, dpi = 600)
ax.figure.savefig(file_out_name+'.eps', bbox_inches='tight', pad_inches = 0.05)
ax.figure.savefig(file_out_name+'.svg', bbox_inches='tight', pad_inches = 0.05) |
py | 1a41f5ed2a5b58050e709574a02863baccd2d751 | # Copyright (c) 2018-2019 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
ParameterStatus message
A ParameterStatus message will be generated whenever the backend believes the
frontend should know about a setting parameter value. For example, when you do
SET SESSION AUTOCOMMIT ON | OFF, you get back a parameter status telling you the
new value of autocommit.
At present Vertica supports a handful of parameters; they are:
standard_conforming_strings, server_version, client_locale, client_label,
long_string_types, protocol_version, auto_commit, MARS
More parameters would be added in the future. Accordingly, a frontend should
simply ignore ParameterStatus for parameters that it does not understand or care
about.
"""
from __future__ import print_function, division, absolute_import
from struct import unpack
from ..message import BackendMessage
UTF_8 = 'utf-8'
class ParameterStatus(BackendMessage):
message_id = b'S'
def __init__(self, data):
BackendMessage.__init__(self)
null_byte = data.find(b'\x00')
unpacked = unpack('{0}sx{1}sx'.format(null_byte, len(data) - null_byte - 2), data)
self.name = unpacked[0].decode(UTF_8)
self.value = unpacked[1].decode(UTF_8)
def __str__(self):
return "ParameterStatus: {} = {}".format(self.name, self.value)
BackendMessage.register(ParameterStatus)
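# A minimal parsing sketch (hypothetical payload): the message body is two
# null-terminated strings, here parameter name "auto_commit" and value "on".
#
#   status = ParameterStatus(b'auto_commit\x00on\x00')
#   str(status)  # -> "ParameterStatus: auto_commit = on"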
|
py | 1a41f657644627602186b7b3377955cab4077cc1 | import math
'''
isReceiving returns true if a transaction was a return
Integer transactionAmount
'''
def isReceiving(transactionAmount):
if transactionAmount == 0:
return None # should not happen
else:
return transactionAmount > 0
'''
isPaying returns true if a transaction was a payment
Integer transactionAmount
'''
def isPaying(transactionAmount):
if transactionAmount == 0:
return None # should not happen
return transactionAmount < 0
'''
getAbsoluteAmount returns the absolute value of a relative transaction amount
Integer transactionAmount
'''
def getAbsoluteAmount(transactionAmount):
return math.fabs(transactionAmount)
'''
checks if a String represents a Fractional or Integral
'''
def isNumber(s):
    # guard against the empty string and a leading/trailing dot
    if len(s) == 0 or s[0] == '.' or s[-1] == '.':
        return False
    foundFloatingPoint = False
    for digit in s:
if not digit.isdigit():
if (digit == '.'):
if (foundFloatingPoint):
return False
else:
foundFloatingPoint = True
else:
return False
return True
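# e.g. isNumber("3.14") -> True, isNumber("3.1.4") -> False, isNumber(".5") -> False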
'''
accepted characters: A-z (case-insensitive), 0-9 and underscores.
length: 5-32 characters.
'''
def isValidTelegramUsername(string):
length = len(string)
validLength = length >= 5 and length <= 32
if validLength:
for char in string:
if not(char.isalpha() or char.isdigit() or char == '_'):
return False
return True
else:
return False
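# e.g. isValidTelegramUsername("user_01") -> True,
# isValidTelegramUsername("ab") -> False (too short)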
'''
tests
'''
def main():
    print(isPaying(-1), isPaying(1), isReceiving(-1), isReceiving(1), getAbsoluteAmount(-1), getAbsoluteAmount(1))
if __name__ == '__main__':
main()
|
py | 1a41f7c2c67cfd9a3c7e5f8b34d083e92bc4cdf5 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from h._compat import xrange
from unittest.mock import Mock
import datetime
import pytest
from h.activity import bucketing
from tests.common import factories
UTCNOW = datetime.datetime(year=1970, month=2, day=21, hour=19, minute=30)
FIVE_MINS_AGO = UTCNOW - datetime.timedelta(minutes=5)
YESTERDAY = UTCNOW - datetime.timedelta(days=1)
THIRD_MARCH_1968 = datetime.datetime(year=1968, month=3, day=3)
FIFTH_NOVEMBER_1969 = datetime.datetime(year=1969, month=11, day=5)
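# Matcher helper: an instance compares equal to any timeframe object with the
# same label and document buckets, keeping the assertions below compact.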
class timeframe_with: # noqa: N801
def __init__(self, label, document_buckets):
self.label = label
self.document_buckets = document_buckets
def __eq__(self, timeframe):
return (
self.label == timeframe.label
and self.document_buckets == timeframe.document_buckets
)
def __repr__(self):
return '{class_} "{label}" with {n} document buckets'.format(
class_=self.__class__, label=self.label, n=len(self.document_buckets)
)
@pytest.mark.usefixtures("factories")
class TestDocumentBucket:
def test_init_sets_the_document_title(self, db_session, document):
title_meta = factories.DocumentMeta(
type="title", value=["The Document Title"], document=document
)
document.title = "The Document Title"
db_session.add(title_meta)
db_session.flush()
bucket = bucketing.DocumentBucket(document)
assert bucket.title == "The Document Title"
def test_init_uses_the_document_web_uri(self, db_session, document):
document.web_uri = "http://example.com"
bucket = bucketing.DocumentBucket(document)
assert bucket.uri == "http://example.com"
def test_init_sets_None_uri_when_no_http_or_https_can_be_found(
self, db_session, document
):
document.web_uri = None
bucket = bucketing.DocumentBucket(document)
assert bucket.uri is None
def test_init_sets_the_domain_from_the_extracted_uri(self, db_session, document):
document.web_uri = "https://www.example.com/foobar.html"
bucket = bucketing.DocumentBucket(document)
assert bucket.domain == "www.example.com"
def test_init_sets_domain_to_local_file_when_no_uri_is_set(
self, db_session, document
):
docuri_pdf = factories.DocumentURI(
uri="urn:x-pdf:fingerprint", document=document
)
db_session.add(docuri_pdf)
db_session.flush()
bucket = bucketing.DocumentBucket(document)
assert bucket.domain == "Local file"
def test_annotations_count_returns_count_of_annotations(self, db_session, document):
bucket = bucketing.DocumentBucket(document)
for _ in xrange(7):
annotation = factories.Annotation()
bucket.append(annotation)
assert bucket.annotations_count == 7
def test_append_appends_the_annotation(self, document):
bucket = bucketing.DocumentBucket(document)
annotations = []
for _ in xrange(7):
annotation = factories.Annotation()
annotations.append(annotation)
bucket.append(annotation)
assert bucket.annotations == annotations
def test_append_adds_unique_annotation_tag_to_bucket(self, document):
ann_1 = factories.Annotation(tags=["foo", "bar"])
ann_2 = factories.Annotation(tags=["foo", "baz"])
bucket = bucketing.DocumentBucket(document)
bucket.append(ann_1)
bucket.append(ann_2)
assert bucket.tags == set(["foo", "bar", "baz"])
def test_append_adds_unique_annotation_user_to_bucket(self, document):
ann_1 = factories.Annotation(userid="luke")
ann_2 = factories.Annotation(userid="alice")
ann_3 = factories.Annotation(userid="luke")
bucket = bucketing.DocumentBucket(document)
bucket.append(ann_1)
bucket.append(ann_2)
bucket.append(ann_3)
assert bucket.users == set(["luke", "alice"])
def test_eq(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
for _ in xrange(5):
annotation = factories.Annotation()
bucket_1.append(annotation)
bucket_2.append(annotation)
assert bucket_1 == bucket_2
def test_eq_annotations_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.annotations = [1, 2, 3]
bucket_2.annotations = [2, 3, 4]
assert not bucket_1 == bucket_2
def test_eq_tags_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.tags.update(["foo", "bar"])
bucket_2.tags.update(["foo", "baz"])
assert not bucket_1 == bucket_2
def test_eq_users_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.users.update(["alice", "luke"])
bucket_2.users.update(["luke", "paula"])
assert not bucket_1 == bucket_2
def test_eq_uri_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.uri = "http://example.com"
bucket_2.uri = "http://example.org"
assert not bucket_1 == bucket_2
def test_eq_domain_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.domain = "example.com"
bucket_2.domain = "example.org"
assert not bucket_1 == bucket_2
def test_eq_title_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.title = "First Title"
bucket_2.title = "Second Title"
assert not bucket_1 == bucket_2
def test_incontext_link_returns_link_to_first_annotation(self, document, patch):
incontext_link = patch("h.links.incontext_link")
bucket = bucketing.DocumentBucket(document)
ann = factories.Annotation()
bucket.append(ann)
request = Mock()
assert bucket.incontext_link(request) == incontext_link.return_value
def test_incontext_link_returns_none_if_bucket_empty(self, document, patch):
patch("h.links.incontext_link")
bucket = bucketing.DocumentBucket(document)
request = Mock()
assert bucket.incontext_link(request) is None
@pytest.fixture
def document(self, db_session):
document = factories.Document()
db_session.add(document)
db_session.flush()
return document
@pytest.mark.usefixtures("factories", "utcnow")
class TestBucket:
def test_no_annotations(self):
assert bucketing.bucket([]) == []
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(FIVE_MINS_AGO, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_one_annotation(self, annotation_datetime, timeframe_label):
annotation = factories.Annotation(
document=factories.Document(), updated=annotation_datetime
)
timeframes = bucketing.bucket([annotation])
assert timeframes == [
timeframe_with(
timeframe_label,
{
annotation.document: bucketing.DocumentBucket(
annotation.document, [annotation]
)
},
)
]
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(FIVE_MINS_AGO, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_multiple_annotations_of_one_document_in_one_timeframe(
self, annotation_datetime, timeframe_label
):
results = [
factories.Annotation(
target_uri="https://example.com", updated=annotation_datetime
)
for _ in range(3)
]
timeframes = bucketing.bucket(results)
document = results[0].document
assert timeframes == [
timeframe_with(
timeframe_label, {document: bucketing.DocumentBucket(document, results)}
)
]
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(YESTERDAY, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_annotations_of_multiple_documents_in_one_timeframe(
self, annotation_datetime, timeframe_label
):
annotation_1 = factories.Annotation(
target_uri="http://example1.com", updated=annotation_datetime
)
annotation_2 = factories.Annotation(
target_uri="http://example2.com", updated=annotation_datetime
)
annotation_3 = factories.Annotation(
target_uri="http://example3.com", updated=annotation_datetime
)
timeframes = bucketing.bucket([annotation_1, annotation_2, annotation_3])
assert timeframes == [
timeframe_with(
timeframe_label,
{
annotation_1.document: bucketing.DocumentBucket(
annotation_1.document, [annotation_1]
),
annotation_2.document: bucketing.DocumentBucket(
annotation_2.document, [annotation_2]
),
annotation_3.document: bucketing.DocumentBucket(
annotation_3.document, [annotation_3]
),
},
)
]
def test_annotations_of_the_same_document_in_different_timeframes(self):
results = [
factories.Annotation(),
factories.Annotation(updated=FIFTH_NOVEMBER_1969),
factories.Annotation(updated=THIRD_MARCH_1968),
]
document = factories.Document()
for annotation in results:
annotation.document = document
timeframes = bucketing.bucket(results)
expected_bucket_1 = bucketing.DocumentBucket(document, [results[0]])
expected_bucket_2 = bucketing.DocumentBucket(document, [results[1]])
expected_bucket_3 = bucketing.DocumentBucket(document, [results[2]])
assert timeframes == [
timeframe_with("Last 7 days", {document: expected_bucket_1}),
timeframe_with("Nov 1969", {document: expected_bucket_2}),
timeframe_with("Mar 1968", {document: expected_bucket_3}),
]
def test_recent_and_older_annotations_together(self):
results = [
factories.Annotation(target_uri="http://example1.com"),
factories.Annotation(target_uri="http://example2.com"),
factories.Annotation(target_uri="http://example3.com"),
factories.Annotation(
target_uri="http://example4.com", updated=THIRD_MARCH_1968
),
factories.Annotation(
target_uri="http://example5.com", updated=THIRD_MARCH_1968
),
factories.Annotation(
target_uri="http://example6.com", updated=THIRD_MARCH_1968
),
]
timeframes = bucketing.bucket(results)
expected_bucket_1 = bucketing.DocumentBucket(results[0].document, [results[0]])
expected_bucket_2 = bucketing.DocumentBucket(results[1].document, [results[1]])
expected_bucket_3 = bucketing.DocumentBucket(results[2].document, [results[2]])
expected_bucket_4 = bucketing.DocumentBucket(results[3].document, [results[3]])
expected_bucket_5 = bucketing.DocumentBucket(results[4].document, [results[4]])
expected_bucket_6 = bucketing.DocumentBucket(results[5].document, [results[5]])
assert timeframes == [
timeframe_with(
"Last 7 days",
{
results[0].document: expected_bucket_1,
results[1].document: expected_bucket_2,
results[2].document: expected_bucket_3,
},
),
timeframe_with(
"Mar 1968",
{
results[3].document: expected_bucket_4,
results[4].document: expected_bucket_5,
results[5].document: expected_bucket_6,
},
),
]
def test_annotations_from_different_days_in_same_month(self):
"""
Test bucketing multiple annotations from different days of same month.
Annotations from different days of the same month should go into one
bucket.
"""
one_month_ago = UTCNOW - datetime.timedelta(days=30)
annotations = [
factories.Annotation(
target_uri="http://example.com", updated=one_month_ago
),
factories.Annotation(
target_uri="http://example.com",
updated=one_month_ago - datetime.timedelta(days=1),
),
factories.Annotation(
target_uri="http://example.com",
updated=one_month_ago - datetime.timedelta(days=2),
),
]
timeframes = bucketing.bucket(annotations)
expected_bucket = bucketing.DocumentBucket(annotations[0].document)
expected_bucket.update(annotations)
assert timeframes == [
timeframe_with("Jan 1970", {annotations[0].document: expected_bucket})
]
@pytest.fixture
def utcnow(self, patch):
utcnow = patch("h.activity.bucketing.utcnow")
utcnow.return_value = UTCNOW
return utcnow
|
py | 1a41f91574cc97294c624d878bae98006410b2d3 | from django import forms
class SearchForm(forms.Form):
CHOICES = [
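        # search-by options: ISBN, book title (书名), author (作者)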
(u'ISBN', u'ISBN'),
(u'书名', u'书名'),
(u'作者', u'作者')
]
search_by = forms.ChoiceField(
label='',
choices=CHOICES,
widget=forms.RadioSelect(),
initial=u'书名',
)
keyword = forms.CharField(
label='',
max_length=32,
widget=forms.TextInput(attrs={
'class': 'form-control input-lg',
            'placeholder': u'请输入需要检索的图书信息',  # "Please enter the book information to search for"
'name': 'keyword',
})
) |
py | 1a41f9aaeeae1adad385985c312cd564929844d4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ZhimaCreditPeUserOrderSyncModel import ZhimaCreditPeUserOrderSyncModel
class ZhimaCreditPeUserOrderSyncRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, ZhimaCreditPeUserOrderSyncModel):
self._biz_content = value
else:
self._biz_content = ZhimaCreditPeUserOrderSyncModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'zhima.credit.pe.user.order.sync'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
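# A minimal usage sketch (hypothetical field values):
#   request = ZhimaCreditPeUserOrderSyncRequest()
#   request.biz_content = {"order_no": "20210101000000"}  # dict is converted via from_alipay_dict
#   params = request.get_params()  # includes method 'zhima.credit.pe.user.order.sync'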
|
py | 1a41fabae9c9050122ee6cd26616b13f98483af4 | """
(C) IBM Corporation 2021
Description:
Creates new config files within the default config file dir.
    Uses both user input and an authentication file for auth information.
Repository:
https://github.com/IBM/spectrum-protect-sppmon
Author:
Niels Korschinsky
"""
import argparse
import json
import logging
import os
import re
import signal
import subprocess
import sys
from os.path import dirname, isfile, join, realpath
from typing import Any, Dict, List
from utils import Utils
LOGGER: logging.Logger
class ConfigFileSetup:
"""
Creates new config files within the default config file dir.
    Uses both user input and an authentication file for auth information.
Functions:
addSshClient - Asks for ssh-login information for a certain ssh-client type.
createServerDict - Asks SPP-REST-Server information from user and returns them.
createInfluxDict - Reads InfluxDB config from authfile or asks user.
main - See description above.
"""
@staticmethod
def addSshClient(ssh_type: str) -> List[Dict[str, Any]]:
"""Asks for ssh-login information for a certain ssh-client type.
Args:
ssh_type (str): Type of ssh-client.
Returns:
List[Dict[str, Any]]: List of added ssh-clients.
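        Example (hypothetical values):
            Each returned entry has the shape
            {"name": "vsnap-1", "srv_address": "10.0.0.5", "srv_port": 22,
             "username": "user", "password": "...", "type": "vsnap"}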
"""
ssh_clients: List[Dict[str, Any]] = []
Utils.printRow()
LOGGER.info(
f"> Collecting {ssh_type} ssh information")
# counter for naming like: vsnap-1 / vsnap-2
counter: int = 1
while(Utils.confirm(f"Do you want to add (another) {ssh_type}-client?")):
try:
ssh_client: Dict[str, Any] = {}
print(
"> Test the requested logins by logging into" +
f"the {ssh_type}-client via ssh yourself.")
ssh_client["name"] = Utils.prompt_string(
f"Please enter the name of the {ssh_type}-client (display only)",
f"{ssh_type}-{counter}")
                counter += 1  # reset for the next ssh_type
ssh_client["srv_address"] = Utils.prompt_string(
f"Please enter the server address of the {ssh_type}-client")
ssh_client["srv_port"] = int(
Utils.prompt_string(
f"Please enter the port of the {ssh_type}-client",
"22",
filter=(lambda x: x.isdigit())))
ssh_client["username"] = Utils.prompt_string(
f"Please enter the {ssh_type}-client username (equal to login via ssh)")
ssh_client["password"] = Utils.prompt_string(
f"Please enter the {ssh_type}-client user password (equal to login via ssh)",
is_password=True)
ssh_client["type"] = ssh_type
# Saving config
ssh_clients.append(ssh_client)
Utils.printRow()
except ValueError as err:
LOGGER.error(err)
LOGGER.info(
"Aborted adding this ssh client. Continuing with next client")
return ssh_clients
@staticmethod
def createServerDict() -> Dict[str, Any]:
"""
        Asks the user for SPP-REST-Server information and returns it.
        Returns:
            Dict[str, Any]: All information for SPP-REST access
"""
spp_server: Dict[str, Any] = {}
spp_server["username"] = Utils.prompt_string(
"Please enter the SPP REST-API Username (equal to login via website)")
spp_server["password"] = Utils.prompt_string(
"Please enter the REST-API Users Password (equal to login via website)", is_password=True)
spp_server["srv_address"] = Utils.prompt_string(
"Please enter the SPP server address")
spp_server["srv_port"] = int(
Utils.prompt_string(
"Please enter the SPP server port",
"443",
filter=(lambda x: x.isdigit())))
spp_server["jobLog_retention"] = Utils.prompt_string(
"How long are the JobLogs saved within the Server? (Format: 48h, 60d, 2w)",
"60d",
filter=(lambda x: bool(re.match(r"^[0-9]+[hdw]$", x))))
return spp_server
@staticmethod
def createInfluxDict(server_name: str) -> Dict[str, Any]:
"""
Reads InfluxDB config from authfile or asks user.
Args:
server_name (str): Name of SPP server to set influxDB-name
Returns:
            Dict[str, Any]: All information for Influx access
"""
influxDB: Dict[str, Any] = {}
influxDB["username"] = Utils.readAuthOrInput(
"influxAdminName",
"Please enter the influxAdmin username",
"influxAdmin"
)
influxDB["password"] = Utils.readAuthOrInput(
"influxAdminPassword",
"Please enter the influxAdmin user password",
is_password=True
)
influxDB["ssl"] = bool(Utils.readAuthOrInput(
"sslEnabled",
"Please enter whether ssl is enabled (True/False)",
"True",
filter=(lambda x: bool(re.match(r"^(True)|(False)$", x)))
))
# Only check this if ssl is enabled
# Note: verify_ssl is the logical opposite of unsafeSsl
influxDB["verify_ssl"] = False if (not influxDB["ssl"]) else not bool(Utils.readAuthOrInput(
"unsafeSsl",
"Please enter whether the ssl certificate is selfsigned (True/False)",
filter=(lambda x: bool(re.match(r"^(True)|(False)$", x)))
))
influxDB["srv_address"] = Utils.readAuthOrInput(
"influxAddress",
"Please enter the influx server address"
)
influxDB["srv_port"] = int(Utils.readAuthOrInput(
"influxPort",
"Please enter the influx server port",
"8086",
filter=(lambda x: x.isdigit())
))
# Need to remove any illegal characters from the db name. For now, we will limit the characters
# to letters and numbers
dbName = ''.join(filter(str.isalnum, server_name))
LOGGER.info(
f"> Your influxDB database name for this server is \"{dbName}\"")
influxDB["dbName"] = dbName
return influxDB
def main(self, config_path: str, auth_file: str, auto_confirm: bool):
"""
Creates new config files within the default config file dir.
        Uses both user input and an authentication file for auth information.
Args:
config_path (str): Config file DIR
            auth_file (str): File with pairs of authentication data
auto_confirm (bool): Skip any confirm messages
"""
fileDirPath = dirname(sys.argv[0])
logPath = join(fileDirPath, "logs", "installLog.txt")
global LOGGER_NAME
LOGGER_NAME = 'configFileLogger'
global LOGGER
LOGGER = Utils.setupLogger(LOGGER_NAME, logPath)
Utils.printRow()
Utils.auto_confirm = auto_confirm
Utils.LOGGER = LOGGER
signal.signal(signal.SIGINT, Utils.signalHandler)
LOGGER.info("> Checking for sudo rights")
# Only works on Linux, therefore error here.
if os.name == 'posix':
if os.geteuid() == 0:
print("Already root")
else:
print("Root rights required to run script.")
subprocess.call(['sudo', 'python3', *sys.argv])
sys.exit()
LOGGER.info("> Generating new Config files")
# ### Config dir setup
config_path = realpath(config_path)
LOGGER.info(
f"> All new configurations files will be written into the directory:\n {config_path}")
# ### authFile setup
try:
if(not auth_file):
LOGGER.info("> No authentification file specifed")
Utils.setupAuthFile(None)
else: # take none if not exists, otherwise take auth path
Utils.setupAuthFile(auth_file)
except Exception as error:
LOGGER.info(f"> Setup of auth-file failed due error: {error}")
# ########## EXECUTION ################
LOGGER.info("> You may add multiple SPP-Server now.")
print("> Each server requires it's own config file")
try:
            while(Utils.confirm("\nDo you want to add a new SPP-Server now?")):
config_file_path: str = ""
server_name: str = ""
while(not config_file_path or not server_name):
# Servername for filename and config
server_name = Utils.prompt_string(
"What is the name of the SPP-Server? (Human Readable, no Spaces)",
filter=(lambda x: not " " in x))
# Replace spaces
config_file_path = join(
realpath(config_path), server_name + ".conf")
if(isfile(config_file_path)):
LOGGER.info(
f"> There is already a file at {config_file_path}.")
if(not Utils.confirm("Do you want to replace it?")):
LOGGER.info(
"> Please re-enter a different server name")
# remove content to allow loop to continue
config_file_path = ""
server_name = ""
else:
LOGGER.info("> Overwriting old config file")
os.system("touch " + config_file_path)
os.system("sudo chmod 600 " + config_file_path)
LOGGER.info(f"> Created config file under {config_file_path}")
# Overwrite existing file
with open(config_file_path, "w") as config_file:
LOGGER.info(
f"> Accessed config file under {config_file_path}")
# Structure of the config file
configs: Dict[str, Any] = {}
# #################### SERVER ###############################
Utils.printRow()
LOGGER.info("> collecting server information")
# Saving config
configs["sppServer"] = ConfigFileSetup.createServerDict()
LOGGER.info("> finished collecting server information")
# #################### influxDB ###############################
Utils.printRow()
LOGGER.info("> collecting influxDB information")
# Saving config
configs["influxDB"] = ConfigFileSetup.createInfluxDict(
server_name)
LOGGER.info("> finished collecting influxdb information")
# #################### ssh clients ###############################
Utils.printRow()
LOGGER.info("> collecting ssh client information")
ssh_clients: List[Dict[str, Any]] = []
print("")
print("> NOTE: You will now be asked for multiple ssh logins")
print(
"> You may test all these logins yourself by logging in via ssh")
print("> Following categories will be asked:")
# server excluded here
ssh_types: List[str] = [
"vsnap", "vadp", "cloudproxy", "other"]
LOGGER.info("> server, " + ", ".join(ssh_types))
print("> Please add all clients accordingly.")
print()
print(
"> If you misstyped anything you may edit the config file manually afterwards")
print(
"> NOTE: It is highly recommended to add at least one vSnap client")
if(not Utils.confirm("Do you want to continue now?")):
json.dump(configs, config_file, indent=4)
LOGGER.info(
f"> saved all information into file {config_file_path}")
LOGGER.info("> finished setup for this server.")
                        continue  # Continuing to the next server config file loop
# #################### ssh clients: SERVER ###############################
Utils.printRow()
LOGGER.info("> Collecting SPP-Server ssh information")
ssh_server: Dict[str, Any] = {}
print(
"> Test the requested logins by logging into the SPP-Server via ssh yourself.")
ssh_server["name"] = server_name
spp_server_dict: Dict[str, Any] = configs["sppServer"]
ssh_server["srv_address"] = spp_server_dict["srv_address"]
ssh_server["srv_port"] = int(
Utils.prompt_string(
f"Please enter the SSH port of the SPP server",
"22",
filter=(lambda x: x.isdigit())))
ssh_server["username"] = Utils.prompt_string(
"Please enter the SPP-Server SSH username (equal to login via ssh)")
ssh_server["password"] = Utils.prompt_string(
"Please enter the SPP-Server SSH user password (equal to login via ssh)",
is_password=True)
ssh_server["type"] = "server"
# Saving config
ssh_clients.append(ssh_server)
# #################### ssh clients all other ###############################
for ssh_type in ssh_types:
try:
ssh_clients.extend(ConfigFileSetup.addSshClient(ssh_type))
except ValueError as err:
LOGGER.error(err)
LOGGER.info(
"Skipped this type of SSH-Client. Continuing with next type.")
# save all ssh-clients
configs["sshclients"] = ssh_clients
print("> Finished setting up SSH Clients")
# #################### SAVE & EXIT ###############################
LOGGER.info("> Writing into config file")
json.dump(configs, config_file, indent=4)
LOGGER.info(
f"> Configuraton saved into the file:\n{config_file_path}")
Utils.printRow()
                continue  # Continuing to the next server config file loop
except ValueError as err:
LOGGER.error(err)
LOGGER.info("> Finished config file creation")
if __name__ == "__main__":
fileDirPath = dirname(sys.argv[0])
configPathDefault = realpath(join(fileDirPath, "..", "config_files"))
authPathDefault = realpath(join(fileDirPath, "delete_me_auth.txt"))
parser = argparse.ArgumentParser(
"Support agent to create new server configuration files for SPPMon")
parser.add_argument("--configPath", dest="config_path",
default=configPathDefault,
help=f"Path to folder containing the config files (default: `{configPathDefault}`)")
parser.add_argument("--authFile", dest="auth_file",
required=False,
default=authPathDefault,
help=f"Path to authentification file (default: `{authPathDefault}`)")
parser.add_argument("--autoConfirm", dest="auto_confirm",
action="store_true",
help="Autoconfirm most confirm prompts")
args = parser.parse_args()
ConfigFileSetup().main(args.config_path, args.auth_file, args.auto_confirm)
|
py | 1a41fad6aaaa07c809a96378a819530f27cfeab3 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from copy import deepcopy
import os
import platform
import pytest
import subprocess
import sys
from anaconda_project.test.environ_utils import minimal_environ, strip_environ
from anaconda_project.test.project_utils import project_no_dedicated_env
from anaconda_project.internal.test.tmpfile_utils import (with_directory_contents,
with_directory_contents_completing_project_file)
from anaconda_project.internal import conda_api
from anaconda_project.prepare import (prepare_without_interaction, unprepare, prepare_in_stages, PrepareSuccess,
PrepareFailure, _after_stage_success, _FunctionPrepareStage)
from anaconda_project.project import Project
from anaconda_project.project_file import DEFAULT_PROJECT_FILENAME
from anaconda_project.project_commands import ProjectCommand
from anaconda_project.requirements_registry.requirement import UserConfigOverrides
from anaconda_project.conda_manager import (push_conda_manager_class, pop_conda_manager_class, CondaManager,
CondaEnvironmentDeviations, CondaLockSet)
@pytest.mark.slow
def test_prepare_empty_directory():
def prepare_empty(dirname):
project = Project(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ)
assert result.errors == []
assert result
assert result.env_prefix is not None
assert dict(PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
assert dict() == strip_environ(environ)
assert result.command_exec_info is None
with_directory_contents(dict(), prepare_empty)
def test_prepare_bad_provide_mode():
def prepare_bad_provide_mode(dirname):
with pytest.raises(ValueError) as excinfo:
project = project_no_dedicated_env(dirname)
environ = minimal_environ()
prepare_in_stages(project, mode="BAD_PROVIDE_MODE", environ=environ)
assert "invalid provide mode" in repr(excinfo.value)
with_directory_contents(dict(), prepare_bad_provide_mode)
@pytest.mark.slow
@pytest.mark.skipif(platform.system() == 'Windows' and
not (sys.version_info.major == 3 and sys.version_info.minor == 4),
reason="on Windows, can't delete env dir except on python 3.4, don't know why")
def test_unprepare_empty_directory():
def unprepare_empty(dirname):
project = Project(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ)
assert result.errors == []
assert result
status = unprepare(project, result)
assert status.errors == []
assert status
with_directory_contents(dict(), unprepare_empty)
def test_unprepare_problem_project():
def unprepare_problems(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ)
assert not result
assert result.env_prefix is None
status = unprepare(project, result)
assert not status
assert status.status_description == 'Unable to load the project.'
assert status.errors == [('%s: variables section contains wrong value type 42, ' +
'should be dict or list of requirements') % project.project_file.basename]
with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: "variables:\n 42"}, unprepare_problems)
@pytest.mark.slow
def test_unprepare_nothing_to_do():
def unprepare_nothing(dirname):
project = Project(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ)
assert result.errors == []
assert result
status = unprepare(project, result, whitelist=[])
assert status.errors == []
assert status
assert status.status_description == 'Nothing to clean up.'
with_directory_contents(dict(), unprepare_nothing)
def test_default_to_system_environ():
def prepare_system_environ(dirname):
project = project_no_dedicated_env(dirname)
os_environ_copy = deepcopy(os.environ)
result = prepare_without_interaction(project)
assert project.directory_path == strip_environ(result.environ)['PROJECT_DIR']
# os.environ wasn't modified
assert os_environ_copy == os.environ
# result.environ inherits everything in os.environ
for key in os_environ_copy:
if key == 'PATH' and platform.system() == 'Windows' and result.environ[key] != os.environ[key]:
print("prepare changed PATH on Windows and ideally it would not.")
else:
if key == 'PATH' and result.environ[key] != os.environ[key]:
original = os.environ[key].split(os.pathsep)
updated = result.environ[key].split(os.pathsep)
print("ORIGINAL PATH: " + repr(original))
print("UPDATED PATH: " + repr(updated))
assert original == updated
assert result.errors == []
assert result
assert result.environ.get(key) == os.environ.get(key)
with_directory_contents_completing_project_file(
{
DEFAULT_PROJECT_FILENAME: """
packages: []
"""
}, prepare_system_environ)
def test_prepare_some_env_var_already_set():
def prepare_some_env_var(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(FOO='bar')
result = prepare_without_interaction(project, environ=environ)
assert result.errors == []
assert result
assert dict(FOO='bar', PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
assert dict(FOO='bar') == strip_environ(environ)
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
variables:
FOO: {}
"""}, prepare_some_env_var)
def test_prepare_some_env_var_not_set():
def prepare_some_env_var(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(BAR='bar')
result = prepare_without_interaction(project, environ=environ)
assert not result
assert result.env_prefix is not None
assert dict(BAR='bar') == strip_environ(environ)
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
variables:
FOO: {}
"""}, prepare_some_env_var)
def test_prepare_some_env_var_not_set_keep_going():
def prepare_some_env_var_keep_going(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(BAR='bar')
stage = prepare_in_stages(project, environ=environ, keep_going_until_success=True)
assert "Set up project." == stage.description_of_action
assert ['FOO', 'CONDA_PREFIX'] == [status.requirement.env_var for status in stage.statuses_before_execute]
# there's an initial stage to set the conda env
next_stage = stage.execute()
assert ['FOO', 'CONDA_PREFIX'] == [status.requirement.env_var for status in stage.statuses_after_execute]
assert not stage.failed
assert stage.environ['PROJECT_DIR'] == dirname
assert "Set up project." == next_stage.description_of_action
assert ['FOO', 'CONDA_PREFIX'] == [status.requirement.env_var for status in next_stage.statuses_before_execute]
stage = next_stage
for i in range(1, 10):
next_stage = stage.execute()
assert next_stage is not None
assert stage.failed
assert stage.environ['PROJECT_DIR'] == dirname
stage = next_stage
assert dict(BAR='bar') == strip_environ(environ)
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
variables:
FOO: {}
"""}, prepare_some_env_var_keep_going)
def test_prepare_with_app_entry():
def prepare_with_app_entry(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(FOO='bar')
env_path = conda_api.environ_get_prefix(environ)
result = prepare_without_interaction(project, environ=environ)
assert result
command = result.command_exec_info
assert 'FOO' in command.env
assert command.cwd == project.directory_path
if platform.system() == 'Windows':
commandpath = os.path.join(env_path, "python.exe")
else:
commandpath = os.path.join(env_path, "bin", "python")
assert command.args == [commandpath, 'echo.py', env_path, 'foo', 'bar']
p = command.popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate()
# strip is to pull off the platform-specific newline
assert out.decode().strip() == ("['echo.py', '%s', 'foo', 'bar']" % (env_path.replace("\\", "\\\\")))
assert err.decode() == ""
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
variables:
FOO: {}
commands:
default:
conda_app_entry: python echo.py ${PREFIX} foo bar
""",
"echo.py": """
from __future__ import print_function
import sys
print(repr(sys.argv))
"""}, prepare_with_app_entry)
def test_prepare_choose_command():
def check(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ, command_name='foo')
assert result.errors == []
assert result
assert os.path.join(project.directory_path, 'foo.py') in result.command_exec_info.args
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ, command_name='bar')
assert result.errors == []
assert result
assert os.path.join(project.directory_path, 'bar.py') in result.command_exec_info.args
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
commands:
foo:
bokeh_app: foo.py
bar:
bokeh_app: bar.py
packages:
- bokeh
""",
"foo.py": "# foo",
"bar.py": "# bar"}, check)
def test_prepare_command_not_in_project():
def check(dirname):
# create a command that isn't in the Project
project = project_no_dedicated_env(dirname)
command = ProjectCommand(name="foo",
attributes=dict(bokeh_app="foo.py",
env_spec=project.default_env_spec_name))
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ, command=command)
assert result.errors == []
assert result
assert os.path.join(project.directory_path, 'foo.py') in result.command_exec_info.args
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
commands:
decoy:
description: "do not use me"
unix: foobar
windows: foobar
""",
"foo.py": "# foo"}, check)
def test_prepare_bad_command_name():
def check(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(BAR='bar')
result = prepare_without_interaction(project, environ=environ, command_name="blah")
assert not result
assert result.env_prefix is None
assert result.errors
assert "Command name 'blah' is not in" in result.errors[0]
with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
"""}, check)
def _push_fake_env_creator():
class HappyCondaManager(CondaManager):
def __init__(self, frontend):
pass
def resolve_dependencies(self, package_specs, channels, platforms):
return CondaLockSet({})
def find_environment_deviations(self, prefix, spec):
return CondaEnvironmentDeviations(summary="all good",
missing_packages=(),
wrong_version_packages=(),
missing_pip_packages=(),
wrong_version_pip_packages=())
def fix_environment_deviations(self, prefix, spec, deviations=None, create=True):
pass
def remove_packages(self, prefix, packages):
pass
push_conda_manager_class(HappyCondaManager)
def _pop_fake_env_creator():
pop_conda_manager_class()
def test_prepare_choose_environment():
def check(dirname):
env_var = conda_api.conda_prefix_variable()
try:
_push_fake_env_creator()
project = Project(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ, env_spec_name='foo')
expected_path = project.env_specs['foo'].path(project.directory_path)
assert result.environ[env_var] == expected_path
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ, env_spec_name='bar')
assert result.errors == []
assert result
expected_path = project.env_specs['bar'].path(project.directory_path)
assert result.environ[env_var] == expected_path
finally:
_pop_fake_env_creator()
with_directory_contents(
{DEFAULT_PROJECT_FILENAME: """
name: blah
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
env_specs:
foo: {}
bar: {}
"""}, check)
def test_prepare_use_command_specified_env_spec():
def check(dirname):
env_var = conda_api.conda_prefix_variable()
try:
_push_fake_env_creator()
project = Project(dirname)
environ = minimal_environ()
# we specify the command name but not the
# env_spec_name but it should imply the proper env
# spec name.
result = prepare_without_interaction(project, environ=environ, command_name='hello')
expected_path = project.env_specs['foo'].path(project.directory_path)
assert result.environ[env_var] == expected_path
finally:
_pop_fake_env_creator()
with_directory_contents(
{DEFAULT_PROJECT_FILENAME: """
name: blah
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
env_specs:
default: {}
foo: {}
bar: {}
commands:
hello:
env_spec: foo
unix: echo hello
windows: echo hello
"""}, check)
def test_update_environ():
def prepare_then_update_environ(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(FOO='bar')
result = prepare_without_interaction(project, environ=environ)
assert result.errors == []
assert result
other = minimal_environ(BAR='baz')
result.update_environ(other)
assert dict(FOO='bar', BAR='baz', PROJECT_DIR=dirname) == strip_environ(other)
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
variables:
FOO: {}
"""}, prepare_then_update_environ)
def test_attempt_to_grab_result_early():
def early_result_grab(dirname):
project = project_no_dedicated_env(dirname)
first_stage = prepare_in_stages(project)
with pytest.raises(RuntimeError) as excinfo:
first_stage.result
assert "result property isn't available" in repr(excinfo.value)
with_directory_contents(dict(), early_result_grab)
def test_attempt_to_grab_statuses_early():
def early_status_grab(dirname):
project = project_no_dedicated_env(dirname)
first_stage = prepare_in_stages(project)
with pytest.raises(RuntimeError) as excinfo:
first_stage.statuses_after_execute
assert "statuses_after_execute isn't available" in repr(excinfo.value)
with_directory_contents(dict(), early_status_grab)
def test_skip_after_success_function_when_second_stage_fails():
state = {'state': 'start'}
def do_first(stage):
assert state['state'] == 'start'
state['state'] = 'first'
stage.set_result(
PrepareSuccess(statuses=(),
command_exec_info=None,
environ=dict(),
overrides=UserConfigOverrides(),
env_spec_name='first'),
[])
def last(stage):
assert state['state'] == 'first'
state['state'] = 'second'
stage.set_result(
PrepareFailure(statuses=(),
errors=[],
environ=dict(),
overrides=UserConfigOverrides(),
env_spec_name='last'),
[])
return None
return _FunctionPrepareStage(dict(), UserConfigOverrides(), "second", [], last)
first_stage = _FunctionPrepareStage(dict(), UserConfigOverrides(), "first", [], do_first)
def after(updated_statuses):
raise RuntimeError("should not have been called")
stage = _after_stage_success(first_stage, after)
assert stage.overrides is first_stage.overrides
assert isinstance(stage.environ, dict)
while stage is not None:
next_stage = stage.execute()
result = stage.result
if result.failed:
assert stage.failed
break
else:
assert not stage.failed
stage = next_stage
assert result.failed
assert state['state'] == 'second'
def test_run_after_success_function_when_second_stage_succeeds():
state = {'state': 'start'}
def do_first(stage):
assert state['state'] == 'start'
state['state'] = 'first'
stage.set_result(
PrepareSuccess(statuses=(),
command_exec_info=None,
environ=dict(),
overrides=UserConfigOverrides(),
env_spec_name='foo'),
[])
def last(stage):
assert state['state'] == 'first'
state['state'] = 'second'
stage.set_result(
PrepareSuccess(statuses=(),
command_exec_info=None,
environ=dict(),
overrides=UserConfigOverrides(),
env_spec_name='bar'),
[])
return None
return _FunctionPrepareStage(dict(), UserConfigOverrides(), "second", [], last)
first_stage = _FunctionPrepareStage(dict(), UserConfigOverrides(), "first", [], do_first)
def after(updated_statuses):
assert state['state'] == 'second'
state['state'] = 'after'
stage = _after_stage_success(first_stage, after)
assert stage.overrides is first_stage.overrides
assert stage.description_of_action == first_stage.description_of_action
assert stage.environ == first_stage.environ
assert stage.statuses_before_execute is first_stage.statuses_before_execute
stage.configure() # checking it doesn't raise
while stage is not None:
next_stage = stage.execute()
if hasattr(stage, '_stage'):
assert stage.statuses_after_execute is stage._stage.statuses_after_execute
assert stage.failed is stage._stage.failed
result = stage.result
if result.failed:
assert stage.failed
break
else:
assert not stage.failed
stage = next_stage
assert not result.failed
assert state['state'] == 'after'
def _monkeypatch_download_file(monkeypatch, dirname, filename='MYDATA', checksum=None):
from tornado import gen
@gen.coroutine
def mock_downloader_run(self, loop):
class Res:
pass
res = Res()
res.code = 200
with open(os.path.join(dirname, filename), 'w') as out:
out.write('data')
if checksum:
self._hash = checksum
raise gen.Return(res)
monkeypatch.setattr("anaconda_project.internal.http_client.FileDownloader.run", mock_downloader_run)
def test_provide_whitelist(monkeypatch):
def check(dirname):
from anaconda_project.requirements_registry.requirements.conda_env import CondaEnvRequirement
_monkeypatch_download_file(monkeypatch, dirname, filename="nope")
no_foo = [('missing requirement to run this project: A downloaded file which is ' + 'referenced by FOO.'),
' Environment variable FOO is not set.']
# whitelist only the env req by class
project = project_no_dedicated_env(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, provide_whitelist=(CondaEnvRequirement, ), environ=environ)
assert result.errors == no_foo
# whitelist by instance
env_req = [req for req in project.requirements(None) if isinstance(req, CondaEnvRequirement)][0]
result = prepare_without_interaction(project, provide_whitelist=(env_req, ), environ=environ)
assert result.errors == no_foo
# whitelist by variable name
result = prepare_without_interaction(project, provide_whitelist=(env_req.env_var, ), environ=environ)
assert result.errors == no_foo
# whitelist the download
result = prepare_without_interaction(project,
provide_whitelist=(env_req, project.download_requirements(None)[0]),
environ=environ)
assert result.errors == []
assert 'FOO' in result.environ
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
downloads:
FOO: "http://example.com/nope"
"""}, check)
|
py | 1a41fb0340f4522e0e2701acbfd79de0713f286e | # clone
import os
import repo
# Recursively delete a folder tree, clearing permissions so read-only files can be removed
def rmtree(top):
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
filename = os.path.join(root, name)
os.chmod(filename, 0o777)
os.remove(filename)
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(top)
def run(first_flag):
    root_path = "project" # directory where repositories are cloned
    print("Enter the URL of the repository to analyze")
    url = input(">>> ")
    # clean up only on the first run
    if os.path.isdir(root_path):
        if first_flag == 0:
            rmtree(root_path) # delete the project folder if it already exists
clone_flag = True
while clone_flag:
try:
            # check that the input looks like a URL (contains "://")
            if "://" in url:
                file_name = url[url.rfind('/') + 1:] # extract the trailing path segment as the repo name
                path = root_path + "/" + file_name # project/..
                # if this repo was already cloned, append (i) to the path
i = 1
while os.path.isdir(path):
path = root_path + "/" + file_name + "({:d})".format(i)
i += 1
                repo.clone_repo(url, path) # clone the repository
                print("Repository cloned\n")
clone_flag = False
except Exception as err:
                print("Could not clone the repository\nPlease enter the URL again")
url = input(">>> ") |
py | 1a41fbd09cdaef728101723adff7688797d68f8d | from django.contrib import admin
from blogs.models import Post, Category_post, Comment
from django_summernote.admin import SummernoteModelAdmin
# Register your models here.
class PostAdmin(SummernoteModelAdmin):
summernote_fields = ('content',)
list_display = ('title', 'slug', 'short_desciption', 'status','created_on')
list_filter = ("status",)
search_fields = ['title', 'content']
prepopulated_fields = {'slug': ('title',)}
class CommentAdmin(admin.ModelAdmin):
list_display = ('name', 'email', 'body')
admin.site.register(Post, PostAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(Category_post)
|
py | 1a41fe10909df9b66c5dbb2d69609c01c537feeb | from scipy.spatial import ConvexHull, Delaunay
import numpy as np
class WeightedDelaunay:
def __init__(self, points, weights):
self.points = points
self.weights = weights
self.complete = False
self.tri = None
    def triangulation(self):
        # Regular (weighted Delaunay) triangulation via the classic lifting map:
        # each point p with weight w is lifted to (p, |p|^2 - w^2); the lower
        # convex hull of the lifted points projects down to the triangulation.
        if not self.complete:
            num, dim = np.shape(self.points)
            lifted = np.zeros((num, dim + 1))
            for i in range(num):
                p = self.points[i, :]
                lifted[i, :] = np.append(p, np.sum(p ** 2) - self.weights[i] ** 2)
            # an extra point far above the paraboloid ensures that every
            # upper-hull facet contains its index, so those can be filtered out
            lifted = np.vstack((lifted, np.append(np.zeros((1, dim)), 1e12)))
            hull = ConvexHull(lifted)
            delaunay = []
            for simplex in hull.simplices:
                if num not in simplex:
                    delaunay.append(simplex.tolist())
            self.tri = delaunay
            self.complete = True
        return self.tri
def add_point(self, point, weight):
num, dim = np.shape(self.points)
tmp = np.ndarray((num + 1, dim))
for i in range(num):
tmp[i] = self.points[i]
tmp[num] = point
self.points = tmp
self.weights.append(weight)
self.complete = False
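# A minimal usage sketch (hypothetical values, not part of the original module):
if __name__ == "__main__":
    pts = np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 2.0], [3.0, 2.0]])
    wd = WeightedDelaunay(pts, [0.0, 0.5, 0.0, 0.5])
    print(wd.triangulation())   # simplices as lists of indices into pts
    wd.add_point(np.array([1.5, 1.0]), 0.2)
    print(wd.triangulation())   # recomputed lazily after the insertion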
|
py | 1a41fec9829d02ff8489a45f516d8ca6282b1573 | """All functionality concerned with presentation to the user."""
from pandas_profiling.report.structure.report import get_report_structure
|
py | 1a41feee1931ccb62d52913c3b39ca37b02384c8 | # coding=utf-8
import tensorflow as tf
import tensorflow_compression as tfc
import os
import sys
import math
import numpy as np
# tf.enable_eager_execution()
from collections import namedtuple
# BASE_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = '/home/wenxuanzheng/pc_compression/pc_compression'
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import part_dataset
from transform_nets import input_transform_net
import tf_utils
def py_func_decorator(output_types=None, output_shapes=None, stateful=True, name=None):
def decorator(func):
def call(*args, **kwargs):
return tf.contrib.framework.py_func(
func=func,
args=args, kwargs=kwargs,
output_types=output_types, output_shapes=output_shapes,
stateful=stateful, name=name
)
return call
return decorator
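# Wraps an indexable dataset (supporting len() and integer __getitem__) so that
# tf.data can pull items by index through a py_func, with parallel map calls.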
def from_indexable(iterator, output_types, output_shapes=None, num_parallel_calls=None, stateful=True, name=None):
ds = tf.data.Dataset.range(len(iterator))
@py_func_decorator(output_types, output_shapes, stateful=stateful, name=name)
def index_to_entry(index):
return iterator[index]
return ds.map(index_to_entry, num_parallel_calls=num_parallel_calls)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
0.0001,
                        batch,  # global_step: the current iteration count
10000,
0.7,
                        staircase = True)  # floor(global_step / decay_steps): stepwise decay
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
0.5,
batch,
20000,
0.5,
staircase=True)
bn_decay = tf.minimum(0.99, 1 - bn_momentum)
return bn_decay
def input_fn(features, batch_size, preprocess_threads, repeat=True, prefetch_size=1):
with tf.device('/cpu:0'):
        # requires an object supporting len() and integer indexing
# dataset = tf.data.Dataset.from_tensor_slices(features)
dataset = from_indexable(features, output_types=tf.float32,output_shapes=[2048, 3], num_parallel_calls=preprocess_threads)
if repeat:
dataset = dataset.shuffle(buffer_size=len(features))
dataset = dataset.repeat()
dataset = dataset.batch(batch_size, drop_remainder=True)
        # pipeline: prefetch the next batches while the current ones are consumed
dataset = dataset.prefetch(buffer_size=prefetch_size)
return dataset
def model_fn(features, labels, mode, params):
'''
:param features: batch_features from input_fn
:param labels: batch_labels from input_fn
:param mode: An instance of tf.estimator.ModeKeys
:param params: Additional configuration
:return:
'''
    # labels are not used by this autoencoder-style model_fn
    del labels
if params.get('decompress') is None:
params['decompress'] = False
# if params.decompression:
# assert mode == tf.estimator.ModeKeys.PREDICT, 'Decompression must use prediction mode'
params = namedtuple('Struct', params.keys())(*params.values())
training = (mode == tf.estimator.ModeKeys.TRAIN)
num_points = (params.batch_size * params.num_points)
pc = features
bn_decay = get_bn_decay(tf.train.get_global_step())
learning_rate = get_learning_rate(tf.train.get_global_step())
tf.summary.scalar('bn_decay', bn_decay)
tf.summary.scalar('learning_rate', learning_rate)
# ============= encoder =============
    nsamples = params.knn
    y = pc_encoder(pc, nsamples, is_training=training, bn_decay=bn_decay)
entropy_bottleneck = tfc.EntropyBottleneck()
y_tilde, likelihoods = entropy_bottleneck(y, training=True)
# ============= decoder =============
x_tilde = pc_decoder(y_tilde, is_training=training, bn_decay=bn_decay)
# number of bits divided by number of points
train_bpp = tf.reduce_sum(tf.log(likelihoods)) / (-np.log(2) * num_points)
    # prediction mode: return the results directly
if mode == tf.estimator.ModeKeys.PREDICT:
string = entropy_bottleneck.compress(y)
predictions = {
'x_tilde': x_tilde,
'likelihoods': likelihoods,
'y_tilde': y_tilde
}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)  # emit the predictions dict
    # training and evaluation
losses = tf_utils.get_loss(x_tilde, pc)
rd_loss = losses + params.lmbda * train_bpp
# tf.summary.scalar('likelihoods',likelihoods)
tf.summary.scalar('loss', losses)
tf.summary.scalar('rd_loss', rd_loss)
tf.summary.scalar('bpp', train_bpp)
if mode == tf.estimator.ModeKeys.TRAIN:
main_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
main_step = main_optimizer.minimize(rd_loss, global_step=tf.train.get_global_step())
aux_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
aux_step = aux_optimizer.minimize(entropy_bottleneck.losses[0])
train_op = tf.group(main_step, aux_step, entropy_bottleneck.updates[0])
return tf.estimator.EstimatorSpec(mode, loss=rd_loss, train_op=train_op)
if mode == tf.estimator.ModeKeys.EVAL:
summary_hook = tf.train.SummarySaverHook(
save_steps=5,
output_dir=os.path.join(params.checkpoint_dir, 'eval'),
summary_op=tf.summary.merge_all())
return tf.estimator.EstimatorSpec(mode, loss=rd_loss, evaluation_hooks=[summary_hook])
def pc_encoder(point_cloud, nsamples, is_training, bn_decay=None):
    nn_dis, idx_batch = tf_utils.get_knn(point_cloud, nsamples)
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
point_dim = point_cloud.get_shape()[2].value
idx_batch = tf.cast(idx_batch, dtype=tf.int32)
latent_feature = {}
# con_point = tf.concat([point_cloud, cov_batch], axis=2)
# encoder_input = tf.expand_dims(con_point, -1) # (32 2048 3 1)
encoder_input = tf.expand_dims(point_cloud, -1) # (32 2048 3 1)
# (32, 2048, 1, 64)
net = tf_utils.conv2d(encoder_input, 64, [1, 3],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='mlp_1', bn_decay=bn_decay)
# (32, 2048, 1, 64)
net = tf_utils.conv2d(net, 64, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='mlp_2', bn_decay=bn_decay)
# (32, 2048, 1, 64)
net = tf_utils.conv2d(net, 64, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='mlp_3', bn_decay=bn_decay)
net = tf_utils.conv2d(net, 128, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='mlp_4', bn_decay=bn_decay)
net = tf_utils.conv2d(net, 1024, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='mlp_5', bn_decay=bn_decay)
global_feat = tf_utils.max_pool2d(net, [num_point, 1],
padding='VALID', scope='maxpool')
net = tf.reshape(global_feat, [batch_size, -1])
return net
def pc_decoder(y_tilde, is_training, bn_decay):
# UPCONV Decoder
batch_size = y_tilde.get_shape()[0].value
net = tf_utils.fully_connected(y_tilde, 1024, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_utils.fully_connected(net, 1024, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
net = tf_utils.fully_connected(net, 2048 * 3, activation_fn=None, scope='fc3')
net = tf.reshape(net, (batch_size, 2048, 3))
return net
if __name__=='__main__':
tf.enable_eager_execution()
TRAIN_DATASET = part_dataset.PartDataset(
root='/data/dataset/shapenetcore_partanno_segmentation_benchmark_v0', npoints=2048,
classification=False, class_choice=None, split='trainval')
print('=============')
print(input_fn(TRAIN_DATASET,2,8,repeat=True,prefetch_size=6))
|
py | 1a41ff324f2e91b1b2878c12b6b7d04034689760 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shareyourmeal.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | 1a41ff8e46fbad02d278c63c4708fb2df6a03850 | #Input: two strings of text of equal length
#Output: similarity score s based on a +1/-1 scoring scheme
def scoring(seq1, seq2):
s = 0
    for i in range(len(seq1)):
if seq1[i] == seq2[i]:
s += 1
else:
s += -1
return s
print(scoring('ACCTCGATCGCTAGCTAACTAGC','ACTCCGTAGGCTGCTTAGTTACC'))
print(scoring('ACCTCGATCGCTAGCTAACTAGC','ACCTCGATCGCTAGCTAACTACC'))
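# The two calls above should print 1 (12 matches - 11 mismatches) and
# 21 (22 matches - 1 mismatch), given the +1/-1 scheme above.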
|
py | 1a420147eb79a2016b7c3e9bff61ca995c41c45c | # -*- coding: utf-8 -*-
#
# Dataverse Documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 16 09:34:18 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from datetime import datetime
sys.path.insert(0, os.path.abspath('../../'))
import sphinx_bootstrap_theme
# Activate the theme.
# html_theme = 'bootstrap'
# html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Dataverse'
copyright = u'%d, The President & Fellows of Harvard College' % datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5.6'
# The full version, including alpha/beta/rc tags.
release = '5.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Custom Setup: add the CSS file to the app's theme.
# def setup(app):
# app.add_stylesheet( "docsdataverse_org.css" )
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
# 'navbar_title': "Demo",
# Tab name for entire site. (Default: "Site")
# 'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
# 'navbar_links': [
# ("Examples", "examples"),
# ("Link", "http://example.com", True),
# ],
'navbar_links': [
("View 3.6.2 Guides", "http://docs.dataverse.org/en/3.6.2/", True),
],
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': -1,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "false",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
#'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
#'source_link_position': "nav",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme such
# as "amelia" or "cosmo".
#
# Themes:
# * amelia
# * cerulean
# * cosmo
# * cyborg
# * cupid (v3 only)
# * flatly
# * journal
# * lumen (v3 only)
# * readable
# * simplex
# * slate
# * spacelab
# * spruce (v2 only)
# * superhero
# * united
# * yeti (v3 only)
#'bootswatch_theme': "cupid",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
# ``get_html_theme_path`` returns a list, so you can concatenate with
# any other theme directories you would like.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> Documentation".
html_title = 'Dataverse.org'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_static/fontcustom-preview.html']
#html_js_files = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
#html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
html_sidebars = {'**': ['searchbox.html', 'sidebartoc.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
htmlhelp_basename = 'Dataversedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Dataverse.tex', u'Dataverse Documentation',
u'Dataverse Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dataverse40', u'Dataverse 4.0 Documentation',
[u'Dataverse Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Dataverse40', u'Dataverse 4.0 Documentation',
u'Dataverse Team', 'Dataverse40', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Dataverse'
epub_author = u'Dataverse Team'
epub_publisher = u'Dataverse Team'
epub_copyright = u'%d, The President & Fellows of Harvard College' % datetime.now().year
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'Consilience Documentation'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Suppress "WARNING: unknown mimetype for ..." https://github.com/IQSS/dataverse/issues/3391
suppress_warnings = ['epub.unknown_project_files']
rst_prolog = """
.. |toctitle| replace:: Contents:
.. |anotherSub| replace:: Yes, there can be multiple.
"""
|
py | 1a4201e6b7712afdf5a00574868114d1dc7380bf | #!/usr/bin/env python
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
from contextlib import closing
import os
import socket
from tornado.concurrent import Future
from tornado.netutil import bind_sockets, Resolver
from tornado.tcpclient import TCPClient, _Connector
from tornado.tcpserver import TCPServer
from tornado.testing import AsyncTestCase, gen_test
from tornado.test.util import skipIfNoIPv6, unittest, refusing_port
# Fake address families for testing. Used in place of AF_INET
# and AF_INET6 because some installations do not have AF_INET6.
AF1, AF2 = 1, 2
class TestTCPServer(TCPServer):
def __init__(self, family):
super(TestTCPServer, self).__init__()
self.streams = []
sockets = bind_sockets(None, 'localhost', family)
self.add_sockets(sockets)
self.port = sockets[0].getsockname()[1]
def handle_stream(self, stream, address):
self.streams.append(stream)
def stop(self):
super(TestTCPServer, self).stop()
for stream in self.streams:
stream.close()
class TCPClientTest(AsyncTestCase):
def setUp(self):
super(TCPClientTest, self).setUp()
self.server = None
self.client = TCPClient()
def start_server(self, family):
if family == socket.AF_UNSPEC and 'TRAVIS' in os.environ:
self.skipTest("dual-stack servers often have port conflicts on travis")
self.server = TestTCPServer(family)
return self.server.port
def stop_server(self):
if self.server is not None:
self.server.stop()
self.server = None
def tearDown(self):
self.client.close()
self.stop_server()
super(TCPClientTest, self).tearDown()
def skipIfLocalhostV4(self):
# The port used here doesn't matter, but some systems require it
# to be non-zero if we do not also pass AI_PASSIVE.
Resolver().resolve('localhost', 80, callback=self.stop)
addrinfo = self.wait()
families = set(addr[0] for addr in addrinfo)
if socket.AF_INET6 not in families:
self.skipTest("localhost does not resolve to ipv6")
@gen_test
def do_test_connect(self, family, host):
port = self.start_server(family)
stream = yield self.client.connect(host, port)
with closing(stream):
stream.write(b"hello")
data = yield self.server.streams[0].read_bytes(5)
self.assertEqual(data, b"hello")
def test_connect_ipv4_ipv4(self):
self.do_test_connect(socket.AF_INET, '127.0.0.1')
def test_connect_ipv4_dual(self):
self.do_test_connect(socket.AF_INET, 'localhost')
@skipIfNoIPv6
def test_connect_ipv6_ipv6(self):
self.skipIfLocalhostV4()
self.do_test_connect(socket.AF_INET6, '::1')
@skipIfNoIPv6
def test_connect_ipv6_dual(self):
self.skipIfLocalhostV4()
if Resolver.configured_class().__name__.endswith('TwistedResolver'):
self.skipTest('TwistedResolver does not support multiple addresses')
self.do_test_connect(socket.AF_INET6, 'localhost')
def test_connect_unspec_ipv4(self):
self.do_test_connect(socket.AF_UNSPEC, '127.0.0.1')
@skipIfNoIPv6
def test_connect_unspec_ipv6(self):
self.skipIfLocalhostV4()
self.do_test_connect(socket.AF_UNSPEC, '::1')
def test_connect_unspec_dual(self):
self.do_test_connect(socket.AF_UNSPEC, 'localhost')
@gen_test
def test_refused_ipv4(self):
cleanup_func, port = refusing_port()
self.addCleanup(cleanup_func)
with self.assertRaises(IOError):
yield self.client.connect('127.0.0.1', port)
class TestConnectorSplit(unittest.TestCase):
def test_one_family(self):
# These addresses aren't in the right format, but split doesn't care.
primary, secondary = _Connector.split(
[(AF1, 'a'),
(AF1, 'b')])
self.assertEqual(primary, [(AF1, 'a'),
(AF1, 'b')])
self.assertEqual(secondary, [])
def test_mixed(self):
primary, secondary = _Connector.split(
[(AF1, 'a'),
(AF2, 'b'),
(AF1, 'c'),
(AF2, 'd')])
self.assertEqual(primary, [(AF1, 'a'), (AF1, 'c')])
self.assertEqual(secondary, [(AF2, 'b'), (AF2, 'd')])
class ConnectorTest(AsyncTestCase):
class FakeStream(object):
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def setUp(self):
super(ConnectorTest, self).setUp()
self.connect_futures = {}
self.streams = {}
self.addrinfo = [(AF1, 'a'), (AF1, 'b'),
(AF2, 'c'), (AF2, 'd')]
def tearDown(self):
# Unless explicitly checked (and popped) in the test, we shouldn't
# be closing any streams
for stream in self.streams.values():
self.assertFalse(stream.closed)
super(ConnectorTest, self).tearDown()
def create_stream(self, af, addr):
future = Future()
self.connect_futures[(af, addr)] = future
return future
def assert_pending(self, *keys):
self.assertEqual(sorted(self.connect_futures.keys()), sorted(keys))
def resolve_connect(self, af, addr, success):
future = self.connect_futures.pop((af, addr))
if success:
self.streams[addr] = ConnectorTest.FakeStream()
future.set_result(self.streams[addr])
else:
future.set_exception(IOError())
def start_connect(self, addrinfo):
conn = _Connector(addrinfo, self.io_loop, self.create_stream)
# Give it a huge timeout; we'll trigger timeouts manually.
future = conn.start(3600)
return conn, future
def test_immediate_success(self):
conn, future = self.start_connect(self.addrinfo)
self.assertEqual(list(self.connect_futures.keys()),
[(AF1, 'a')])
self.resolve_connect(AF1, 'a', True)
self.assertEqual(future.result(), (AF1, 'a', self.streams['a']))
def test_immediate_failure(self):
# Fail with just one address.
conn, future = self.start_connect([(AF1, 'a')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assertRaises(IOError, future.result)
def test_one_family_second_try(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.resolve_connect(AF1, 'b', True)
self.assertEqual(future.result(), (AF1, 'b', self.streams['b']))
def test_one_family_second_try_failure(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.resolve_connect(AF1, 'b', False)
self.assertRaises(IOError, future.result)
def test_one_family_second_try_timeout(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
# trigger the timeout while the first lookup is pending;
# nothing happens.
conn.on_timeout()
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.resolve_connect(AF1, 'b', True)
self.assertEqual(future.result(), (AF1, 'b', self.streams['b']))
def test_two_families_immediate_failure(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'), (AF2, 'c'))
self.resolve_connect(AF1, 'b', False)
self.resolve_connect(AF2, 'c', True)
self.assertEqual(future.result(), (AF2, 'c', self.streams['c']))
def test_two_families_timeout(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_timeout()
self.assert_pending((AF1, 'a'), (AF2, 'c'))
self.resolve_connect(AF2, 'c', True)
self.assertEqual(future.result(), (AF2, 'c', self.streams['c']))
# resolving 'a' after the connection has completed doesn't start 'b'
self.resolve_connect(AF1, 'a', False)
self.assert_pending()
def test_success_after_timeout(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_timeout()
self.assert_pending((AF1, 'a'), (AF2, 'c'))
self.resolve_connect(AF1, 'a', True)
self.assertEqual(future.result(), (AF1, 'a', self.streams['a']))
# resolving 'c' after completion closes the connection.
self.resolve_connect(AF2, 'c', True)
self.assertTrue(self.streams.pop('c').closed)
def test_all_fail(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_timeout()
self.assert_pending((AF1, 'a'), (AF2, 'c'))
self.resolve_connect(AF2, 'c', False)
self.assert_pending((AF1, 'a'), (AF2, 'd'))
self.resolve_connect(AF2, 'd', False)
# one queue is now empty
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.assertFalse(future.done())
self.resolve_connect(AF1, 'b', False)
self.assertRaises(IOError, future.result)
|
py | 1a4204819b3a43bbc8f49f150e34f787260ee135 | import os
import paddle
import math
from paddle.optimizer.optimizer import Optimizer
from collections import defaultdict
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.framework import Variable
from paddle.fluid import layers
from paddle.fluid import unique_name
from paddle.fluid.framework import in_dygraph_mode, _dygraph_tracer
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph import base as imperative_base
import numpy as np
from paddle.utils.cpp_extension import get_build_directory
# load the custom ranger op library, if it has already been built
build_dir = get_build_directory()
op_lib = os.path.join(build_dir, "ranger_op/ranger_op.so")
if op_lib is not None and os.path.isfile(op_lib):
# Maybe it has been loadad by `ext_utils.load`
paddle.utils.cpp_extension.load_op_meta_info_and_register_op(
op_lib)
class Ranger(Optimizer):
_moment1_acc_str = "moment1"
_moment2_acc_str = "moment2"
_beta1_pow_acc_str = "beta1_pow_acc"
_beta2_pow_acc_str = "beta2_pow_acc"
_slow_str = "slow"
#_inf_norm_acc_str = "inf_norm"
def __init__(self,
learning_rate=0.001,
alpha=0.5, k=6, # Look Ahead
beta1=0.95, beta2=0.999, epsilon=1e-5, weight_decay=0.0, N_sma_threshhold=5., # RAdam
use_gc=True,gc_conv_only=False, # gradient centralization
parameters=None,
name=None):
        if not isinstance(beta1, Variable):
            if not 0 <= beta1 < 1:
                raise ValueError(
                    "Invalid value of beta1, expect beta1 in [0,1).")
        if not isinstance(beta2, Variable):
            if not 0 <= beta2 < 1:
                raise ValueError(
                    "Invalid value of beta2, expect beta2 in [0,1).")
        if not isinstance(epsilon, Variable):
            if not 0 <= epsilon:
                raise ValueError(
                    "Invalid value of epsilon, expect epsilon >= 0.")
assert (
0.0 <= alpha <= 1.0
), "alpha should be larger or equal to 0.0, and less or equal than 1.0"
assert (isinstance(k, int) and k > 0), "k should be a positive integer"
super(Ranger, self).__init__(
learning_rate=learning_rate,
parameters=parameters,
weight_decay=None,
name=name)
self.type = "ranger"
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
self._weight_decay = weight_decay
self._N_sma_threshhold = N_sma_threshhold
        self._N_sma_max = 2 / (1 - beta2) - 1  # rho_infinity: the maximum length of the approximated SMA
self.use_gc = use_gc
self.gc_gradient_threshold = 3 if gc_conv_only else 1
self.alpha = alpha
self.k = k
self.helper = LayerHelper(self.__class__.__name__)
#self._k_var = None
#self._global_step_var = None
#self._step_size_var = None
#self._sma_flag_var = None
self._global_step = 0
self._step_size = None
self._sma_flag = None
def _get_accumulator(self, name, param):
if self._name is not None:
name = self._name + "_" + name
if (name not in self._accumulators or
param.name not in self._accumulators[name]):
raise Exception("Accumulator {} does not exist for parameter {}".
format(name, param.name))
return self._accumulators[name][param.name]
def _add_moments_pows(self, p):
self._add_accumulator(self._moment1_acc_str, p)
self._add_accumulator(self._moment2_acc_str, p)
#self._add_accumulator(self._inf_norm_acc_str, p)
self._add_accumulator(
name=self._beta1_pow_acc_str,
param=p,
fill_value=self._beta1,
shape=[1])
self._add_accumulator(
name=self._beta2_pow_acc_str,
param=p,
fill_value=self._beta2,
shape=[1])
"""
def _increment_global_var(self):
        # if this does not work, move the increment into the C++ op file
if self._global_step_var is None:
self._global_step_var = layers.create_global_var(
name=unique_name.generate("lookhead_step"),
shape=[1],
value=0,
dtype='int32',
persistable=True
)
self.helper.append_op(
type='increment',
inputs={'X': [self._global_step_var]},
outputs={'Out':[self._global_step_var]},
attrs={'step': 1.0}
)
"""
def _cal_sma(self):
"""
beta2_pow = self._beta2**self._global_step_var
beta1_pow = self._beta1**self._global_step_var
N_sma = self._N_sma_max - 2. * self._global_step_var * beta2_pow / (1. - beta2_pow)
sma_flag = N_sma > self._N_sma_threshhold
if sma_flag:
step_size = paddle.sqrt( (1.-beta2_pow) * (N_sma-4.) / (self._N_sma_max-4.) * (N_sma-2.) / N_sma * self._N_sma_max /(self._N_sma_max-2.)) / (1.-beta1_pow)
else:
step_size = 1./(1. - beta1_pow)
if self._step_size_var is None:
self._step_size_var = layers.create_global_var(
name=unique_name.generate("radam_step_size"),
shape=[1],
value=step_size,
dtype='int32',
persistable=True
)
if self._sma_flag_var is None:
self._sma_flag_var = layers.create_global_var(
name=unique_name.generate("radam_sma_flag"),
shape=[1],
value=sma_flag,
dtype='bool',
persistable=True
)
paddle.assign(step_size, self._step_size_var)
paddle.assign(sma_flag, self._sma_flag_var)
"""
beta2_pow = self._beta2**self._global_step
beta1_pow = self._beta1**self._global_step
N_sma = self._N_sma_max - 2. * self._global_step * beta2_pow / (1. - beta2_pow)
sma_flag = N_sma > self._N_sma_threshhold
if sma_flag:
step_size = math.sqrt( (1.-beta2_pow) * (N_sma-4.) / (self._N_sma_max-4.) * (N_sma-2.) / N_sma * self._N_sma_max /(self._N_sma_max-2.)) / (1.-beta1_pow)
else:
step_size = 1./(1. - beta1_pow)
self._step_size = step_size
self._sma_flag = sma_flag
def _append_optimize_op(self, block, param_and_grad):
        # gradient centralization applies to the grad, not the param
        # GC operation for Conv layers and FC layers
        # GC can be viewed as projected gradient descent with a constrained loss
        # function; the constrained loss and its gradient have better Lipschitz
        # properties, so training becomes more efficient and stable.
g_tmp = param_and_grad[1]
if self.use_gc and g_tmp.dim() > self.gc_gradient_threshold:
#print("grad before gc:", g_tmp)
g_tmp = g_tmp - g_tmp.mean(axis=tuple(range(1, g_tmp.dim())), keepdim=True)
#print("grad after gc:",g_tmp)
"""
moment = self._get_accumulator(self._moment1_acc_str, param_and_grad[0])
inf_norm = self._get_accumulator(self._inf_norm_acc_str,
param_and_grad[0])
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param_and_grad[0])
# create the adamax optimize op
adamax_op = block.append_op(
type="adamax",
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"LearningRate": self._create_param_lr(param_and_grad),
"Moment": moment,
"InfNorm": inf_norm,
"Beta1Pow": beta1_pow_acc
},
outputs={
"ParamOut": param_and_grad[0],
"MomentOut": moment,
"InfNormOut": inf_norm
},
attrs={
"beta1": self._beta1,
"beta2": self._beta2,
"epsilon": self._epsilon
},
stop_gradient=True)"""
# RAdam
assert isinstance(block, framework.Block)
moment1 = self._get_accumulator(self._moment1_acc_str,
param_and_grad[0])
moment2 = self._get_accumulator(self._moment2_acc_str,
param_and_grad[0])
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param_and_grad[0])
beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
param_and_grad[0])
block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": g_tmp,
"LearningRate": self._create_param_lr(param_and_grad),
"Moment1": moment1,
"Moment2": moment2,
"Beta1Pow": beta1_pow_acc,
"Beta2Pow": beta2_pow_acc,
#"StepSize": [self._step_size_var],
#"SmaFlag": [self._sma_flag_var]
},
outputs={
"ParamOut": param_and_grad[0],
"Moment1Out": moment1,
"Moment2Out": moment2,
"Beta1PowOut": beta1_pow_acc,
"Beta2PowOut": beta2_pow_acc
},
attrs={
"beta1": self._beta1,
"beta2": self._beta2,
"epsilon": self._epsilon,
"weight_decay": self._weight_decay,
"step_size": self._step_size,
"sma_flag": self._sma_flag
},
stop_gradient=True)
#print("after radam, param:", param_and_grad[0])
#print("after radam, grad:", param_and_grad[1])
# Look ahead
"""
one_var = paddle.ones(shape=[1], dtype='int32', name='lookahead_ones')
zero_var = paddle.zeros(shape=[1], dtype='int32', name='lookhead_zeros')
k_var = layers.create_global_var(
name=unique_name.generate("lookahead_k"),
shape=[1],
value=self.k,
dtype='int32',
persistable=True
)
        # could paddle.mod be used here instead? see https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/mod_cn.html#mod
mod = paddle.mod(self._global_step_var, k_var)
        cond_1 = paddle.equal(self._global_step_var, one_var) # is global_step equal to 1?
cond_1 = paddle.cast(cond_1, dtype='float32')
        cond_2 = paddle.equal(mod, zero_var) # is global_step % k equal to 0?
cond_2 = paddle.cast(cond_2, dtype='float32')
        slow_var = self._get_accumulator(self._slow_str, param_and_grad[0]) # cached slow weights
        # initialize slow_var on the first step
tmp_var = cond_1 * param_and_grad[0] + (1 - cond_1) * slow_var
paddle.assign(tmp_var, slow_var)
        # blend the fast model params into the slow weights
tmp_var = self.alpha * param_and_grad[0] + (1.0 - self.alpha) * slow_var
tmp_var_1 = cond_2 * tmp_var + (1 - cond_2) * param_and_grad[0]
paddle.assign(tmp_var_1, param_and_grad[0])
tmp_var_1 = cond_2 * tmp_var + (1 - cond_2) * slow_var
paddle.assign(tmp_var_1, slow_var)
"""
        # if-based (dynamic-graph) formulation of Look Ahead
mod = self._global_step % self.k
        slow_var = self._get_accumulator(self._slow_str, param_and_grad[0]) # cached slow weights
if self._global_step == 1:
paddle.assign(param_and_grad[0], slow_var)
if mod == 0:
tmp_var = self.alpha * param_and_grad[0] + (1.0 - self.alpha) * slow_var
paddle.assign(tmp_var, slow_var)
paddle.assign(tmp_var, param_and_grad[0])
return None
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
for p in parameters:
self._add_accumulator(self._slow_str, p)
self._add_moments_pows(p)
@imperative_base.no_grad
@framework.dygraph_only
def step(self):
        # Look Ahead: advance the shared global step
#self._increment_global_var()
self._global_step += 1
        # RAdam: compute N_sma and step_size, then pass the boolean (N_sma > N_sma_threshhold) and step_size to the op
self._cal_sma()
if not isinstance(self._parameter_list[0], dict):
params_grads = []
for param in self._parameter_list:
if param.stop_gradient:
continue
if param._grad_ivar() is not None:
grad_var = param._grad_ivar()
if hasattr(grad_var, "_is_sparse") and grad_var._is_sparse(
) and self.regularization is not None:
raise RuntimeError(
"Ranger don't support weight_decay with sparse parameters, please set it to None."
)
params_grads.append((param, grad_var))
#print(params_grads[0])
#print(params_grads[1])
optimize_ops = self._apply_optimize(
loss=None, startup_program=None, params_grads=params_grads)
else:
# optimize parameters in groups
for param_group in self._param_groups:
params_grads = defaultdict(lambda: list())
for param in param_group['params']:
if param.stop_gradient:
continue
if param._grad_ivar() is not None:
grad_var = param._grad_ivar()
params_grads['params'].append((param, grad_var))
params_grads.update(
{k: v
for k, v in param_group.items() if k != 'params'})
#print(params_grads[0])
#print(params_grads[1])
self._apply_optimize(
loss=None, startup_program=None, params_grads=params_grads)
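# A minimal usage sketch (assumes the custom ranger_op extension has been built
# to <build_dir>/ranger_op/ranger_op.so as loaded above; the data is hypothetical):
if __name__ == "__main__":
    layer = paddle.nn.Linear(4, 1)
    opt = Ranger(learning_rate=1e-3, parameters=layer.parameters())
    loss = layer(paddle.randn([8, 4])).mean()
    loss.backward()
    opt.step()        # RAdam update via the custom op, then Look Ahead blending
    opt.clear_grad()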
|
py | 1a420485ec55c81382546c60f27568daf8d69d79 | # encoding: utf-8
# module _dbus_bindings
# from /usr/lib/python3/dist-packages/_dbus_bindings.cpython-35m-x86_64-linux-gnu.so
# by generator 1.145
"""
Low-level Python bindings for libdbus. Don't use this module directly -
the public API is provided by the `dbus`, `dbus.service`, `dbus.mainloop`
and `dbus.mainloop.glib` modules, with a lower-level API provided by the
`dbus.lowlevel` module.
"""
# imports
import dbus.lowlevel as __dbus_lowlevel
from .list import list
class Array(list):
"""
An array of similar items, implemented as a subtype of list.
As currently implemented, an Array behaves just like a list, but
with the addition of a ``signature`` property set by the constructor;
conversion of its items to D-Bus types is only done when it's sent in
a Message. This might change in future so validation is done earlier.
Constructor::
dbus.Array([iterable][, signature][, variant_level])
``variant_level`` must be non-negative; the default is 0.
``signature`` is the D-Bus signature string for a single element of the
array, or None. If not None it must represent a single complete type, the
type of a single array item; the signature of the whole Array may be
obtained by prepending ``a`` to the given signature.
If None (the default), when the Array is sent over
D-Bus, the item signature will be guessed from the first element.
:IVariables:
`variant_level` : int
Indicates how many nested Variant containers this object
is contained in: if a message's wire format has a variant containing a
variant containing an array, this is represented in Python by an
Array with variant_level==2.
"""
def __init__(self, iterable=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
signature = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The D-Bus signature of each element of this Array (a Signature instance)"""
variant_level = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The number of nested variants wrapping the real data. 0 if not in a variant."""
|
py | 1a4206231d1adb8601519440d18724165a3b6698 | from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, balanced_accuracy_score
def get_commonscores(y_true, y_pred):
"""
Calculate the precision, recall, f1, accuracy, balanced_accuracy scores
Args:
y_true (pd.Series): y_true (with limited set of index)
y_pred (pd.Series): y_pred (with limited set of index)
Returns:
dict : scores calculated on intersection of index. Keys Precision, recall, f1, accuracy, balanced_accuracy
"""
commonindex = y_true.index.intersection(y_pred.index)
myscores = dict()
y2_true = y_true.loc[commonindex]
y2_pred = y_pred.loc[commonindex]
myscores['precision'] = precision_score(y_true=y2_true, y_pred=y2_pred)
myscores['recall'] = recall_score(y_true=y2_true, y_pred=y2_pred)
myscores['f1'] = f1_score(y_true=y2_true, y_pred=y2_pred)
myscores['accuracy'] = accuracy_score(y_true=y2_true, y_pred=y2_pred)
myscores['balanced_accuracy'] = balanced_accuracy_score(y_true=y2_true, y_pred=y2_pred)
return myscores
suffixexact = 'exact'
suffixtoken = 'token'
suffixfuzzy = 'simple'
name_freetext = 'FreeText'
name_exact = 'Exact'
name_pruning_threshold = 'threshold'
name_usescores = 'use_scores'
name_stop_words = 'stop_words'
navalue_score = 0 |
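# A minimal usage sketch (hypothetical data, not part of the original module):
if __name__ == "__main__":
    import pandas as pd
    y_true = pd.Series([1, 0, 1], index=[0, 1, 2])
    y_pred = pd.Series([1, 1, 1, 0], index=[0, 1, 2, 3])  # index 3 is dropped
    scores = get_commonscores(y_true, y_pred)
    print(scores['precision'], scores['recall'])  # 0.666..., 1.0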
py | 1a42069bda0b044910512e09648a22040df65939 | # Generated by Django 3.2.4 on 2021-06-20 22:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mfa', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='mfakey',
name='last_code',
field=models.CharField(blank=True, max_length=32),
),
]
|
py | 1a4207a2debd38c7a37704688fde7bb10bd66067 | #build_plasticity model
from abaqus import *
from abaqusConstants import *
from caeModules import *
import sys  # needed below for sys.argv
session.journalOptions.setValues(replayGeometry=COORDINATE, recoverGeometry=COORDINATE)
#roller diameter & width
t = 10 #thickness of blank
r1 = 2*t #roller1 diameter - 2t according to ISO 5173
r2 = 2*t #roller2&3 diameter, same as r1 (ISO 5173)
w = 25.0 # width of rollers
l = 160 #length of blank
d = 2*(3*t+2)#space between outer rollers, equal to die radius, must conform to a min. 4t+3, max. 5t (ISO 5173)
b_w = 20 #width of blank
i_clearance = 0
mesh_size = float(sys.argv[-1]) #mm
half_spacing1 = d/2 + r1
v_spacing1 = r1 + t/2 + i_clearance
half_spacing = d/2 + r2
v_spacing2 = r2 + t/2 + i_clearance
roller_positions = [(0,v_spacing1,0), (-half_spacing,-v_spacing2,0), (half_spacing,-v_spacing2,0)] #list of tuples corresponding to roller1,2 &3
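# Worked values for the defaults above (t = 10, i_clearance = 0), as a sanity check:
#   r1 = r2 = 2*t = 20
#   d = 2*(3*t + 2) = 64
#   half_spacing = d/2 + r2 = 52,  v_spacing1 = r1 + t/2 = 25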
##make parts
#make roller1
s1 = mdb.models['Model-1'].ConstrainedSketch(name='__profile__',
sheetSize=200.0)
g, v, d, c = s1.geometry, s1.vertices, s1.dimensions, s1.constraints
s1.setPrimaryObject(option=STANDALONE)
s1.ConstructionLine(point1=(0.0, -100.0), point2=(0.0, 100.0))
s1.FixedConstraint(entity=g.findAt((0.0, 0.0)))
s1.Line(point1=(r1, w/2), point2=(r1, -w/2))
s1.VerticalConstraint(entity=g.findAt((r1, 0.0)), addUndoState=False)
p = mdb.models['Model-1'].Part(name='Roller1', dimensionality=THREE_D,
type=ANALYTIC_RIGID_SURFACE)
p = mdb.models['Model-1'].parts['Roller1']
p.AnalyticRigidSurfRevolve(sketch=s1)
s1.unsetPrimaryObject()
p = mdb.models['Model-1'].parts['Roller1']
p.ReferencePoint(point=(0.0, 0.0, 0.0))
session.viewports['Viewport: 1'].setValues(displayedObject=p)
del mdb.models['Model-1'].sketches['__profile__']
#make rollers 2 & 3
s1 = mdb.models['Model-1'].ConstrainedSketch(name='__profile__',
sheetSize=200.0)
g, v, d, c = s1.geometry, s1.vertices, s1.dimensions, s1.constraints
s1.setPrimaryObject(option=STANDALONE)
s1.ConstructionLine(point1=(0.0, -100.0), point2=(0.0, 100.0))
s1.FixedConstraint(entity=g.findAt((0.0, 0.0)))
s1.Line(point1=(r2, w/2), point2=(r2, -w/2))
s1.VerticalConstraint(entity=g.findAt((r2, 0.0)), addUndoState=False)
p = mdb.models['Model-1'].Part(name='Roller23', dimensionality=THREE_D,
type=ANALYTIC_RIGID_SURFACE)
p = mdb.models['Model-1'].parts['Roller23']
p.AnalyticRigidSurfRevolve(sketch=s1)
s1.unsetPrimaryObject()
p = mdb.models['Model-1'].parts['Roller23']
p.ReferencePoint(point=(0.0, 0.0, 0.0))
session.viewports['Viewport: 1'].setValues(displayedObject=p)
del mdb.models['Model-1'].sketches['__profile__']
#make blank
s = mdb.models['Model-1'].ConstrainedSketch(name='__profile__',
sheetSize=200.0)
g, v, d, c = s.geometry, s.vertices, s.dimensions, s.constraints
s.setPrimaryObject(option=STANDALONE)
s.rectangle(point1=(-b_w/2, t/2), point2=(b_w/2, -t/2))
p = mdb.models['Model-1'].Part(name='Blank', dimensionality=THREE_D,
type=DEFORMABLE_BODY)
p = mdb.models['Model-1'].parts['Blank']
p.BaseSolidExtrude(sketch=s, depth=l)
s.unsetPrimaryObject()
p = mdb.models['Model-1'].parts['Blank']
session.viewports['Viewport: 1'].setValues(displayedObject=p)
del mdb.models['Model-1'].sketches['__profile__']
#create roller instances & move to positions
a = mdb.models['Model-1'].rootAssembly
a.DatumCsysByDefault(CARTESIAN)
p = mdb.models['Model-1'].parts['Roller1']
a.Instance(name='Roller-1', part=p, dependent=OFF)
a.rotate(instanceList=('Roller-1', ), axisPoint=(0.0, 0.0, 0.0),
axisDirection=(1, 0.0, 0.0), angle=90.0)
a.translate(instanceList=('Roller-1', ), vector=(0.0, v_spacing1, 0.0))
a = mdb.models['Model-1'].rootAssembly
p = mdb.models['Model-1'].parts['Roller23']
a.Instance(name='Roller-2', part=p, dependent=OFF)
a = mdb.models['Model-1'].rootAssembly
a.rotate(instanceList=('Roller-2', ), axisPoint=(0.0, 0.0, 0.0),
axisDirection=(1.0, 0.0, 0.0), angle=90.0)
a.translate(instanceList=('Roller-2', ), vector=(-half_spacing, -v_spacing2, 0.0))
a = mdb.models['Model-1'].rootAssembly
p = mdb.models['Model-1'].parts['Roller23']
a.Instance(name='Roller-3', part=p, dependent=OFF)
a = mdb.models['Model-1'].rootAssembly
a.rotate(instanceList=('Roller-3', ), axisPoint=(0.0, 0.0, 0.0),
axisDirection=(1.0, 0.0, 0.0), angle=90.0)
a.translate(instanceList=('Roller-3', ), vector=(half_spacing, -v_spacing2, 0.0))
#create blank instance and move to location
a = mdb.models['Model-1'].rootAssembly
p = mdb.models['Model-1'].parts['Blank']
a.Instance(name='Blank-1', part=p, dependent=OFF)
a = mdb.models['Model-1'].rootAssembly
a.rotate(instanceList=('Blank-1', ), axisPoint=(0.0, 0.0, 0.0),
axisDirection=(0.0, 1.0, 0.0), angle=90.0)
a.translate(instanceList=('Blank-1', ), vector=(-l/2, 0.0, 0.0))
#create surfaces
a = mdb.models['Model-1'].rootAssembly
s1 = a.instances['Roller-1'].faces
roller_surf = roller_positions[0]
side1Faces1 = s1.findAt(((roller_positions[0][0], roller_positions[0][1]+r1, roller_positions[0][2]), ))
a.Surface(side1Faces=side1Faces1, name='R1_SURF')
a = mdb.models['Model-1'].rootAssembly
s1 = a.instances['Roller-2'].faces
side1Faces1 = s1.findAt(((roller_positions[1][0]+r2, roller_positions[1][1], roller_positions[1][2]), ))
a.Surface(side1Faces=side1Faces1, name='R2_SURF')
a = mdb.models['Model-1'].rootAssembly
s1 = a.instances['Roller-3'].faces
roller_surf = roller_positions[0]
side1Faces1 = s1.findAt(((roller_positions[2][0]+r2, roller_positions[2][1], roller_positions[2][2]), ))
a.Surface(side1Faces=side1Faces1, name='R3_SURF')
#upper side of the blank
a = mdb.models['Model-1'].rootAssembly
s1 = a.instances['Blank-1'].faces
side1Faces1 = s1.findAt(((0, t/2, 0), ))
a.Surface(side1Faces=side1Faces1, name='TOP')
#lower side of the blank
a = mdb.models['Model-1'].rootAssembly
s1 = a.instances['Blank-1'].faces
side1Faces1 = s1.findAt(((0, -t/2, 0), ))
a.Surface(side1Faces=side1Faces1, name='BOTTOM')
#make sets
a = mdb.models['Model-1'].rootAssembly
a.DatumPlaneByPrincipalPlane(principalPlane=XYPLANE, offset=0.0) #15
a = mdb.models['Model-1'].rootAssembly
a.DatumPlaneByPrincipalPlane(principalPlane=YZPLANE, offset=0.0) #16
#partition blank
a = mdb.models['Model-1'].rootAssembly
c = a.instances['Blank-1'].cells
pickedCells = c.findAt(((0, 0, 0), ))
d = a.datums
a.PartitionCellByDatumPlane(datumPlane=d[16], cells=pickedCells)
a = mdb.models['Model-1'].rootAssembly
c = a.instances['Blank-1'].cells
pickedCells = c.findAt(((0,0,2.5), ), ((0,
0, -2.5), ))
a.PartitionCellByDatumPlane(datumPlane=d[15], cells=pickedCells)
#create sets
a = mdb.models['Model-1'].rootAssembly
f = a.instances['Blank-1'].faces
faces1 = f.findAt(((-l/4, 0.0, 0.0), ), ((l/4, 0.0,
0.0), ))
a.Set(faces=faces1, name='xy_midplane')
faces2 = f.findAt(((0.0, 0.0, 2.5), ), ((0.0, 0.0,
-2.5), ))
a.Set(faces=faces2, name='yz_midplane')
c = a.instances['Blank-1'].cells
cells = c.findAt(((-l/4, 0.0, 2.5), ), ((-l/4, 0.0,
-2.5), ), ((l/4, 0.0, 2.5), ), ((l/4, 0.0, -2.5), ))
a.Set(cells=cells, name='all')
a = mdb.models['Model-1'].rootAssembly
#mesh controls/type
elemType = mesh.ElemType(elemCode=C3D8, elemLibrary=STANDARD,
secondOrderAccuracy=OFF, distortionControl=DEFAULT)
a.setElementType(regions=(cells, ), elemTypes=(elemType, elemType, elemType)) #because setElementType needs a tuple for elemTypes
#mesh size/seed
partInstances =(a.instances['Blank-1'], )
a.seedPartInstance(regions=partInstances, size=mesh_size, deviationFactor=0.1, minSizeFactor=0.1)
a.generateMesh(regions=partInstances)
session.viewports['Viewport: 1'].setValues(displayedObject=a)
mdb.models['Model-1'].setValues(noPartsInputFile=ON)
#write input
mdb.Job(name='U_plastic_mesh_only', model='Model-1', description='', type=ANALYSIS,
atTime=None, waitMinutes=0, waitHours=0, queue=None, memory=90,
memoryUnits=PERCENTAGE, getMemoryFromAnalysis=True,
explicitPrecision=SINGLE, nodalOutputPrecision=SINGLE, echoPrint=OFF,
modelPrint=OFF, contactPrint=OFF, historyPrint=OFF, userSubroutine='',
scratch='', resultsFormat=ODB, parallelizationMethodExplicit=DOMAIN,
numDomains=1, activateLoadBalancing=False, multiprocessingMode=DEFAULT,
numCpus=1, numGPUs=0)
mdb.jobs['U_plastic_mesh_only'].writeInput(consistencyChecking=OFF)
print "Wrote mesh"
|
py | 1a4208750c29ec0d78fb889346796af8392e8a6d | from opytimizer.spaces import HyperComplexSpace
# Defines the number of agents, decision variables,
# and search space dimensions
n_agents = 2
n_variables = 5
n_dimensions = 4
# Creates the HyperComplexSpace
s = HyperComplexSpace(n_agents=n_agents, n_variables=n_variables, n_dimensions=n_dimensions)
# Prints out some properties
print(s.n_agents, s.n_variables, s.n_dimensions)
print(s.agents, s.best_agent)
print(s.best_agent.position)
|
py | 1a42090540a8f45b5a335afb3a7124991b15f9dd | #!/usr/bin/env python2
from Tkinter import *
import Tkinter as tk
import ttk
import tkFileDialog
import tkMessageBox
from tkFileDialog import askdirectory
import six
from pkg_resources import resource_stream
import os
import sys
from os import listdir
from os.path import isfile, join
from os import walk
from subprocess import Popen
from subprocess import PIPE
import keyword
import re
from multiprocessing import Process
import paramiko
from access_ssh import access_ssh
from method_dialog import method_dialog
from editor import EditorClass
from find_and_replace_dialog import find_and_replace_dialog
from remote_file_chooser import remote_file_chooser
from new_dialog import new_dialog
from new_folder_dialog import new_folder_dialog
from open_file_dialog import open_file_dialog
from change_color import change_color
from interface import Paramiko_Interface
from create_config import create_config
from run_script import run_script_python_2
from run_script import run_script_python_3
from about_dialog import about_dialog
from project_opener import project_opener
from project_manager import project_manager
class App:
def open_file(self, path):
if isfile(path):
if not path in self.tab_names:
pane = PanedWindow(self.n, orient=HORIZONTAL, opaqueresize=True)
ed = EditorClass(self.root, path, self)
pane.add(ed.frame)
self.n.add(pane, text=path)
self.n.grid(row=0, column=1, rowspan=40, sticky=N+S+E+W)
w = self.root.winfo_width()
h = self.root.winfo_height()
self.tab_names.append(path)
ed.text.config(insertbackground='white')
ed.text.config(background=self.background)
ed.text.config(foreground=self.foreground)
with open(path, 'r') as f_in:
text = f_in.read()
lines = text.split('\n')
for line in lines:
ed.text.insert(END, line+'\n')
ed.lnText.config(foreground=self.line_num_color)
ed.lnText.config(background=self.line_num_background_color)
ed.syntax_coloring(None)
self.eds.append(ed)
self.n.select(self.tab_names.index(path))
def change_ed_colors(self):
for ed in self.eds:
ed.text.config(insertbackground='white')
ed.text.config(background=self.background)
ed.text.config(foreground=self.foreground)
ed.text.tag_configure('Token.Keyword', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Constant', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Declaration', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Namespace', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Pseudo', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Reserved', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Type', foreground=self.token_keyword)
ed.text.tag_configure('Token.Name', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Attribute', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Builtin', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Builtin.Pseudo', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Class', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Constant', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Decorator', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Entity', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Exception', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Function', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Label', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Namespace', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Other', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Tag', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Variable', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Variable.Class', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Variable.Global', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Variable.Instance', foreground=self.token_name)
ed.text.tag_configure('Token.Literal', foreground=self.token_literal)
ed.text.tag_configure('Token.Literal.Date', foreground=self.token_literal)
ed.text.tag_configure('Token.Literal.String', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Backtick', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Char', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Doc', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Double', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Escape', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Heredoc', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Interpol', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Other', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Regex', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Single', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Symbol', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.Number', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Bin', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Float', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Hex', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Integer', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Integer.Long', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Oct', foreground=self.token_number)
ed.text.tag_configure('Token.Operator', foreground=self.token_operators)
ed.text.tag_configure('Token.Operator.Word', foreground=self.token_operators)
ed.text.tag_configure('Token.Punctuation', foreground=self.token_punctuation)
ed.text.tag_configure('Token.Comment', foreground=self.token_comments)
ed.text.tag_configure('Token.Comment.Hashbang', foreground=self.token_comments)
ed.text.tag_configure('Token.Comment.Multiline', foreground=self.token_comments)
ed.text.tag_configure('Token.Comment.Preproc', foreground=self.token_comments)
ed.text.tag_configure('Token.Comment.Single', foreground=self.token_comments)
ed.text.tag_configure('Token.Comment.Special', foreground=self.token_comments)
ed.text.tag_configure('Token.Generic', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Deleted', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Emph', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Error', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Heading', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Inserted', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Output', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Prompt', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Strong', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Subheading', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Traceback', foreground=self.token_generic)
self.tree.tag_configure('directory', background=self.background, foreground=self.dir_color)
self.tree.tag_configure('file', background=self.background, foreground=self.file_color)
self.menubar.config(background=self.file_bar_color)
self.root.configure(background=self.background)
self.menubar.config(background=self.file_bar_color, foreground=self.file_bar_text_color)
ttk.Style().configure("TNotebook", background=self.notebook_background)
def copy_click(self):
index = self.n.tabs().index(self.n.select())
self.eds[index].text.clipboard_clear()
text = self.eds[index].text.get(tk.SEL_FIRST, tk.SEL_LAST)
self.eds[index].text.clipboard_append(text)
def cut_click(self):
index = self.n.tabs().index(self.n.select())
self.copy_click()
self.eds[index].text.delete(tk.SEL_FIRST, tk.SEL_LAST)
def paste_click(self):
index = self.n.tabs().index(self.n.select())
text = self.eds[index].text.selection_get(selection='CLIPBOARD')
self.eds[index].text.insert('insert', text)
def recursive_find_nodes(self, rootDir, parent, indent):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
#self.files.append(path)
if os.name == 'posix':
if os.path.isdir(path):
parent.children.append(node(path[path.rfind('/'):], parent, indent, path, 'folder', self.top, self))
                    self.recursive_find_nodes(path, parent.children[len(parent.children) - 1], indent + 1)
else:
parent.children.append(node(path[path.rfind('/'):], parent, indent, path, 'file', self.top, self))
else:
if os.path.isdir(path):
parent.children.append(node(path[path.rfind('\\'):], parent, indent, path, 'folder', self.top, self))
                    self.recursive_find_nodes(path, parent.children[len(parent.children) - 1], indent + 1)
else:
parent.children.append(node(path[path.rfind('\\'):], parent, indent, path, 'file', self.top, self))
return parent
def recursive_print_nodes(self, parent):
for child in parent.children:
            self.recursive_print_nodes(child)
print(('-' * parent.indent) + parent.name)
def list_nodes(self, path, tree, parent, full_path):
self.root = node(os.getcwd()[os.getcwd().rfind('\\'):], None, 0, os.getcwd(), 'folder', self.top, self)
        self.root = self.recursive_find_nodes(os.getcwd(), self.root, 1)
        self.recursive_print_nodes(self.root)
def draw_structure(self, parent, height):
parent.name_label.place(anchor=NW, x=parent.indent*10 + 20, y=height)
parent.image_label.place(anchor=NW, x=parent.indent*10, y=height)
self.name_labels.append(parent.name_label)
self.image_labels.append(parent.image_label)
if parent.display_children:
for child in parent.children:
height = height + 20
height = self.draw_structure(child, height)
return height
def recursive_find(self, rootDir):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
self.files.append(path)
if os.path.isdir(path):
self.recursive_find(path)
def list_files(self, path, tree, parent, full_path):
self.files = [os.getcwd()]
self.recursive_find(os.getcwd())
counter = 0
for f in self.files:
if counter != 0:
if os.name == 'posix':
if(isfile(f)):
tree.insert(f[:f.rfind('/')], 0, f, text=f[f.rfind('/') + 1:], tags = ('file',))
else:
tree.insert(f[:f.rfind('/')], 0, f, text=f[f.rfind('/') + 1:], tags = ('directory',))
else:
if(isfile(f)):
tree.insert(f[:f.rfind('\\')], 0, f, text=f[f.rfind('\\') + 1:], tags = ('file',))
else:
tree.insert(f[:f.rfind('\\')], 0, f, text=f[f.rfind('\\') + 1:], tags = ('directory',))
else:
if os.name == 'posix':
tree.insert('', 3, f, text=f[f.rfind('/') + 1:], tags = ('directory',))
else:
tree.insert('', 3, f, text=f[f.rfind('\\') + 1:], tags = ('directory',))
counter = counter + 1
return tree
def on_double_click(self, event):
item = self.tree.selection()[0]
self.open_file(item)
def close_all_tabs(self):
val = tkMessageBox.askokcancel('Open New Folder', "This will close all current tabs, continue?")
if val:
for i in range(0, len(self.n.tabs())):
self.n.forget(0)
del(self.tab_names[0])
del(self.eds[0])
return val
def close_tab(self):
index = self.n.tabs().index(self.n.select())
self.n.forget(self.n.select())
del(self.tab_names[index])
del(self.eds[index])
def close_tab_event(self, event):
index = self.n.tabs().index(self.n.select())
self.n.forget(self.n.select())
del(self.tab_names[index])
del(self.eds[index])
def open_click(self):
of = open_file_dialog(self.root, self, os.getcwd().replace('\\', '/'))
def save_click(self):
path = self.n.tab(self.n.select())['text']
index = self.n.tabs().index(self.n.select())
with open(path, 'w') as f_out:
f_out.write(self.eds[index].text.get("1.0",END))
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd().replace('\\', '/'), open=True)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.put(path,path[path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
except:
print('Could not push for some reason')
def save_type(self, event):
path = self.n.tab(self.n.select())['text']
index = self.n.tabs().index(self.n.select())
with open(path, 'w') as f_out:
f_out.write(self.eds[index].text.get("1.0",END))
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.put(path,path[path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
except:
print('Could not push for some reason')
def exit_click(self):
sys.exit()
def keyPressed(self, event):
print("--")
if event.keysym == 's':
            self.save_click()
def open_folder_click(self):
val = self.close_all_tabs()
if val:
folder = askdirectory().replace('\\', '/')
os.chdir(folder)
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
self.folder = folder
            self.lines[20] = self.lines[20][:self.lines[20].find('=')+1]+self.folder
self.write_config()
self.editing_pi = False
def open_folder(self, folder):
val = self.close_all_tabs()
if val:
os.chdir(folder)
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
self.folder = folder
            self.lines[20] = self.lines[20][:self.lines[20].find('=')+1]+self.folder
self.write_config()
self.editing_pi = False
def find_text_dialog(self):
temp = find_and_replace_dialog(self.root, self)
self.root.wait_window(temp.top)
def find(self, f):
index = self.n.tabs().index(self.n.select())
ed = self.eds[index]
ed.highlight_pattern(f, "highlight")
def find_one(self, f):
index = self.n.tabs().index(self.n.select())
ed = self.eds[index]
text = ed.text.get("1.0",END)
count = text.count(f)
if self.find_counter >= count:
self.find_counter = 0
ed.highlight_one(f, "highlight", self.find_counter)
self.find_counter = self.find_counter + 1
def replace(self, f, r):
index = self.n.tabs().index(self.n.select())
text = self.eds[index].text.get("1.0",END)
self.eds[index].text.delete("1.0",END)
text = text.replace(f, r, 1)
self.eds[index].text.insert(END, text[:-1])
def replace_all(self, f, r):
index = self.n.tabs().index(self.n.select())
text = self.eds[index].text.get("1.0",END)
self.eds[index].text.delete("1.0",END)
text = text.replace(f, r)
self.eds[index].text.insert(END, text[:-1])
def undo_command(self):
index = self.n.tabs().index(self.n.select())
self.eds[index].undo(None)
def redo_command(self):
index = self.n.tabs().index(self.n.select())
self.eds[index].redo(None)
def reset_counters(self):
self.find_counter = 0
def find_type(self, event):
path = self.n.tab(self.n.select())['text']
self.find_text_dialog()
def tree_rename(self):
item = self.tree.selection()[0]
path = item
found = True
if found:
            args = ['python2', self.meringue_path + 'rename.py', 'test']
p = Popen(args, stdin=PIPE, stdout=PIPE, shell=False)
p.wait()
out = p.stdout.read().replace('\n', '')
if not out == '!!DO NOT RENAME!!':
i = path.replace('\\', '/').rfind('/')
try:
if i != -1:
os.rename(path, path[:path.rfind('/')]+'/'+out)
else:
os.rename(path, out)
except:
print('file does not exist, not renaming anything but the tab')
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd().replace('\\', '/'), open=True)
if self.editing_pi:
new_name = path[:path.rfind('/')]+'/'+out
new_name = new_name[new_name.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):]
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
                    try:
                        # sftp.rename works for both files and directories
                        sftp.rename(item[item.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):], new_name)
                    except:
                        print('could not rename remote path')
def delete(self):
if tkMessageBox.askyesno("Delete", "Delte this file or folder?"):
item = self.tree.selection()[0]
try:
os.remove(item)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.remove(item[item.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
except:
print('not a file')
except:
print('Not a file')
try:
self.delete_file(item)
except:
print('Not a directory')
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
def delete_file(self, path):
dirs = [f for f in listdir(path) if not isfile(join(path, f))]
files = [f for f in listdir(path) if isfile(join(path, f))]
for f in files:
os.remove(path+'/'+f)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.remove(path[path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):]+'/'+f)
except:
print('not a file')
for d in dirs:
self.delete_file(path+'/'+d)
os.rmdir(path)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.rmdir(path[path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):]+'/'+d)
except:
print('not a directory')
def show_menu(self, event):
self.directory_menu.post(event.x_root, event.y_root)
def on_right_click(self, event):
if len(self.tree.selection()) > 0:
self.selected_file_dir = self.tree.selection()[0]
self.show_menu(event)
def save_project(self):
with open(self.meringue_path + '/data/projects.txt', 'w') as f_out:
f_out.write('TEMP;{}'.format(self.folder))
def tab_rename(self, event):
path = self.n.tab(self.n.select())['text']
if os.name == 'nt':
print(self.meringue_path)
args = ['python', self.meringue_path + 'rename.py', path[path.rfind('\\')+1:]]
else:
args = ['python2', self.meringue_path + 'rename.py', path[path.rfind('/')+1:]]
p = Popen(args, stdin=PIPE, stdout=PIPE, shell=False)
p.wait()
out = p.stdout.read().replace('\n', '')
if not out == '!!DO NOT RENAME!!':
self.n.tab(self.n.select(), text=out)
def end_find(self, event):
for ed in self.eds:
ed.remove_highlight(None)
def function_dialog(self, event):
dialog = method_dialog(self.root, self)
def ssh(self, event=None):
dialog = access_ssh(self.root, self)
def open_terminal(self):
if sys.platform == "linux" or sys.platform == "linux2":
os.system('gnome-terminal')
        if sys.platform == 'darwin':
            os.system('open -a Terminal')
if sys.platform == 'win32':
os.system('start cmd')
def open_project(self):
project_opener(self.root, self)
def manage_projects(self):
project_manager(self.root, self)
def start(self, noOfEditors, noOfLines):
'''
scroll_style = ttk.Style()
scroll_style.element_create("My.Scrollbar.trough", "from", "default")
scroll_style.element_create("My.Scrollbar.bg", "from", "default")
scroll_style.element_create("My.Scrollbar.activebackground", "from", "default")
# Redefine the horizontal scrollbar layout to use the custom trough.
# This one is appropriate for the 'vista' theme.
scroll_style.layout("My.TScrollbar",
[('My.Scrollbar.trough', {'children':
[('Horizontal.Scrollbar.leftarrow', {'side': 'left', 'sticky': ''}),
('Horizontal.Scrollbar.rightarrow', {'side': 'right', 'sticky': ''}),
('Horizontal.Scrollbar.thumb', {'unit': '1', 'children':
[('Horizontal.Scrollbar.grip', {'sticky': ''})],
'sticky': 'nswe'})],
'sticky': 'we'})])
# Copy original style configuration and add our new custom configuration option.
scroll_style.configure("My.TScrollbar", *scroll_style.configure("Horizontal.TScrollbar"))
scroll_style.configure("My.TScrollbar", troughcolor="black")
'''
#s.configure('Tab_Style', background='cyan')
try:
self.read_config()
except:
create_config(self.meringue_path)
self.read_config()
'''
self.pane = PanedWindow(self.n, orient=HORIZONTAL, opaqueresize=True)
ed = EditorClass(self.root, 'untitled')
ed.text.config(insertbackground='white')
ed.text.config(background=self.background)
ed.text.config(foreground=self.foreground)
#ed.vScrollbar.config(style="My.TScrollbar")
ed.text.tag_configure("highlight", background=self.highlight_background, foreground=self.highlight_foreground)
ed.text.tag_configure("keyword", foreground=self.highlight_keyword)
ed.text.tag_configure("function_name", foreground=self.highlight_function_name)
ed.text.tag_configure("function", foreground=self.highlight_function)
ed.text.tag_configure("boolean", foreground=self.highlight_boolean)
ed.text.tag_configure("string", foreground=self.highlight_string)
ed.text.tag_configure("number", foreground=self.highlight_number)
ed.text.tag_configure("operator", foreground=self.highlight_operator)
#ed.text.tag_configure('normal', foreground=self.highlight_normal)
ed.text.tag_configure('comment', foreground=self.highlight_comment)
ed.lnText.config(foreground=self.line_num_color)
ed.lnText.config(background=self.line_num_background_color)
self.pane.add(ed.frame)
self.eds.append(ed)
'''
ttk.Style().configure('TFrame', fieldbackground=self.background, background=self.background)
self.tree_frame = Frame(self.root, bg=self.background, width=200, height=10000)
#ttk.Style().configure('TFrame', fieldbackground=self.background, background=self.background)
#self.tree_frame = Frame(self.root, bg=self.background, width=200, height=10000)
self.bg_frame = Frame(self.tree_frame, width=200, height=10000, bg=self.background)
#self.display_frame = Frame(self.root, width=150, height=10000, bg=self.background)
self.tree = ttk.Treeview(self.tree_frame)
#self.tree["columns"]=("Files_and_Folders")
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
if os.name != 'nt':
self.tree.tag_configure('directory', background=self.background, foreground=self.dir_color)
self.tree.tag_configure('file', background=self.background, foreground=self.file_color)
ttk.Style().configure("Treeview", fieldbackground=self.background, background=self.background)
self.treeScroll = ttk.Scrollbar(self.tree_frame, orient=VERTICAL)
self.treeScroll.configure(command=self.tree.yview)
self.treeScroll.grid(row=0, column=1, rowspan=40, sticky=N+S)
self.tree.configure(xscrollcommand=self.treeScroll.set)
self.tree.bind("<3>", self.on_right_click)
self.tree.bind("<2>", self.on_right_click)
self.tree.bind("<Double-1>", self.on_double_click)
self.tree.grid(row=0, column=0, rowspan=40, sticky=N+S)
self.tree_frame.grid(row=0, column=0, rowspan=40, sticky=N+S)
#self.display_frame.pack(side=RIGHT, fill=Y, expand=0)
#self.pane.pack(fill='both', expand=1)
#self.n.add(self.pane, text='untitled')
self.n.bind("<Double-1>", self.tab_rename)
self.n.bind('<3>', self.close_tab_event)
self.n.bind('<2>', self.close_tab_event)
#self.n.bind("<1>", self.reset_display_text)
self.n.grid(row=0, column=1, rowspan=40, columnspan=60, sticky=N+S+E+W)
ttk.Style().configure("TNotebook", background=self.notebook_background)
#ttk.Style().configure("TPanedwindow", background=self.pane_color, foreground=self.notebook_foreground)
#self.tab_names.append('untitled')
filemenu = Menu(self.menubar, tearoff=0)
filemenu.add_command(label="Open", command=self.open_click)
filemenu.add_command(label="Open Folder", command=self.open_folder_click)
filemenu.add_command(label="Save", command=self.save_click)
filemenu.add_command(label="Close Tab", command=self.close_tab)
filemenu.add_separator()
filemenu.add_command(label='Open Project', command = self.open_project)
filemenu.add_command(label='Save Project', command = self.save_project)
filemenu.add_command(label='Manage Projects', command = self.manage_projects)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=self.exit_click)
self.menubar.add_cascade(label="File", menu=filemenu)
editmenu = Menu(self.menubar, tearoff=0)
editmenu.add_command(label="Undo", command=self.undo_command)
editmenu.add_command(label="Redo", command=self.redo_command)
editmenu.add_separator()
editmenu.add_command(label="Change Editor Colors", command=self.color_config)
self.menubar.add_cascade(label="Edit", menu=editmenu)
viewmenu = Menu(self.menubar, tearoff=0)
viewmenu.add_command(label="Toggle Menubar", command=self.hide_show_menubar_command)
viewmenu.add_command(label="Toggle File Explorer", command=self.hide_show_tree_command)
self.menubar.add_cascade(label="View", menu=viewmenu)
#optionsmenu = Menu(self.menubar, tearoff=0)
#optionsmenu.add_command(label="Change Colors", command=self.color_config)
#self.menubar.add_cascade(label="Options", menu=optionsmenu)
helpmenu = Menu(self.menubar, tearoff=0)
helpmenu.add_command(label="About", command=self.open_about)
self.menubar.add_cascade(label="Help", menu=helpmenu)
#self.menubar.add_command(label="Close Tab", command=self.close_tab)
terminalmenu = Menu(self.menubar, tearoff=0)
terminalmenu.add_command(label="Local Terminal", command=self.open_terminal)
terminalmenu.add_command(label="Remote Terminal", command=self.open_remote_terminal)
self.menubar.add_cascade(label="Open Terminal", menu=terminalmenu)
remotemenu = Menu(self.menubar, tearoff=0)
remotemenu.add_command(label='Connect to Remote', command=self.ssh)
#remotemenu.add_command(label='Edit Directory', command=self.remote_folder_choose)
remotemenu.add_command(label="Open Explorer", command=self.paramiko_interface_open)
self.menubar.add_cascade(label="Remote Actions", menu=remotemenu)
#runmenu = Menu(self.menubar, tearoff=0)
runmenu = Menu(self.menubar, tearoff=0)
runmenu.add_command(label='Python 2', command=self.run_file_python_2)
runmenu.add_command(label='Python 3', command=self.run_file_python_3)
self.menubar.add_cascade(label="Run File", menu=runmenu)
#self.menubar.add_command(label="Open Terminal", command=self.open_terminal)
self.menubar.config(background=self.file_bar_color, foreground=self.file_bar_text_color)
self.root.configure(background=self.background)
self.root.title("meringue")
self.root.bind('<Control-s>', self.save_type)
self.root.bind('<Control-f>', self.find_type)
#self.root.bind('<Control-Shift-p>', self.git_commands)
self.root.bind('<Escape>', self.end_find)
#self.root.bind('<Control-r>', self.function_dialog)
self.root.bind('<Control-h>', self.ssh)
        self.root.bind('<Alt_R>', self.hide_show_menubar)
        self.root.bind('<Control-e>', self.hide_show_tree)
#self.root.bind("<Configure>", self.configure)
self.root['bg'] = 'black'
self.root.geometry('{}x{}'.format(600, 400))
self.root.config(menu=self.menubar)
if os.name == 'nt':
ttk.Style().theme_use('default')
for x in range(60):
Grid.columnconfigure(self.n, x, weight=1)
for y in range(30):
Grid.rowconfigure(self.n, y, weight=1)
for x in range(60):
Grid.columnconfigure(self.tree, x, weight=1)
for y in range(30):
Grid.rowconfigure(self.tree, y, weight=1)
for x in range(60):
Grid.columnconfigure(self.tree_frame, x, weight=1)
for y in range(30):
Grid.rowconfigure(self.tree_frame, y, weight=1)
for x in range(60):
Grid.columnconfigure(self.root, x, weight=1)
for y in range(30):
Grid.rowconfigure(self.root, y, weight=1)
        self.hide_menubar = True
        self.hide_tree = True
self.emptyMenu = Menu(self.root)
def open_about(self):
about_dialog(self)
def hide_show_menubar(self, event):
if self.hide_menubar:
self.root.config(menu=self.emptyMenu)
self.hide_menubar = False;
else:
self.root.config(menu=self.menubar)
self.hide_menubar = True
def hide_show_tree(self, event):
if self.hide_tree:
self.tree_frame.grid_forget()
self.hide_tree = False
else:
self.tree_frame.grid(row=0, column=0, rowspan=40, sticky=N+S)
self.hide_tree = True
def hide_show_menubar_command(self):
self.hide_show_menubar(None)
def hide_show_tree_command(self):
self.hide_show_tree(None)
def run_file_python_2(self):
index = self.n.tabs().index(self.n.select())
print(self.tab_names[index])
run_script_python_2(self.tab_names[index], self.root)
def run_file_python_3(self):
index = self.n.tabs().index(self.n.select())
print(self.tab_names[index])
run_script_python_3(self.tab_names[index], self.root)
def paramiko_interface_open(self):
Paramiko_Interface(self, self.username, self.password, self.ip, self.port)
def remote_folder_choose(self):
#We're going to store the directory tree here
self.remote_tree_array = []
#Let's ssh into the remote machine
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(self.ip, username=self.username, password=self.password, port=int(self.port))
#Capture the directory output
print('Running and capturing directories')
tkMessageBox.showwarning("SSH Connect", "Pulling the directory structure -- please wait")
stdin, stdout, stderr = ssh.exec_command('tree -f -i -l -d')
stdin.close()
#Extract the name of all of the directories from the tree and store them
for line in stdout.read().splitlines():
if ' -> ' in line:
self.remote_tree_array.append(line[:line.find(' -> ')])
else:
self.remote_tree_array.append(line)
#Elimiate the top directory as it is not needed
self.remote_tree_array = self.remote_tree_array[:-1]
#Go to letting the user select the directory that they want
rfc = remote_file_chooser(self, self, self.username, self.ip, self.password, ssh, int(self.port))
except:
#If something failed throw an error message
tkMessageBox.showwarning("SSH Connect", "Something failed -- Please try again")
ssh.close()
def open_remote_terminal(self):
self.current_directory = '.'
if sys.platform == "win32":
try:
#os.system('start python ' + self.meringue_path + 'paramiko_terminal.py "{}" {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
os.system('start python "' + self.meringue_path + 'paramiko_terminal.py" "{}" {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
except:
# try:
# os.system('start python2 paramiko_terminal.py {} {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
# except:
pass
if sys.platform == "darwin":
#try:
# os.system('open python paramiko_terminal.py {} {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
#except:
try:
os.system('open python2 "' + self.meringue_path + 'paramiko_terminal.py" "{}" {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
except:
pass
if sys.platform == "linux" or sys.platform == "linux2":
#try:
# os.system('xterm -hold -e python paramiko_terminal.py {} {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
#except:
try:
os.system('xterm -e python2 "' + self.meringue_path + 'paramiko_terminal.py" "{}" {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
except:
pass
def copy_file(self):
if len(self.tree.selection()) > 0:
item = self.tree.selection()[0]
self.copy_path = item
def paste_file(self):
if self.copy_path != '':
if len(self.tree.selection()) > 0:
item = self.tree.selection()[0]
dirs = [item+'/'+f for f in listdir(item) if not isfile(join(item, f))]
files = [item+'/'+f for f in listdir(item) if isfile(join(item, f))]
if not isfile(item):
f_name = self.copy_path[self.copy_path.rfind('/')+1:]
write_path = item+'/'+f_name
if isfile(self.copy_path):
counter = 1
temp_write_path = write_path
while temp_write_path in files:
temp_write_path = write_path+'.'+str(counter)
counter = counter + 1
write_path = temp_write_path
with open(write_path, 'w') as f_out:
with open(self.copy_path, 'r') as f_in:
text = f_in.read()
f_out.write(text)
else:
counter = 1
temp_write_path = write_path
while temp_write_path in dirs:
temp_write_path = write_path+'.'+str(counter)
counter = counter + 1
write_path = temp_write_path
self.recursive_paste(write_path)
copy_path = ''
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.put(write_path, write_path[write_path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
except:
print('not a file')
self.recursive_paste_sftp(write_path, sftp)
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
def recursive_paste(self, path):
os.mkdir(path)
dirs = [f for f in listdir(self.copy_path) if not isfile(join(self.copy_path, f))]
files = [f for f in listdir(self.copy_path) if isfile(join(self.copy_path, f))]
for f in files:
with open(path+'/'+f, 'w') as f_out:
with open(self.copy_path+'/'+f, 'r') as f_in:
text = f_in.read()
f_out.write(text)
for d in dirs:
self.recursive_paste(path+'/'+d)
def recursive_paste_sftp(self, path, sftp):
sftp.mkdir(path[path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
dirs = [path+'/'+f for f in listdir(path) if not isfile(join(path, f))]
files = [path+'/'+f for f in listdir(path) if isfile(join(path, f))]
print(dirs)
print(files)
for f in files:
sftp.put(f, f[f.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
for d in dirs:
            self.recursive_paste_sftp(d, sftp)
def read_config(self):
with open(self.meringue_path + '/data/meringue_config.ini', 'r') as f_in:
self.lines = f_in.read().split('\n')
self.foreground = self.lines[0].split('=')[1]
self.foreground = self.foreground[:7]
self.background = self.lines[1].split('=')[1]
self.background = self.background[:7]
self.file_color = self.lines[2].split('=')[1]
self.file_color = self.file_color[:7]
self.dir_color = self.lines[3].split('=')[1]
self.dir_color = self.dir_color[:7]
self.line_num_color = self.lines[4].split('=')[1]
self.line_num_color = self.line_num_color[:7]
self.line_num_background_color = self.lines[5].split('=')[1]
self.line_num_background_color = self.line_num_background_color[:7]
self.file_bar_color = self.lines[6].split('=')[1]
self.file_bar_color = self.file_bar_color[:7]
self.file_bar_text_color = self.lines[7].split('=')[1]
self.file_bar_text_color = self.file_bar_text_color[:7]
self.notebook_background = self.lines[8].split('=')[1]
self.notebook_background = self.notebook_background[:7]
self.highlight_foreground = self.lines[9].split('=')[1]
self.highlight_foreground = self.highlight_foreground[:7]
self.highlight_background = self.lines[10].split('=')[1]
self.highlight_background = self.highlight_background[:7]
self.token_keyword = self.lines[11].split('=')[1]
self.token_keyword = self.token_keyword[:7]
self.token_name = self.lines[12].split('=')[1]
self.token_name = self.token_name[:7]
self.token_literal = self.lines[13].split('=')[1]
self.token_literal = self.token_literal[:7]
self.token_string = self.lines[14].split('=')[1]
self.token_string = self.token_string[:7]
self.token_number = self.lines[15].split('=')[1]
self.token_number = self.token_number[:7]
self.token_operators = self.lines[16].split('=')[1]
self.token_operators = self.token_operators[:7]
self.token_punctuation = self.lines[17].split('=')[1]
self.token_punctuation = self.token_punctuation[:7]
self.token_comments = self.lines[18].split('=')[1]
self.token_comments = self.token_comments[:7]
self.token_generic = self.lines[19].split('=')[1]
self.token_generic = self.token_generic[:7]
self.folder = self.lines[20].split('=')[1]
if not self.folder:
self.folder = askdirectory()
self.lines[20] = self.lines[20][:self.lines[20].find('=')+1]+self.folder
self.write_config()
try:
os.chdir(self.folder)
except:
self.folder = askdirectory()
self.lines[20] = self.lines[20][:self.lines[20].find('=')+1]+self.folder
self.write_config()
os.chdir(self.folder)
def new_file(self):
nd = new_dialog(self.root, self)
def new_file_func(self, name):
item = self.tree.selection()[0]
if not isfile(item):
with open(item+'/'+name, 'w') as f_out:
f_out.write('')
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.put(item+'/'+name,item[item.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):]+'/'+name)
except:
print('Could not push for some reason')
else:
tkMessageBox.showwarning("File Creation", "Please select the parent folder for the new file and then try creating it again")
def new_folder(self):
nfd = new_folder_dialog(self.root, self)
def new_folder_func(self, name):
item = self.tree.selection()[0]
if not isfile(item):
#with open(item+'/'+name, 'w') as f_out:
# f_out.write('')
os.mkdir(item+'/'+name)
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.mkdir(item[item.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):]+'/'+name)
except:
print('Could not push for some reason')
else:
tkMessageBox.showwarning("File Creation", "Please select the parent folder for the new file and then try creating it again")
def color_config(self):
cc = change_color(self.root, self)
def make_directory_menu(self, w):
self.directory_menu = Menu(self.root, tearoff=0)
self.directory_menu.add_command(label="Delete", command=self.delete)
self.directory_menu.add_command(label="Rename", command=self.tree_rename)
self.directory_menu.add_command(label="Copy", command=self.copy_file)
self.directory_menu.add_command(label="Paste", command=self.paste_file)
self.directory_menu.add_command(label='New File', command=self.new_file)
self.directory_menu.add_command(label='New Folder', command=self.new_folder)
#self.directory.menu.add_command(label='Copy', command=self.copy_item)
#self.directory.menu.add_command(label='Paste', command=self.paste_item)
def write_config(self):
print('writing')
with open(self.meringue_path + '/data/meringue_config.ini', 'w') as f_out:
for line in self.lines:
f_out.write(line + '\n')
f_out.flush()
def __init__(self):
self.meringue_path = os.path.realpath(__file__)
if os.name == 'nt':
self.meringue_path = self.meringue_path[:self.meringue_path.rfind('\\') + 1]
else:
self.meringue_path = self.meringue_path[:self.meringue_path.rfind('/') + 1]
print(self.meringue_path)
sys.stdout.flush()
#os.chdir(os.path.join(os.path.expanduser('~'), 'Documents'))
self.root = Tk()
img = PhotoImage(file=self.meringue_path + 'icon.gif')
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
#self.root.iconbitmap(self.meringue_path + '/' + 'icon.gif')
self.eds = []
self.n = ttk.Notebook(self.root)
self.menubar = Menu(self.root)
self.tab_names = []
self.find_string = ''
self.find_counter = 0
self.copy_path = ''
self.selected_file_dir = ''
self.tree_array = []
self.remote_tree_array = []
self.remote_tree_file_array = []
self.editing_pi = False
self.username = ''
self.password = ''
self.ip = ''
self.port = 22
self.new_file_or_folder_name = ''
self.folder = ''
        self.highlight_foreground = ''
self.highlight_background = ''
self.highlight_keyword = ''
self.highlight_function_name = ''
self.highlight_function = ''
self.highlight_boolean = ''
self.highlight_string = ''
self.highlight_number = ''
self.highlight_operator = ''
#self.highlight_normal = ''
self.foreground = ''
self.background = ''
self.start(1, 9999)
self.make_directory_menu(self.root)
self.jump_counter = 0
self.find_counter = 0
try:
if os.name == 'posix':
os.makedirs(self.meringue_path+'local')
else:
os.makedirs(self.meringue_path.replace('\\', '/')+'local')
except:
pass
if os.name == 'posix':
self.recursive_delete(self.meringue_path+'local')
else:
self.recursive_delete(self.meringue_path.replace('\\', '/')+'local')
self.sftp_stem = ''
mainloop()
def recursive_delete(self, rootDir):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
if os.path.isdir(path):
self.recursive_delete(path)
else:
try:
os.remove(path)
except:
pass
try:
os.rmdir(path)
except:
pass
def __main__(self):
App()
def main():
App()
if __name__ == '__main__':
main()
|
py | 1a4209ab3e6e140e569316c95d852741c5bdab4d | import random
import httpx
from utils.log import logger
'''
The API response format is:

Field       Type      Description
pid         int       work pid
p           int       page index within the work
uid         int       author uid
title       string    work title
author      string    author name (on ingestion, the @ and everything after it is stripped)
r18         boolean   whether the work is R18 (the classification in this library, not necessarily the work's own R18 flag)
width       int       original image width in px
height      int       original image height in px
tags        string[]  work tags, including Chinese translations where available
ext         string    image file extension
uploadDate  number    upload date of the work; timestamp in milliseconds
urls        object    image URLs for every requested size
'''
_url = "https://api.lolicon.app/setu/v2"
async def fetch_lolicon_random_img():
    """
    Fetch one random image from the lolicon API and return it in the
    normalized (title, author, original_url) form.
    """
    # Use the async client so this coroutine does not block the event loop
    async with httpx.AsyncClient() as client:
        resp = await client.get(_url)
    j = resp.json()
    error = j['error']
    logger.info(f'request result: {j}')
    if len(error) > 0:
        raise Exception(f'API error: {error}')
    # Pick a random image from the returned list
    data = j['data']
    data_len = len(data)
    if data_len == 0:
        raise Exception('API returned an empty data list')
    # Choose a random image object
    random_idx = random.randint(0, data_len - 1)
    logger.info(f'random index: {random_idx}\nimage list: {data}')
    item = data[random_idx]
    return item['title'], item["author"], item["urls"]["original"]
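
# Minimal usage sketch (hypothetical caller; assumes an asyncio event loop):
#
#   import asyncio
#   title, author, url = asyncio.run(fetch_lolicon_random_img())
#   print(title, author, url)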
py | 1a420a6ecebaf7f88797251c4ed354b4561e375f | import random
import string
from django.db import transaction
from rest_framework import serializers
from vbb_backend.users.models import Teacher, User, UserTypeEnum
def random_char(y):
    return "".join(random.choice(string.ascii_letters) for _ in range(y))
class TeacherUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = (
"first_name",
"last_name",
"date_of_birth",
"time_zone",
"initials",
"personal_email",
"phone",
"city",
"notes",
)
def validate(self, attrs):
attrs["user_type"] = UserTypeEnum.TEACHER.value
return super().validate(attrs)
class TeacherSerializer(serializers.ModelSerializer):
id = serializers.UUIDField(source="external_id", read_only=True)
user = TeacherUserSerializer(required=True)
class Meta:
model = Teacher
exclude = ("deleted", "external_id")
def validate(self, attrs):
user = attrs["user"]
with transaction.atomic():
if self.instance:
user_obj = self.instance.user
user = TeacherUserSerializer(user_obj, data=user)
user.is_valid(raise_exception=True)
instance = user.save()
attrs["user"] = instance
else:
user = TeacherUserSerializer(data=user)
user.is_valid(raise_exception=True)
instance = user.save(email=random_char(20) + "@vbb.com")
attrs["user"] = instance
return super().validate(attrs) |
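
# Hedged usage sketch (field values are made up; this mirrors how DRF
# serializers are normally driven from a view):
#
#   serializer = TeacherSerializer(data={'user': {'first_name': 'Ada', 'last_name': 'Lovelace'}})
#   serializer.is_valid(raise_exception=True)
#   teacher = serializer.save()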
py | 1a420afc56f54b6cb308c6fe3b32b536a3b384f3 | import math, logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..tokenization import WordTokenizer
class GRUEncoder(nn.Module):
def __init__(self,
token2id,
max_length=128,
hidden_size=230,
word_size=50,
blank_padding=True,
word2vec=None,
bidirectional=True,
dropout=0,
activation_function=F.tanh,
mask_entity=False):
"""
Args:
token2id: dictionary of token->idx mapping
max_length: max length of sentence
hidden_size: hidden size
word_size: size of word embedding
blank_padding: padding for RNN
word2vec: pretrained word2vec numpy
bidirectional: if it is a bidirectional RNN
activation_function: the activation function of RNN, tanh/relu
"""
# Hyperparameters
super(GRUEncoder, self).__init__()
self.token2id = token2id
self.max_length = max_length + 4 # 4 == take into account PIs
self.num_token = len(token2id)
self.num_position = max_length * 2
self.bidirectional = bidirectional
self.mask_entity = mask_entity
if word2vec is None:
self.word_size = word_size
else:
self.word_size = word2vec.shape[-1]
self.hidden_size = hidden_size
self.input_size = word_size
self.blank_padding = blank_padding
# Position Indicators (PI)
if not '<head>' in self.token2id:
self.token2id['<head>'] = len(self.token2id)
self.num_token += 1
if not '</head>' in self.token2id:
self.token2id['</head>'] = len(self.token2id)
self.num_token += 1
if not '<tail>' in self.token2id:
self.token2id['<tail>'] = len(self.token2id)
self.num_token += 1
if not '</tail>' in self.token2id:
self.token2id['</tail>'] = len(self.token2id)
self.num_token += 1
# add [UNK] and [PAD] tokens
if not '[UNK]' in self.token2id:
self.token2id['[UNK]'] = len(self.token2id)
self.num_token += 1
if not '[PAD]' in self.token2id:
self.token2id['[PAD]'] = len(self.token2id)
self.num_token += 1
# Word embedding
self.word_embedding = nn.Embedding(self.num_token, self.word_size)
if word2vec is not None:
logging.info("Initializing word embedding with word2vec.")
word2vec = torch.from_numpy(word2vec)
if self.num_token == len(word2vec) + 6: # 6 == <head>, </head>, <tail>, </tail>, [UNK], [PAD]
hsp = torch.randn(1, self.word_size) / math.sqrt(self.word_size)
hep = torch.randn(1, self.word_size) / math.sqrt(self.word_size)
tsp = torch.randn(1, self.word_size) / math.sqrt(self.word_size)
tep = torch.randn(1, self.word_size) / math.sqrt(self.word_size)
unk = torch.randn(1, self.word_size) / math.sqrt(self.word_size)
pad = torch.zeros(1, self.word_size)
self.word_embedding.weight.data.copy_(torch.cat([word2vec, hsp, hep, tsp, tep, unk, pad], 0))
else:
self.word_embedding.weight.data.copy_(word2vec)
self.tokenizer = WordTokenizer(vocab=self.token2id, unk_token="[UNK]")
self.drop = nn.Dropout(dropout)
self.act = activation_function
self.gru_fw = nn.GRU(self.input_size, self.hidden_size, batch_first=True)
if self.bidirectional:
self.gru_bw = nn.GRU(self.input_size, self.hidden_size, batch_first=True)
self.pool = nn.MaxPool1d(self.max_length)
def forward(self, token):
"""
Args:
token: (B, L), index of tokens
Return:
(B, H), representations for sentences
"""
# Check size of tensors
if len(token.size()) != 2:
raise Exception("Size of token should be (B, L)")
# Get non padding mask and sentence lengths (B,)
non_pad_mask, length = self.non_padding_mask(token)
x = self.word_embedding(token) # (B, L, EMBED)
out_fw, _ = self.gru_fw(x)
out = non_pad_mask * out_fw
if self.bidirectional:
x_bw = self.reverse_padded_sequence(x, length, batch_first=True)
out_bw, _ = self.gru_bw(x_bw)
out_bw = non_pad_mask * out_bw
out_bw = self.reverse_padded_sequence(out_bw, length, batch_first=True)
out = torch.add(out, out_bw) # (B, L, H)
out = out.transpose(1, 2) # (B, H, L)
out = self.pool(out).squeeze(-1) # (B, H)
out = self.act(out)
out = self.drop(out)
return out
def tokenize(self, item):
"""
Args:
item: input instance, including sentence, entity positions, etc.
Return:
index number of tokens and positions
"""
if 'text' in item:
sentence = item['text']
is_token = False
else:
sentence = item['token']
is_token = True
pos_head = item['h']['pos']
pos_tail = item['t']['pos']
# Sentence -> token
if not is_token:
if pos_head[0] > pos_tail[0]:
pos_min, pos_max = [pos_tail, pos_head]
rev = True
else:
pos_min, pos_max = [pos_head, pos_tail]
rev = False
sent_0 = self.tokenizer.tokenize(sentence[:pos_min[0]])
sent_1 = self.tokenizer.tokenize(sentence[pos_min[1]:pos_max[0]])
sent_2 = self.tokenizer.tokenize(sentence[pos_max[1]:])
ent_0 = self.tokenizer.tokenize(sentence[pos_min[0]:pos_min[1]])
ent_1 = self.tokenizer.tokenize(sentence[pos_max[0]:pos_max[1]])
if self.mask_entity:
ent_0 = ['[UNK]']
ent_1 = ['[UNK]']
if rev:
ent_0 = ['<tail>'] + ent_0 + ['</tail>']
ent_1 = ['<head>'] + ent_1 + ['</head>']
else:
ent_0 = ['<head>'] + ent_0 + ['</head>']
ent_1 = ['<tail>'] + ent_1 + ['</tail>']
tokens = sent_0 + ent_0 + sent_1 + ent_1 + sent_2
else:
tokens = sentence
# Token -> index
if self.blank_padding:
indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokens, self.max_length, self.token2id['[PAD]'], self.token2id['[UNK]'])
else:
indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokens, unk_id = self.token2id['[UNK]'])
if self.blank_padding:
indexed_tokens = indexed_tokens[:self.max_length]
indexed_tokens = torch.tensor(indexed_tokens).long().unsqueeze(0) # (1, L)
return indexed_tokens
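# Illustrative example of the marker insertion above (added for exposition;
# the exact tokens depend on the WordTokenizer vocabulary): for the sentence
# "Bill lives in Seattle" with head entity "Bill" and tail entity "Seattle",
# the marked sequence handed to convert_tokens_to_ids would be roughly
#   ['<head>', 'Bill', '</head>', 'lives', 'in', '<tail>', 'Seattle', '</tail>']
# and with mask_entity=True both entity spans collapse to ['[UNK]'].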
def reverse_padded_sequence(self, x, lengths, batch_first=True):
"""Reverses sequences according to their lengths.
Inputs should have size ``T x B x *`` if ``batch_first`` is False, or
``B x T x *`` if True. T is the length of the longest sequence (or larger),
B is the batch size, and * is any number of dimensions (including 0).
Arguments:
x (tensor): padded batch of variable length sequences.
lengths (list[int]): list of sequence lengths
batch_first (bool, optional): if True, inputs should be B x T x *.
Returns:
A tensor with the same size as inputs, but with each sequence
reversed according to its length.
"""
if not batch_first:
x = x.transpose(0, 1)
if x.size(0) != len(lengths):
raise ValueError('inputs incompatible with lengths.')
reversed_indices = [list(range(x.size(1))) for _ in range(x.size(0))]
for i, length in enumerate(lengths):
if length > 0:
reversed_indices[i][:length] = reversed_indices[i][length - 1::-1]
reversed_indices = (torch.LongTensor(reversed_indices).unsqueeze(2).expand_as(x))
reversed_indices = reversed_indices.to(x.device)
reversed_x = torch.gather(x, 1, reversed_indices)
if not batch_first:
reversed_x = reversed_x.transpose(0, 1)
return reversed_x
def non_padding_mask(self, token):
"""Return a (B, L, 1) float mask that is 1 at non-[PAD] positions, and the per-sentence lengths (B,)."""
non_pad_mask = token.ne(self.token2id['[PAD]']).type(torch.float)
length = torch.count_nonzero(non_pad_mask, dim=1)
return non_pad_mask.unsqueeze(-1), length
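# Doctest-style sketch (not in the original source; `encoder` is an assumed
# instance of this class): reverse_padded_sequence never reads `self`, so its
# behavior can be shown in isolation. Only the first `length` steps of each
# row are reversed; the padding tail stays in place.
#
# >>> x = torch.arange(8).view(2, 4, 1)                  # (B=2, L=4, *)
# >>> encoder.reverse_padded_sequence(x, [3, 2], batch_first=True)[..., 0]
# tensor([[2, 1, 0, 3],
#         [5, 4, 6, 7]])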
|
py | 1a420d346e447f19310f59c569698b4a82b8424c | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import datetime
import faulthandler
import logging
import os
import signal
import sys
import threading
import traceback
from contextlib import contextmanager
from typing import Callable, Iterator, Optional
import setproctitle
from pants.base.exiter import Exiter
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.osutil import Pid
logger = logging.getLogger(__name__)
class SignalHandler:
"""A specification for how to handle a fixed set of nonfatal signals.
This is subclassed and registered with ExceptionSink.reset_signal_handler() whenever the signal
handling behavior is modified for different pants processes, for example in the remote client when
pantsd is enabled. The default behavior is to exit "gracefully" by leaving a detailed log of which
signal was received, then exiting with failure.
Note that the terminal will convert a ctrl-c from the user into a SIGINT.
"""
@property
def signal_handler_mapping(self):
"""A dict mapping (signal number) -> (a method handling the signal)."""
# Could use an enum here, but we never end up doing any matching on the specific signal value,
# instead just iterating over the registered signals to set handlers, so a dict is probably
# better.
return {
signal.SIGINT: self._handle_sigint_if_enabled,
signal.SIGQUIT: self.handle_sigquit,
signal.SIGTERM: self.handle_sigterm,
}
def __init__(self):
self._ignore_sigint_lock = threading.Lock()
self._threads_ignoring_sigint = 0
self._ignoring_sigint_v2_engine = False
def _check_sigint_gate_is_correct(self):
assert self._threads_ignoring_sigint >= 0, \
"This should never happen, someone must have modified the counter outside of SignalHandler."
def _handle_sigint_if_enabled(self, signum, _frame):
with self._ignore_sigint_lock:
self._check_sigint_gate_is_correct()
threads_ignoring_sigint = self._threads_ignoring_sigint
ignoring_sigint_v2_engine = self._ignoring_sigint_v2_engine
if threads_ignoring_sigint == 0 and not ignoring_sigint_v2_engine:
self.handle_sigint(signum, _frame)
def _toggle_ignoring_sigint_v2_engine(self, toggle: bool):
with self._ignore_sigint_lock:
self._ignoring_sigint_v2_engine = toggle
@contextmanager
def _ignoring_sigint(self):
with self._ignore_sigint_lock:
self._check_sigint_gate_is_correct()
self._threads_ignoring_sigint += 1
try:
yield
finally:
with self._ignore_sigint_lock:
self._threads_ignoring_sigint -= 1
self._check_sigint_gate_is_correct()
def handle_sigint(self, signum, _frame):
raise KeyboardInterrupt('User interrupted execution with control-c!')
# TODO(#7406): figure out how to let sys.exit work in a signal handler instead of having to raise
# this exception!
class SignalHandledNonLocalExit(Exception):
"""Raised in handlers for non-fatal signals to overcome Python limitations.
When waiting on a subprocess and in a signal handler, sys.exit appears to be ignored, and causes
the signal handler to return. We want to (eventually) exit after these signals, not ignore them,
so we raise this exception instead and check it in our sys.excepthook override.
"""
def __init__(self, signum, signame):
self.signum = signum
self.signame = signame
self.traceback_lines = traceback.format_stack()
super(SignalHandler.SignalHandledNonLocalExit, self).__init__()
def handle_sigquit(self, signum, _frame):
raise self.SignalHandledNonLocalExit(signum, 'SIGQUIT')
def handle_sigterm(self, signum, _frame):
raise self.SignalHandledNonLocalExit(signum, 'SIGTERM')
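# Sketch of the subclassing pattern described in the SignalHandler docstring
# (`RemoteClientSignalHandler` and `notify_pantsd_of_interrupt` are
# hypothetical names, not part of this module). Registration must happen on
# the main thread, since reset_signal_handler() calls signal.signal():
#
#   class RemoteClientSignalHandler(SignalHandler):
#       def handle_sigint(self, signum, _frame):
#           notify_pantsd_of_interrupt()
#           super().handle_sigint(signum, _frame)
#
#   ExceptionSink.reset_signal_handler(RemoteClientSignalHandler())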
class ExceptionSink:
"""A mutable singleton object representing where exceptions should be logged to."""
# NB: see the bottom of this file where we call reset_log_location() and other mutators in order
# to properly setup global state.
_log_dir = None
# We need an exiter in order to know what to do after we log a fatal exception or handle a
# catchable signal.
_exiter: Optional[Exiter] = None
# Where to log stacktraces to in a SIGUSR2 handler.
_interactive_output_stream = None
# Whether to print a stacktrace in any fatal error message printed to the terminal.
_should_print_backtrace_to_terminal = True
# An instance of `SignalHandler` which is invoked to handle a static set of specific
# nonfatal signals (these signal handlers are allowed to make pants exit, but unlike SIGSEGV they
# don't need to exit immediately).
_signal_handler: Optional[SignalHandler] = None
# These persistent open file descriptors are kept so the signal handler can do almost no work
# (and lets faulthandler figure out signal safety).
_pid_specific_error_fileobj = None
_shared_error_fileobj = None
def __new__(cls, *args, **kwargs):
raise TypeError('Instances of {} are not allowed to be constructed!'
.format(cls.__name__))
class ExceptionSinkError(Exception): pass
@classmethod
def reset_should_print_backtrace_to_terminal(cls, should_print_backtrace):
"""Set whether a backtrace gets printed to the terminal error stream on a fatal error.
Class state:
- Overwrites `cls._should_print_backtrace_to_terminal`.
"""
cls._should_print_backtrace_to_terminal = should_print_backtrace
# All reset_* methods are ~idempotent!
@classmethod
def reset_log_location(cls, new_log_location):
"""Re-acquire file handles to error logs based in the new location.
Class state:
- Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and
`cls._shared_error_fileobj`.
OS state:
- May create a new directory.
- Overwrites signal handlers for many fatal and non-fatal signals (but not SIGUSR2).
:raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not
writable.
"""
# We could no-op here if the log locations are the same, but there's no reason not to have the
# additional safety of re-acquiring file descriptors each time (and erroring out early if the
# location is no longer writable).
# Create the directory if possible, or raise if not writable.
cls._check_or_create_new_destination(new_log_location)
pid_specific_error_stream, shared_error_stream = cls._recapture_fatal_error_log_streams(
new_log_location)
# NB: mutate process-global state!
if faulthandler.is_enabled():
logger.debug('re-enabling faulthandler')
# Call Py_CLEAR() on the previous error stream:
# https://github.com/vstinner/faulthandler/blob/master/faulthandler.c
faulthandler.disable()
# Send a stacktrace to this file if interrupted by a fatal error.
faulthandler.enable(file=pid_specific_error_stream, all_threads=True)
# NB: mutate the class variables!
cls._log_dir = new_log_location
cls._pid_specific_error_fileobj = pid_specific_error_stream
cls._shared_error_fileobj = shared_error_stream
class AccessGlobalExiterMixin:
@property
def _exiter(self) -> Optional[Exiter]:
return ExceptionSink.get_global_exiter()
@classmethod
def get_global_exiter(cls) -> Optional[Exiter]:
return cls._exiter
@classmethod
@contextmanager
def exiter_as(cls, new_exiter_fun: Callable[[Optional[Exiter]], Exiter]) -> Iterator[None]:
"""Temporarily override the global exiter.
NB: We don't want to try/finally here, because we want exceptions to propagate
with the most recent exiter installed in sys.excepthook.
If we wrap this in a try:finally, exceptions will be caught and exiters unset.
"""
previous_exiter = cls._exiter
new_exiter = new_exiter_fun(previous_exiter)
cls._reset_exiter(new_exiter)
yield
cls._reset_exiter(previous_exiter)
@classmethod
@contextmanager
def exiter_as_until_exception(cls, new_exiter_fun: Callable[[Optional[Exiter]], Exiter]) -> Iterator[None]:
"""Temporarily override the global exiter, except this will unset it when an exception happens."""
previous_exiter = cls._exiter
new_exiter = new_exiter_fun(previous_exiter)
try:
cls._reset_exiter(new_exiter)
yield
finally:
cls._reset_exiter(previous_exiter)
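# Usage sketch (`WrappingExiter` and `run_command` are hypothetical): swap in
# a wrapping exiter for the duration of a run, restoring the previous one
# afterwards. The factory receives the previous exiter so it can delegate:
#
#   with ExceptionSink.exiter_as(lambda previous: WrappingExiter(previous)):
#       run_command()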
@classmethod
def _reset_exiter(cls, exiter: Optional[Exiter]) -> None:
"""
Class state:
- Overwrites `cls._exiter`.
Python state:
- Overwrites sys.excepthook.
"""
assert(isinstance(exiter, Exiter))
logger.debug(f"overriding the global exiter with {exiter} (from {cls._exiter})")
# NB: mutate the class variables! This is done before mutating the exception hook, because the
# uncaught exception handler uses cls._exiter to exit.
cls._exiter = exiter
# NB: mutate process-global state!
sys.excepthook = cls._log_unhandled_exception_and_exit
@classmethod
def reset_interactive_output_stream(
cls,
interactive_output_stream,
override_faulthandler_destination=True
):
"""
Class state:
- Overwrites `cls._interactive_output_stream`.
OS state:
- Overwrites the SIGUSR2 handler.
This method registers a SIGUSR2 handler, which permits a non-fatal `kill -31 <pants pid>` for
stacktrace retrieval. This is also where the error message on fatal exit will be printed.
"""
try:
# NB: mutate process-global state!
# This permits a non-fatal `kill -31 <pants pid>` for stacktrace retrieval.
if override_faulthandler_destination:
faulthandler.register(signal.SIGUSR2, interactive_output_stream,
all_threads=True, chain=False)
# NB: mutate the class variables!
cls._interactive_output_stream = interactive_output_stream
except ValueError:
# Warn about "ValueError: IO on closed file" when the stream is closed.
cls.log_exception(
"Cannot reset interactive_output_stream -- stream (probably stderr) is closed")
@classmethod
def exceptions_log_path(cls, for_pid=None, in_dir=None):
"""Get the path to either the shared or pid-specific fatal errors log file."""
if for_pid is None:
intermediate_filename_component = ''
else:
assert(isinstance(for_pid, Pid))
intermediate_filename_component = '.{}'.format(for_pid)
in_dir = in_dir or cls._log_dir
return os.path.join(
in_dir,
'logs',
'exceptions{}.log'.format(intermediate_filename_component))
@classmethod
def log_exception(cls, msg):
"""Try to log an error message to this process's error log and the shared error log.
NB: Doesn't raise (logs an error instead).
"""
pid = os.getpid()
fatal_error_log_entry = cls._format_exception_message(msg, pid)
# We care more about this log than the shared log, so write to it first.
try:
cls._try_write_with_flush(cls._pid_specific_error_fileobj, fatal_error_log_entry)
except Exception as e:
logger.error(
"Error logging the message '{}' to the pid-specific file handle for {} at pid {}:\n{}"
.format(msg, cls._log_dir, pid, e))
# Write to the shared log.
try:
# TODO: we should probably guard this against concurrent modification by other pants
# subprocesses somehow.
cls._try_write_with_flush(cls._shared_error_fileobj, fatal_error_log_entry)
except Exception as e:
logger.error(
"Error logging the message '{}' to the shared file handle for {} at pid {}:\n{}"
.format(msg, cls._log_dir, pid, e))
@classmethod
def _try_write_with_flush(cls, fileobj, payload):
"""This method is here so that it can be patched to simulate write errors.
This is because mock can't patch primitive objects like file objects.
"""
fileobj.write(payload)
fileobj.flush()
@classmethod
def _check_or_create_new_destination(cls, destination):
try:
safe_mkdir(destination)
except Exception as e:
raise cls.ExceptionSinkError(
"The provided exception sink path at '{}' is not writable or could not be created: {}."
.format(destination, str(e)),
e)
@classmethod
def _recapture_fatal_error_log_streams(cls, new_log_location):
# NB: We do not close old file descriptors under the assumption their lifetimes are managed
# elsewhere.
# We recapture both log streams each time.
pid = os.getpid()
pid_specific_log_path = cls.exceptions_log_path(for_pid=pid, in_dir=new_log_location)
shared_log_path = cls.exceptions_log_path(in_dir=new_log_location)
assert(pid_specific_log_path != shared_log_path)
try:
# Truncate the pid-specific error log file.
pid_specific_error_stream = safe_open(pid_specific_log_path, mode='w')
# Append to the shared error file.
shared_error_stream = safe_open(shared_log_path, mode='a')
except Exception as e:
raise cls.ExceptionSinkError(
"Error opening fatal error log streams for log location '{}': {}"
.format(new_log_location, str(e)))
return (pid_specific_error_stream, shared_error_stream)
@classmethod
def reset_signal_handler(cls, signal_handler):
"""
Class state:
- Overwrites `cls._signal_handler`.
OS state:
- Overwrites signal handlers for SIGINT, SIGQUIT, and SIGTERM.
NB: This method calls signal.signal(), which will crash if not called from the main thread!
:returns: The :class:`SignalHandler` that was previously registered, or None if this is
the first time this method was called.
"""
assert(isinstance(signal_handler, SignalHandler))
# NB: Modify process-global state!
for signum, handler in signal_handler.signal_handler_mapping.items():
signal.signal(signum, handler)
# Retry any system calls interrupted by any of the signals we just installed handlers for
# (instead of having them raise EINTR). siginterrupt(3) says this is the default behavior on
# Linux and OSX.
signal.siginterrupt(signum, False)
previous_signal_handler = cls._signal_handler
# NB: Mutate the class variables!
cls._signal_handler = signal_handler
return previous_signal_handler
@classmethod
@contextmanager
def trapped_signals(cls, new_signal_handler):
"""
A contextmanager which temporarily overrides signal handling.
NB: This method calls signal.signal(), which will crash if not called from the main thread!
"""
previous_signal_handler = cls.reset_signal_handler(new_signal_handler)
try:
yield
finally:
cls.reset_signal_handler(previous_signal_handler)
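# Usage sketch (`ServerSignalHandler` and `serve_requests` are hypothetical):
# install signal handling only for the duration of a block, restoring
# whatever was registered before, even if the block raises:
#
#   with ExceptionSink.trapped_signals(ServerSignalHandler()):
#       serve_requests()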
@classmethod
@contextmanager
def ignoring_sigint(cls):
"""
A contextmanager which disables handling sigint in the current signal handler.
This allows threads that are not the main thread to ignore sigint.
NB: Only use this if you can't use ExceptionSink.trapped_signals().
Class state:
- Toggles `self._ignore_sigint` in `cls._signal_handler`.
"""
with cls._signal_handler._ignoring_sigint():
yield
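# Usage sketch (`flush_state_to_disk` is hypothetical): let a non-main thread
# finish a critical section without being interrupted by ctrl-c; SIGINT
# handling resumes once the block exits:
#
#   with ExceptionSink.ignoring_sigint():
#       flush_state_to_disk()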
@classmethod
def toggle_ignoring_sigint_v2_engine(cls, toggle: bool) -> None:
assert cls._signal_handler is not None
cls._signal_handler._toggle_ignoring_sigint_v2_engine(toggle)
@classmethod
def _iso_timestamp_for_now(cls):
return datetime.datetime.now().isoformat()
# NB: This includes a trailing newline, but no leading newline.
_EXCEPTION_LOG_FORMAT = """\
timestamp: {timestamp}
process title: {process_title}
sys.argv: {args}
pid: {pid}
{message}
"""
@classmethod
def _format_exception_message(cls, msg, pid):
return cls._EXCEPTION_LOG_FORMAT.format(
timestamp=cls._iso_timestamp_for_now(),
process_title=setproctitle.getproctitle(),
args=sys.argv,
pid=pid,
message=msg)
_traceback_omitted_default_text = '(backtrace omitted)'
@classmethod
def _format_traceback(cls, traceback_lines, should_print_backtrace):
if should_print_backtrace:
traceback_string = '\n{}'.format(''.join(traceback_lines))
else:
traceback_string = ' {}'.format(cls._traceback_omitted_default_text)
return traceback_string
_UNHANDLED_EXCEPTION_LOG_FORMAT = """\
Exception caught: ({exception_type}){backtrace}
Exception message: {exception_message}{maybe_newline}
"""
@classmethod
def _format_unhandled_exception_log(cls, exc, tb, add_newline, should_print_backtrace):
exc_type = type(exc)
exception_full_name = '{}.{}'.format(exc_type.__module__, exc_type.__name__)
exception_message = str(exc) if exc else '(no message)'
maybe_newline = '\n' if add_newline else ''
return cls._UNHANDLED_EXCEPTION_LOG_FORMAT.format(
exception_type=exception_full_name,
backtrace=cls._format_traceback(traceback_lines=traceback.format_tb(tb),
should_print_backtrace=should_print_backtrace),
exception_message=exception_message,
maybe_newline=maybe_newline)
_EXIT_FAILURE_TERMINAL_MESSAGE_FORMAT = """\
{timestamp_msg}{terminal_msg}{details_msg}
"""
@classmethod
def _exit_with_failure(cls, terminal_msg):
timestamp_msg = (f'timestamp: {cls._iso_timestamp_for_now()}\n'
if cls._should_print_backtrace_to_terminal else '')
details_msg = ('' if cls._should_print_backtrace_to_terminal
else '\n\n(Use --print-exception-stacktrace to see more error details.)')
terminal_msg = terminal_msg or '<no exit reason provided>'
formatted_terminal_msg = cls._EXIT_FAILURE_TERMINAL_MESSAGE_FORMAT.format(
timestamp_msg=timestamp_msg, terminal_msg=terminal_msg, details_msg=details_msg)
# Exit with failure, printing a message to the terminal (or whatever the interactive stream is).
cls._exiter.exit_and_fail(msg=formatted_terminal_msg, out=cls._interactive_output_stream)
@classmethod
def _log_unhandled_exception_and_exit(cls, exc_class=None, exc=None, tb=None, add_newline=False):
"""A sys.excepthook implementation which logs the error and exits with failure."""
exc_class = exc_class or sys.exc_info()[0]
exc = exc or sys.exc_info()[1]
tb = tb or sys.exc_info()[2]
# This exception was raised by a signal handler with the intent to exit the program.
if exc_class == SignalHandler.SignalHandledNonLocalExit:
return cls._handle_signal_gracefully(exc.signum, exc.signame, exc.traceback_lines)
extra_err_msg = None
try:
# Always output the unhandled exception details into a log file, including the traceback.
exception_log_entry = cls._format_unhandled_exception_log(exc, tb, add_newline,
should_print_backtrace=True)
cls.log_exception(exception_log_entry)
except Exception as e:
extra_err_msg = 'Additional error logging unhandled exception {}: {}'.format(exc, e)
logger.error(extra_err_msg)
# Generate an unhandled exception report fit to be printed to the terminal (respecting the
# Exiter's should_print_backtrace field).
if cls._should_print_backtrace_to_terminal:
stderr_printed_error = cls._format_unhandled_exception_log(
exc, tb, add_newline,
should_print_backtrace=cls._should_print_backtrace_to_terminal)
if extra_err_msg:
stderr_printed_error = '{}\n{}'.format(stderr_printed_error, extra_err_msg)
else:
# If the user didn't ask for a backtrace, show a succinct error message without
# all the exception-related preamble. A power-user/pants developer can still
# get all the preamble info along with the backtrace, but the end user shouldn't
# see that boilerplate by default.
error_msgs = getattr(exc, 'end_user_messages', lambda: [str(exc)])()
stderr_printed_error = '\n' + '\n'.join(f'ERROR: {msg}' for msg in error_msgs)
cls._exit_with_failure(stderr_printed_error)
_CATCHABLE_SIGNAL_ERROR_LOG_FORMAT = """\
Signal {signum} ({signame}) was raised. Exiting with failure.{formatted_traceback}
"""
@classmethod
def _handle_signal_gracefully(cls, signum, signame, traceback_lines):
"""Signal handler for non-fatal signals which raises or logs an error and exits with failure."""
# Extract the stack, and format an entry to be written to the exception log.
formatted_traceback = cls._format_traceback(traceback_lines=traceback_lines,
should_print_backtrace=True)
signal_error_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format(
signum=signum,
signame=signame,
formatted_traceback=formatted_traceback)
# TODO: determine the appropriate signal-safe behavior here (to avoid writing to our file
# descriptors re-entrantly, which raises an IOError).
# This method catches any exceptions raised within it.
cls.log_exception(signal_error_log_entry)
# Create a potentially-abbreviated traceback for the terminal or other interactive stream.
formatted_traceback_for_terminal = cls._format_traceback(
traceback_lines=traceback_lines,
should_print_backtrace=cls._should_print_backtrace_to_terminal)
terminal_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format(
signum=signum,
signame=signame,
formatted_traceback=formatted_traceback_for_terminal)
# Exit, printing the output to the terminal.
cls._exit_with_failure(terminal_log_entry)
# Setup global state such as signal handlers and sys.excepthook with probably-safe values at module
# import time.
# Set the log location for writing logs before bootstrap options are parsed.
ExceptionSink.reset_log_location(os.getcwd())
# Sets except hook for exceptions at import time.
ExceptionSink._reset_exiter(Exiter(exiter=sys.exit))
# Sets a SIGUSR2 handler.
ExceptionSink.reset_interactive_output_stream(sys.stderr.buffer)
# Sets a handler that logs nonfatal signals to the exception sink before exiting.
ExceptionSink.reset_signal_handler(SignalHandler())
# Set whether to print stacktraces on exceptions or signals during import time.
# NB: This will be overridden by bootstrap options in PantsRunner, so we avoid printing out a full
# stacktrace when a user presses control-c during import time unless the environment variable is set
# to explicitly request it. The exception log will have any stacktraces regardless so this should
# not hamper debugging.
ExceptionSink.reset_should_print_backtrace_to_terminal(
should_print_backtrace=os.environ.get('PANTS_PRINT_EXCEPTION_STACKTRACE', 'True') == 'True')
|
py | 1a420d4656896b9b92dc374a723eb21822141a86 | import pytest
from world.layer import Layer
from data import TileType
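# NOTE: the `origin`, `a` and `b` fixtures are assumed to be provided by a
# conftest.py elsewhere in the repo; judging by the serialized keys asserted
# below, they are the positions (0, 0, 0), (1, 2, -3) and (-4, 1, 3).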
@pytest.fixture(name="tilemap")
def _tilemap(origin, a, b):
layer = Layer()
layer[origin] = TileType.GROUND
layer[a] = TileType.SPACE
layer[b] = TileType.WATER
return layer
def test_serializable(tilemap, origin, a, b):
data = tilemap.json
print(data)
assert data["__TYPE__"] == "Layer"
assert data["0,0,0"] == 1
assert data["1,2,-3"] == 0
assert data["-4,1,3"] == 2
reserialized = Layer.load(data)
assert tilemap[origin] == reserialized[origin]
assert tilemap[a] == reserialized[a]
assert tilemap[b] == reserialized[b]
assert tilemap[a] == TileType.SPACE
assert tilemap[b] == TileType.WATER
|
py | 1a420d499a85292537238bdc0fa5f556a3b9a468 | from sympy.algebras.quaternion import Quaternion
from sympy.assumptions.ask import Q
from sympy.calculus.accumulationbounds import AccumBounds
from sympy.combinatorics.partitions import Partition
from sympy.concrete.summations import (Sum, summation)
from sympy.core.add import Add
from sympy.core.containers import (Dict, Tuple)
from sympy.core.expr import UnevaluatedExpr, Expr
from sympy.core.function import (Derivative, Function, Lambda, Subs, WildFunction)
from sympy.core.mul import Mul
from sympy.core import (Catalan, EulerGamma, GoldenRatio, TribonacciConstant)
from sympy.core.numbers import (E, Float, I, Integer, Rational, nan, oo, pi, zoo)
from sympy.core.parameters import _exp_is_pow
from sympy.core.power import Pow
from sympy.core.relational import (Eq, Rel, Ne)
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, Wild, symbols)
from sympy.functions.combinatorial.factorials import (factorial, factorial2, subfactorial)
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.functions.special.delta_functions import Heaviside
from sympy.functions.special.zeta_functions import zeta
from sympy.integrals.integrals import Integral
from sympy.logic.boolalg import (Equivalent, false, true, Xor)
from sympy.matrices.dense import Matrix
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.matrices.expressions.slice import MatrixSlice
from sympy.matrices import SparseMatrix
from sympy.polys.polytools import factor
from sympy.series.limits import Limit
from sympy.series.order import O
from sympy.sets.sets import (Complement, FiniteSet, Interval, SymmetricDifference)
from sympy.external import import_module
from sympy.physics.control.lti import TransferFunction, Series, Parallel, \
Feedback, TransferFunctionMatrix, MIMOSeries, MIMOParallel, MIMOFeedback
from sympy.physics.units import second, joule
from sympy.polys import (Poly, rootof, RootSum, groebner, ring, field, ZZ, QQ,
ZZ_I, QQ_I, lex, grlex)
from sympy.geometry import Point, Circle, Polygon, Ellipse, Triangle
from sympy.tensor import NDimArray
from sympy.tensor.array.expressions.array_expressions import ArraySymbol, ArrayElement
from sympy.testing.pytest import raises
from sympy.printing import sstr, sstrrepr, StrPrinter
from sympy.physics.quantum.trace import Tr
x, y, z, w, t = symbols('x,y,z,w,t')
d = Dummy('d')
def test_printmethod():
class R(Abs):
def _sympystr(self, printer):
return "foo(%s)" % printer._print(self.args[0])
assert sstr(R(x)) == "foo(x)"
class R(Abs):
def _sympystr(self, printer):
return "foo"
assert sstr(R(x)) == "foo"
def test_Abs():
assert str(Abs(x)) == "Abs(x)"
assert str(Abs(Rational(1, 6))) == "1/6"
assert str(Abs(Rational(-1, 6))) == "1/6"
def test_Add():
assert str(x + y) == "x + y"
assert str(x + 1) == "x + 1"
assert str(x + x**2) == "x**2 + x"
assert str(Add(0, 1, evaluate=False)) == "0 + 1"
assert str(Add(0, 0, 1, evaluate=False)) == "0 + 0 + 1"
assert str(1.0*x) == "1.0*x"
assert str(5 + x + y + x*y + x**2 + y**2) == "x**2 + x*y + x + y**2 + y + 5"
assert str(1 + x + x**2/2 + x**3/3) == "x**3/3 + x**2/2 + x + 1"
assert str(2*x - 7*x**2 + 2 + 3*y) == "-7*x**2 + 2*x + 3*y + 2"
assert str(x - y) == "x - y"
assert str(2 - x) == "2 - x"
assert str(x - 2) == "x - 2"
assert str(x - y - z - w) == "-w + x - y - z"
assert str(x - z*y**2*z*w) == "-w*y**2*z**2 + x"
assert str(x - 1*y*x*y) == "-x*y**2 + x"
assert str(sin(x).series(x, 0, 15)) == "x - x**3/6 + x**5/120 - x**7/5040 + x**9/362880 - x**11/39916800 + x**13/6227020800 + O(x**15)"
def test_Catalan():
assert str(Catalan) == "Catalan"
def test_ComplexInfinity():
assert str(zoo) == "zoo"
def test_Derivative():
assert str(Derivative(x, y)) == "Derivative(x, y)"
assert str(Derivative(x**2, x, evaluate=False)) == "Derivative(x**2, x)"
assert str(Derivative(
x**2/y, x, y, evaluate=False)) == "Derivative(x**2/y, x, y)"
def test_dict():
assert str({1: 1 + x}) == sstr({1: 1 + x}) == "{1: x + 1}"
assert str({1: x**2, 2: y*x}) in ("{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
assert sstr({1: x**2, 2: y*x}) == "{1: x**2, 2: x*y}"
def test_Dict():
assert str(Dict({1: 1 + x})) == sstr({1: 1 + x}) == "{1: x + 1}"
assert str(Dict({1: x**2, 2: y*x})) in (
"{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
assert sstr(Dict({1: x**2, 2: y*x})) == "{1: x**2, 2: x*y}"
def test_Dummy():
assert str(d) == "_d"
assert str(d + x) == "_d + x"
def test_EulerGamma():
assert str(EulerGamma) == "EulerGamma"
def test_Exp():
assert str(E) == "E"
with _exp_is_pow(True):
assert str(exp(x)) == "E**x"
def test_factorial():
n = Symbol('n', integer=True)
assert str(factorial(-2)) == "zoo"
assert str(factorial(0)) == "1"
assert str(factorial(7)) == "5040"
assert str(factorial(n)) == "factorial(n)"
assert str(factorial(2*n)) == "factorial(2*n)"
assert str(factorial(factorial(n))) == 'factorial(factorial(n))'
assert str(factorial(factorial2(n))) == 'factorial(factorial2(n))'
assert str(factorial2(factorial(n))) == 'factorial2(factorial(n))'
assert str(factorial2(factorial2(n))) == 'factorial2(factorial2(n))'
assert str(subfactorial(3)) == "2"
assert str(subfactorial(n)) == "subfactorial(n)"
assert str(subfactorial(2*n)) == "subfactorial(2*n)"
def test_Function():
f = Function('f')
fx = f(x)
w = WildFunction('w')
assert str(f) == "f"
assert str(fx) == "f(x)"
assert str(w) == "w_"
def test_Geometry():
assert sstr(Point(0, 0)) == 'Point2D(0, 0)'
assert sstr(Circle(Point(0, 0), 3)) == 'Circle(Point2D(0, 0), 3)'
assert sstr(Ellipse(Point(1, 2), 3, 4)) == 'Ellipse(Point2D(1, 2), 3, 4)'
assert sstr(Triangle(Point(1, 1), Point(7, 8), Point(0, -1))) == \
'Triangle(Point2D(1, 1), Point2D(7, 8), Point2D(0, -1))'
assert sstr(Polygon(Point(5, 6), Point(-2, -3), Point(0, 0), Point(4, 7))) == \
'Polygon(Point2D(5, 6), Point2D(-2, -3), Point2D(0, 0), Point2D(4, 7))'
assert sstr(Triangle(Point(0, 0), Point(1, 0), Point(0, 1)), sympy_integers=True) == \
'Triangle(Point2D(S(0), S(0)), Point2D(S(1), S(0)), Point2D(S(0), S(1)))'
assert sstr(Ellipse(Point(1, 2), 3, 4), sympy_integers=True) == \
'Ellipse(Point2D(S(1), S(2)), S(3), S(4))'
def test_GoldenRatio():
assert str(GoldenRatio) == "GoldenRatio"
def test_Heaviside():
assert str(Heaviside(x)) == str(Heaviside(x, S.Half)) == "Heaviside(x)"
assert str(Heaviside(x, 1)) == "Heaviside(x, 1)"
def test_TribonacciConstant():
assert str(TribonacciConstant) == "TribonacciConstant"
def test_ImaginaryUnit():
assert str(I) == "I"
def test_Infinity():
assert str(oo) == "oo"
assert str(oo*I) == "oo*I"
def test_Integer():
assert str(Integer(-1)) == "-1"
assert str(Integer(1)) == "1"
assert str(Integer(-3)) == "-3"
assert str(Integer(0)) == "0"
assert str(Integer(25)) == "25"
def test_Integral():
assert str(Integral(sin(x), y)) == "Integral(sin(x), y)"
assert str(Integral(sin(x), (y, 0, 1))) == "Integral(sin(x), (y, 0, 1))"
def test_Interval():
n = (S.NegativeInfinity, 1, 2, S.Infinity)
for i in range(len(n)):
for j in range(i + 1, len(n)):
for l in (True, False):
for r in (True, False):
ival = Interval(n[i], n[j], l, r)
assert S(str(ival)) == ival
def test_AccumBounds():
a = Symbol('a', real=True)
assert str(AccumBounds(0, a)) == "AccumBounds(0, a)"
assert str(AccumBounds(0, 1)) == "AccumBounds(0, 1)"
def test_Lambda():
assert str(Lambda(d, d**2)) == "Lambda(_d, _d**2)"
# issue 2908
assert str(Lambda((), 1)) == "Lambda((), 1)"
assert str(Lambda((), x)) == "Lambda((), x)"
assert str(Lambda((x, y), x+y)) == "Lambda((x, y), x + y)"
assert str(Lambda(((x, y),), x+y)) == "Lambda(((x, y),), x + y)"
def test_Limit():
assert str(Limit(sin(x)/x, x, y)) == "Limit(sin(x)/x, x, y)"
assert str(Limit(1/x, x, 0)) == "Limit(1/x, x, 0)"
assert str(
Limit(sin(x)/x, x, y, dir="-")) == "Limit(sin(x)/x, x, y, dir='-')"
def test_list():
assert str([x]) == sstr([x]) == "[x]"
assert str([x**2, x*y + 1]) == sstr([x**2, x*y + 1]) == "[x**2, x*y + 1]"
assert str([x**2, [y + x]]) == sstr([x**2, [y + x]]) == "[x**2, [x + y]]"
def test_Matrix_str():
M = Matrix([[x**+1, 1], [y, x + y]])
assert str(M) == "Matrix([[x, 1], [y, x + y]])"
assert sstr(M) == "Matrix([\n[x, 1],\n[y, x + y]])"
M = Matrix([[1]])
assert str(M) == sstr(M) == "Matrix([[1]])"
M = Matrix([[1, 2]])
assert str(M) == sstr(M) == "Matrix([[1, 2]])"
M = Matrix()
assert str(M) == sstr(M) == "Matrix(0, 0, [])"
M = Matrix(0, 1, lambda i, j: 0)
assert str(M) == sstr(M) == "Matrix(0, 1, [])"
def test_Mul():
assert str(x/y) == "x/y"
assert str(y/x) == "y/x"
assert str(x/y/z) == "x/(y*z)"
assert str((x + 1)/(y + 2)) == "(x + 1)/(y + 2)"
assert str(2*x/3) == '2*x/3'
assert str(-2*x/3) == '-2*x/3'
assert str(-1.0*x) == '-1.0*x'
assert str(1.0*x) == '1.0*x'
assert str(Mul(0, 1, evaluate=False)) == '0*1'
assert str(Mul(1, 0, evaluate=False)) == '1*0'
assert str(Mul(1, 1, evaluate=False)) == '1*1'
assert str(Mul(1, 1, 1, evaluate=False)) == '1*1*1'
assert str(Mul(1, 2, evaluate=False)) == '1*2'
assert str(Mul(1, S.Half, evaluate=False)) == '1*(1/2)'
assert str(Mul(1, 1, S.Half, evaluate=False)) == '1*1*(1/2)'
assert str(Mul(1, 1, 2, 3, x, evaluate=False)) == '1*1*2*3*x'
assert str(Mul(1, -1, evaluate=False)) == '1*(-1)'
assert str(Mul(-1, 1, evaluate=False)) == '-1*1'
assert str(Mul(4, 3, 2, 1, 0, y, x, evaluate=False)) == '4*3*2*1*0*y*x'
assert str(Mul(4, 3, 2, 1+z, 0, y, x, evaluate=False)) == '4*3*2*(z + 1)*0*y*x'
assert str(Mul(Rational(2, 3), Rational(5, 7), evaluate=False)) == '(2/3)*(5/7)'
# For issue 14160
assert str(Mul(-2, x, Pow(Mul(y,y,evaluate=False), -1, evaluate=False),
evaluate=False)) == '-2*x/(y*y)'
# issue 21537
assert str(Mul(x, Pow(1/y, -1, evaluate=False), evaluate=False)) == 'x/(1/y)'
class CustomClass1(Expr):
is_commutative = True
class CustomClass2(Expr):
is_commutative = True
cc1 = CustomClass1()
cc2 = CustomClass2()
assert str(Rational(2)*cc1) == '2*CustomClass1()'
assert str(cc1*Rational(2)) == '2*CustomClass1()'
assert str(cc1*Float("1.5")) == '1.5*CustomClass1()'
assert str(cc2*Rational(2)) == '2*CustomClass2()'
assert str(cc2*Rational(2)*cc1) == '2*CustomClass1()*CustomClass2()'
assert str(cc1*Rational(2)*cc2) == '2*CustomClass1()*CustomClass2()'
def test_NaN():
assert str(nan) == "nan"
def test_NegativeInfinity():
assert str(-oo) == "-oo"
def test_Order():
assert str(O(x)) == "O(x)"
assert str(O(x**2)) == "O(x**2)"
assert str(O(x*y)) == "O(x*y, x, y)"
assert str(O(x, x)) == "O(x)"
assert str(O(x, (x, 0))) == "O(x)"
assert str(O(x, (x, oo))) == "O(x, (x, oo))"
assert str(O(x, x, y)) == "O(x, x, y)"
assert str(O(x, x, y)) == "O(x, x, y)"
assert str(O(x, (x, oo), (y, oo))) == "O(x, (x, oo), (y, oo))"
def test_Permutation_Cycle():
from sympy.combinatorics import Permutation, Cycle
# general principle: economically, canonically show all moved elements
# and the size of the permutation.
for p, s in [
(Cycle(),
'()'),
(Cycle(2),
'(2)'),
(Cycle(2, 1),
'(1 2)'),
(Cycle(1, 2)(5)(6, 7)(10),
'(1 2)(6 7)(10)'),
(Cycle(3, 4)(1, 2)(3, 4),
'(1 2)(4)'),
]:
assert sstr(p) == s
for p, s in [
(Permutation([]),
'Permutation([])'),
(Permutation([], size=1),
'Permutation([0])'),
(Permutation([], size=2),
'Permutation([0, 1])'),
(Permutation([], size=10),
'Permutation([], size=10)'),
(Permutation([1, 0, 2]),
'Permutation([1, 0, 2])'),
(Permutation([1, 0, 2, 3, 4, 5]),
'Permutation([1, 0], size=6)'),
(Permutation([1, 0, 2, 3, 4, 5], size=10),
'Permutation([1, 0], size=10)'),
]:
assert sstr(p, perm_cyclic=False) == s
for p, s in [
(Permutation([]),
'()'),
(Permutation([], size=1),
'(0)'),
(Permutation([], size=2),
'(1)'),
(Permutation([], size=10),
'(9)'),
(Permutation([1, 0, 2]),
'(2)(0 1)'),
(Permutation([1, 0, 2, 3, 4, 5]),
'(5)(0 1)'),
(Permutation([1, 0, 2, 3, 4, 5], size=10),
'(9)(0 1)'),
(Permutation([0, 1, 3, 2, 4, 5], size=10),
'(9)(2 3)'),
]:
assert sstr(p) == s
def test_Pi():
assert str(pi) == "pi"
def test_Poly():
assert str(Poly(0, x)) == "Poly(0, x, domain='ZZ')"
assert str(Poly(1, x)) == "Poly(1, x, domain='ZZ')"
assert str(Poly(x, x)) == "Poly(x, x, domain='ZZ')"
assert str(Poly(2*x + 1, x)) == "Poly(2*x + 1, x, domain='ZZ')"
assert str(Poly(2*x - 1, x)) == "Poly(2*x - 1, x, domain='ZZ')"
assert str(Poly(-1, x)) == "Poly(-1, x, domain='ZZ')"
assert str(Poly(-x, x)) == "Poly(-x, x, domain='ZZ')"
assert str(Poly(-2*x + 1, x)) == "Poly(-2*x + 1, x, domain='ZZ')"
assert str(Poly(-2*x - 1, x)) == "Poly(-2*x - 1, x, domain='ZZ')"
assert str(Poly(x - 1, x)) == "Poly(x - 1, x, domain='ZZ')"
assert str(Poly(2*x + x**5, x)) == "Poly(x**5 + 2*x, x, domain='ZZ')"
assert str(Poly(3**(2*x), 3**x)) == "Poly((3**x)**2, 3**x, domain='ZZ')"
assert str(Poly((x**2)**x)) == "Poly(((x**2)**x), (x**2)**x, domain='ZZ')"
assert str(Poly((x + y)**3, (x + y), expand=False)
) == "Poly((x + y)**3, x + y, domain='ZZ')"
assert str(Poly((x - 1)**2, (x - 1), expand=False)
) == "Poly((x - 1)**2, x - 1, domain='ZZ')"
assert str(
Poly(x**2 + 1 + y, x)) == "Poly(x**2 + y + 1, x, domain='ZZ[y]')"
assert str(
Poly(x**2 - 1 + y, x)) == "Poly(x**2 + y - 1, x, domain='ZZ[y]')"
assert str(Poly(x**2 + I*x, x)) == "Poly(x**2 + I*x, x, domain='ZZ_I')"
assert str(Poly(x**2 - I*x, x)) == "Poly(x**2 - I*x, x, domain='ZZ_I')"
assert str(Poly(-x*y*z + x*y - 1, x, y, z)
) == "Poly(-x*y*z + x*y - 1, x, y, z, domain='ZZ')"
assert str(Poly(-w*x**21*y**7*z + (1 + w)*z**3 - 2*x*z + 1, x, y, z)) == \
"Poly(-w*x**21*y**7*z - 2*x*z + (w + 1)*z**3 + 1, x, y, z, domain='ZZ[w]')"
assert str(Poly(x**2 + 1, x, modulus=2)) == "Poly(x**2 + 1, x, modulus=2)"
assert str(Poly(2*x**2 + 3*x + 4, x, modulus=17)) == "Poly(2*x**2 + 3*x + 4, x, modulus=17)"
def test_PolyRing():
assert str(ring("x", ZZ, lex)[0]) == "Polynomial ring in x over ZZ with lex order"
assert str(ring("x,y", QQ, grlex)[0]) == "Polynomial ring in x, y over QQ with grlex order"
assert str(ring("x,y,z", ZZ["t"], lex)[0]) == "Polynomial ring in x, y, z over ZZ[t] with lex order"
def test_FracField():
assert str(field("x", ZZ, lex)[0]) == "Rational function field in x over ZZ with lex order"
assert str(field("x,y", QQ, grlex)[0]) == "Rational function field in x, y over QQ with grlex order"
assert str(field("x,y,z", ZZ["t"], lex)[0]) == "Rational function field in x, y, z over ZZ[t] with lex order"
def test_PolyElement():
Ruv, u,v = ring("u,v", ZZ)
Rxyz, x,y,z = ring("x,y,z", Ruv)
Rx_zzi, xz = ring("x", ZZ_I)
assert str(x - x) == "0"
assert str(x - 1) == "x - 1"
assert str(x + 1) == "x + 1"
assert str(x**2) == "x**2"
assert str(x**(-2)) == "x**(-2)"
assert str(x**QQ(1, 2)) == "x**(1/2)"
assert str((u**2 + 3*u*v + 1)*x**2*y + u + 1) == "(u**2 + 3*u*v + 1)*x**2*y + u + 1"
assert str((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x) == "(u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x"
assert str((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1) == "(u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1"
assert str((-u**2 + 3*u*v - 1)*x**2*y - (u + 1)*x - 1) == "-(u**2 - 3*u*v + 1)*x**2*y - (u + 1)*x - 1"
assert str(-(v**2 + v + 1)*x + 3*u*v + 1) == "-(v**2 + v + 1)*x + 3*u*v + 1"
assert str(-(v**2 + v + 1)*x - 3*u*v + 1) == "-(v**2 + v + 1)*x - 3*u*v + 1"
assert str((1+I)*xz + 2) == "(1 + 1*I)*x + (2 + 0*I)"
def test_FracElement():
Fuv, u,v = field("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Fuv)
Rx_zzi, xz = field("x", QQ_I)
i = QQ_I(0, 1)
assert str(x - x) == "0"
assert str(x - 1) == "x - 1"
assert str(x + 1) == "x + 1"
assert str(x/3) == "x/3"
assert str(x/z) == "x/z"
assert str(x*y/z) == "x*y/z"
assert str(x/(z*t)) == "x/(z*t)"
assert str(x*y/(z*t)) == "x*y/(z*t)"
assert str((x - 1)/y) == "(x - 1)/y"
assert str((x + 1)/y) == "(x + 1)/y"
assert str((-x - 1)/y) == "(-x - 1)/y"
assert str((x + 1)/(y*z)) == "(x + 1)/(y*z)"
assert str(-y/(x + 1)) == "-y/(x + 1)"
assert str(y*z/(x + 1)) == "y*z/(x + 1)"
assert str(((u + 1)*x*y + 1)/((v - 1)*z - 1)) == "((u + 1)*x*y + 1)/((v - 1)*z - 1)"
assert str(((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1)) == "((u + 1)*x*y + 1)/((v - 1)*z - u*v*t - 1)"
assert str((1+i)/xz) == "(1 + 1*I)/x"
assert str(((1+i)*xz - i)/xz) == "((1 + 1*I)*x + (0 + -1*I))/x"
def test_GaussianInteger():
assert str(ZZ_I(1, 0)) == "1"
assert str(ZZ_I(-1, 0)) == "-1"
assert str(ZZ_I(0, 1)) == "I"
assert str(ZZ_I(0, -1)) == "-I"
assert str(ZZ_I(0, 2)) == "2*I"
assert str(ZZ_I(0, -2)) == "-2*I"
assert str(ZZ_I(1, 1)) == "1 + I"
assert str(ZZ_I(-1, -1)) == "-1 - I"
assert str(ZZ_I(-1, -2)) == "-1 - 2*I"
def test_GaussianRational():
assert str(QQ_I(1, 0)) == "1"
assert str(QQ_I(QQ(2, 3), 0)) == "2/3"
assert str(QQ_I(0, QQ(2, 3))) == "2*I/3"
assert str(QQ_I(QQ(1, 2), QQ(-2, 3))) == "1/2 - 2*I/3"
def test_Pow():
assert str(x**-1) == "1/x"
assert str(x**-2) == "x**(-2)"
assert str(x**2) == "x**2"
assert str((x + y)**-1) == "1/(x + y)"
assert str((x + y)**-2) == "(x + y)**(-2)"
assert str((x + y)**2) == "(x + y)**2"
assert str((x + y)**(1 + x)) == "(x + y)**(x + 1)"
assert str(x**Rational(1, 3)) == "x**(1/3)"
assert str(1/x**Rational(1, 3)) == "x**(-1/3)"
assert str(sqrt(sqrt(x))) == "x**(1/4)"
# not the same as x**-1
assert str(x**-1.0) == 'x**(-1.0)'
# see issue #2860
assert str(Pow(S(2), -1.0, evaluate=False)) == '2**(-1.0)'
def test_sqrt():
assert str(sqrt(x)) == "sqrt(x)"
assert str(sqrt(x**2)) == "sqrt(x**2)"
assert str(1/sqrt(x)) == "1/sqrt(x)"
assert str(1/sqrt(x**2)) == "1/sqrt(x**2)"
assert str(y/sqrt(x)) == "y/sqrt(x)"
assert str(x**0.5) == "x**0.5"
assert str(1/x**0.5) == "x**(-0.5)"
def test_Rational():
n1 = Rational(1, 4)
n2 = Rational(1, 3)
n3 = Rational(2, 4)
n4 = Rational(2, -4)
n5 = Rational(0)
n7 = Rational(3)
n8 = Rational(-3)
assert str(n1*n2) == "1/12"
assert str(n1*n2) == "1/12"
assert str(n3) == "1/2"
assert str(n1*n3) == "1/8"
assert str(n1 + n3) == "3/4"
assert str(n1 + n2) == "7/12"
assert str(n1 + n4) == "-1/4"
assert str(n4*n4) == "1/4"
assert str(n4 + n2) == "-1/6"
assert str(n4 + n5) == "-1/2"
assert str(n4*n5) == "0"
assert str(n3 + n4) == "0"
assert str(n1**n7) == "1/64"
assert str(n2**n7) == "1/27"
assert str(n2**n8) == "27"
assert str(n7**n8) == "1/27"
assert str(Rational("-25")) == "-25"
assert str(Rational("1.25")) == "5/4"
assert str(Rational("-2.6e-2")) == "-13/500"
assert str(S("25/7")) == "25/7"
assert str(S("-123/569")) == "-123/569"
assert str(S("0.1[23]", rational=1)) == "61/495"
assert str(S("5.1[666]", rational=1)) == "31/6"
assert str(S("-5.1[666]", rational=1)) == "-31/6"
assert str(S("0.[9]", rational=1)) == "1"
assert str(S("-0.[9]", rational=1)) == "-1"
assert str(sqrt(Rational(1, 4))) == "1/2"
assert str(sqrt(Rational(1, 36))) == "1/6"
assert str((123**25) ** Rational(1, 25)) == "123"
assert str((123**25 + 1)**Rational(1, 25)) != "123"
assert str((123**25 - 1)**Rational(1, 25)) != "123"
assert str((123**25 - 1)**Rational(1, 25)) != "122"
assert str(sqrt(Rational(81, 36))**3) == "27/8"
assert str(1/sqrt(Rational(81, 36))**3) == "8/27"
assert str(sqrt(-4)) == str(2*I)
assert str(2**Rational(1, 10**10)) == "2**(1/10000000000)"
assert sstr(Rational(2, 3), sympy_integers=True) == "S(2)/3"
x = Symbol("x")
assert sstr(x**Rational(2, 3), sympy_integers=True) == "x**(S(2)/3)"
assert sstr(Eq(x, Rational(2, 3)), sympy_integers=True) == "Eq(x, S(2)/3)"
assert sstr(Limit(x, x, Rational(7, 2)), sympy_integers=True) == \
"Limit(x, x, S(7)/2)"
def test_Float():
# NOTE dps is the whole number of decimal digits
assert str(Float('1.23', dps=1 + 2)) == '1.23'
assert str(Float('1.23456789', dps=1 + 8)) == '1.23456789'
assert str(
Float('1.234567890123456789', dps=1 + 18)) == '1.234567890123456789'
assert str(pi.evalf(1 + 2)) == '3.14'
assert str(pi.evalf(1 + 14)) == '3.14159265358979'
assert str(pi.evalf(1 + 64)) == ('3.141592653589793238462643383279'
'5028841971693993751058209749445923')
assert str(pi.round(-1)) == '0.0'
assert str((pi**400 - (pi**400).round(1)).n(2)) == '-0.e+88'
assert sstr(Float("100"), full_prec=False, min=-2, max=2) == '1.0e+2'
assert sstr(Float("100"), full_prec=False, min=-2, max=3) == '100.0'
assert sstr(Float("0.1"), full_prec=False, min=-2, max=3) == '0.1'
assert sstr(Float("0.099"), min=-2, max=3) == '9.90000000000000e-2'
def test_Relational():
assert str(Rel(x, y, "<")) == "x < y"
assert str(Rel(x + y, y, "==")) == "Eq(x + y, y)"
assert str(Rel(x, y, "!=")) == "Ne(x, y)"
assert str(Eq(x, 1) | Eq(x, 2)) == "Eq(x, 1) | Eq(x, 2)"
assert str(Ne(x, 1) & Ne(x, 2)) == "Ne(x, 1) & Ne(x, 2)"
def test_AppliedBinaryRelation():
assert str(Q.eq(x, y)) == "Q.eq(x, y)"
assert str(Q.ne(x, y)) == "Q.ne(x, y)"
def test_CRootOf():
assert str(rootof(x**5 + 2*x - 1, 0)) == "CRootOf(x**5 + 2*x - 1, 0)"
def test_RootSum():
f = x**5 + 2*x - 1
assert str(
RootSum(f, Lambda(z, z), auto=False)) == "RootSum(x**5 + 2*x - 1)"
assert str(RootSum(f, Lambda(
z, z**2), auto=False)) == "RootSum(x**5 + 2*x - 1, Lambda(z, z**2))"
def test_GroebnerBasis():
assert str(groebner(
[], x, y)) == "GroebnerBasis([], x, y, domain='ZZ', order='lex')"
F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
assert str(groebner(F, order='grlex')) == \
"GroebnerBasis([x**2 - x - 3*y + 1, y**2 - 2*x + y - 1], x, y, domain='ZZ', order='grlex')"
assert str(groebner(F, order='lex')) == \
"GroebnerBasis([2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7], x, y, domain='ZZ', order='lex')"
def test_set():
assert sstr(set()) == 'set()'
assert sstr(frozenset()) == 'frozenset()'
assert sstr({1}) == '{1}'
assert sstr(frozenset([1])) == 'frozenset({1})'
assert sstr({1, 2, 3}) == '{1, 2, 3}'
assert sstr(frozenset([1, 2, 3])) == 'frozenset({1, 2, 3})'
assert sstr(
{1, x, x**2, x**3, x**4}) == '{1, x, x**2, x**3, x**4}'
assert sstr(
frozenset([1, x, x**2, x**3, x**4])) == 'frozenset({1, x, x**2, x**3, x**4})'
def test_SparseMatrix():
M = SparseMatrix([[x**+1, 1], [y, x + y]])
assert str(M) == "Matrix([[x, 1], [y, x + y]])"
assert sstr(M) == "Matrix([\n[x, 1],\n[y, x + y]])"
def test_Sum():
assert str(summation(cos(3*z), (z, x, y))) == "Sum(cos(3*z), (z, x, y))"
assert str(Sum(x*y**2, (x, -2, 2), (y, -5, 5))) == \
"Sum(x*y**2, (x, -2, 2), (y, -5, 5))"
def test_Symbol():
assert str(y) == "y"
assert str(x) == "x"
e = x
assert str(e) == "x"
def test_tuple():
assert str((x,)) == sstr((x,)) == "(x,)"
assert str((x + y, 1 + x)) == sstr((x + y, 1 + x)) == "(x + y, x + 1)"
assert str((x + y, (
1 + x, x**2))) == sstr((x + y, (1 + x, x**2))) == "(x + y, (x + 1, x**2))"
def test_Series_str():
tf1 = TransferFunction(x*y**2 - z, y**3 - t**3, y)
tf2 = TransferFunction(x - y, x + y, y)
tf3 = TransferFunction(t*x**2 - t**w*x + w, t - y, y)
assert str(Series(tf1, tf2)) == \
"Series(TransferFunction(x*y**2 - z, -t**3 + y**3, y), TransferFunction(x - y, x + y, y))"
assert str(Series(tf1, tf2, tf3)) == \
"Series(TransferFunction(x*y**2 - z, -t**3 + y**3, y), TransferFunction(x - y, x + y, y), TransferFunction(t*x**2 - t**w*x + w, t - y, y))"
assert str(Series(-tf2, tf1)) == \
"Series(TransferFunction(-x + y, x + y, y), TransferFunction(x*y**2 - z, -t**3 + y**3, y))"
def test_MIMOSeries_str():
tf1 = TransferFunction(x*y**2 - z, y**3 - t**3, y)
tf2 = TransferFunction(x - y, x + y, y)
tfm_1 = TransferFunctionMatrix([[tf1, tf2], [tf2, tf1]])
tfm_2 = TransferFunctionMatrix([[tf2, tf1], [tf1, tf2]])
assert str(MIMOSeries(tfm_1, tfm_2)) == \
"MIMOSeries(TransferFunctionMatrix(((TransferFunction(x*y**2 - z, -t**3 + y**3, y), TransferFunction(x - y, x + y, y)), "\
"(TransferFunction(x - y, x + y, y), TransferFunction(x*y**2 - z, -t**3 + y**3, y)))), "\
"TransferFunctionMatrix(((TransferFunction(x - y, x + y, y), TransferFunction(x*y**2 - z, -t**3 + y**3, y)), "\
"(TransferFunction(x*y**2 - z, -t**3 + y**3, y), TransferFunction(x - y, x + y, y)))))"
def test_TransferFunction_str():
tf1 = TransferFunction(x - 1, x + 1, x)
assert str(tf1) == "TransferFunction(x - 1, x + 1, x)"
tf2 = TransferFunction(x + 1, 2 - y, x)
assert str(tf2) == "TransferFunction(x + 1, 2 - y, x)"
tf3 = TransferFunction(y, y**2 + 2*y + 3, y)
assert str(tf3) == "TransferFunction(y, y**2 + 2*y + 3, y)"
def test_Parallel_str():
tf1 = TransferFunction(x*y**2 - z, y**3 - t**3, y)
tf2 = TransferFunction(x - y, x + y, y)
tf3 = TransferFunction(t*x**2 - t**w*x + w, t - y, y)
assert str(Parallel(tf1, tf2)) == \
"Parallel(TransferFunction(x*y**2 - z, -t**3 + y**3, y), TransferFunction(x - y, x + y, y))"
assert str(Parallel(tf1, tf2, tf3)) == \
"Parallel(TransferFunction(x*y**2 - z, -t**3 + y**3, y), TransferFunction(x - y, x + y, y), TransferFunction(t*x**2 - t**w*x + w, t - y, y))"
assert str(Parallel(-tf2, tf1)) == \
"Parallel(TransferFunction(-x + y, x + y, y), TransferFunction(x*y**2 - z, -t**3 + y**3, y))"
def test_MIMOParallel_str():
tf1 = TransferFunction(x*y**2 - z, y**3 - t**3, y)
tf2 = TransferFunction(x - y, x + y, y)
tfm_1 = TransferFunctionMatrix([[tf1, tf2], [tf2, tf1]])
tfm_2 = TransferFunctionMatrix([[tf2, tf1], [tf1, tf2]])
assert str(MIMOParallel(tfm_1, tfm_2)) == \
"MIMOParallel(TransferFunctionMatrix(((TransferFunction(x*y**2 - z, -t**3 + y**3, y), TransferFunction(x - y, x + y, y)), "\
"(TransferFunction(x - y, x + y, y), TransferFunction(x*y**2 - z, -t**3 + y**3, y)))), "\
"TransferFunctionMatrix(((TransferFunction(x - y, x + y, y), TransferFunction(x*y**2 - z, -t**3 + y**3, y)), "\
"(TransferFunction(x*y**2 - z, -t**3 + y**3, y), TransferFunction(x - y, x + y, y)))))"
def test_Feedback_str():
tf1 = TransferFunction(x*y**2 - z, y**3 - t**3, y)
tf2 = TransferFunction(x - y, x + y, y)
tf3 = TransferFunction(t*x**2 - t**w*x + w, t - y, y)
assert str(Feedback(tf1*tf2, tf3)) == \
"Feedback(Series(TransferFunction(x*y**2 - z, -t**3 + y**3, y), TransferFunction(x - y, x + y, y)), " \
"TransferFunction(t*x**2 - t**w*x + w, t - y, y), -1)"
assert str(Feedback(tf1, TransferFunction(1, 1, y), 1)) == \
"Feedback(TransferFunction(x*y**2 - z, -t**3 + y**3, y), TransferFunction(1, 1, y), 1)"
def test_MIMOFeedback_str():
tf1 = TransferFunction(x**2 - y**3, y - z, x)
tf2 = TransferFunction(y - x, z + y, x)
tfm_1 = TransferFunctionMatrix([[tf2, tf1], [tf1, tf2]])
tfm_2 = TransferFunctionMatrix([[tf1, tf2], [tf2, tf1]])
assert (str(MIMOFeedback(tfm_1, tfm_2)) \
== "MIMOFeedback(TransferFunctionMatrix(((TransferFunction(-x + y, y + z, x), TransferFunction(x**2 - y**3, y - z, x))," \
" (TransferFunction(x**2 - y**3, y - z, x), TransferFunction(-x + y, y + z, x)))), " \
"TransferFunctionMatrix(((TransferFunction(x**2 - y**3, y - z, x), " \
"TransferFunction(-x + y, y + z, x)), (TransferFunction(-x + y, y + z, x), TransferFunction(x**2 - y**3, y - z, x)))), -1)")
assert (str(MIMOFeedback(tfm_1, tfm_2, 1)) \
== "MIMOFeedback(TransferFunctionMatrix(((TransferFunction(-x + y, y + z, x), TransferFunction(x**2 - y**3, y - z, x)), " \
"(TransferFunction(x**2 - y**3, y - z, x), TransferFunction(-x + y, y + z, x)))), " \
"TransferFunctionMatrix(((TransferFunction(x**2 - y**3, y - z, x), TransferFunction(-x + y, y + z, x)), "\
"(TransferFunction(-x + y, y + z, x), TransferFunction(x**2 - y**3, y - z, x)))), 1)")
def test_TransferFunctionMatrix_str():
tf1 = TransferFunction(x*y**2 - z, y**3 - t**3, y)
tf2 = TransferFunction(x - y, x + y, y)
tf3 = TransferFunction(t*x**2 - t**w*x + w, t - y, y)
assert str(TransferFunctionMatrix([[tf1], [tf2]])) == \
"TransferFunctionMatrix(((TransferFunction(x*y**2 - z, -t**3 + y**3, y),), (TransferFunction(x - y, x + y, y),)))"
assert str(TransferFunctionMatrix([[tf1, tf2], [tf3, tf2]])) == \
"TransferFunctionMatrix(((TransferFunction(x*y**2 - z, -t**3 + y**3, y), TransferFunction(x - y, x + y, y)), (TransferFunction(t*x**2 - t**w*x + w, t - y, y), TransferFunction(x - y, x + y, y))))"
def test_Quaternion_str_printer():
q = Quaternion(x, y, z, t)
assert str(q) == "x + y*i + z*j + t*k"
q = Quaternion(x,y,z,x*t)
assert str(q) == "x + y*i + z*j + t*x*k"
q = Quaternion(x,y,z,x+t)
assert str(q) == "x + y*i + z*j + (t + x)*k"
def test_Quantity_str():
assert sstr(second, abbrev=True) == "s"
assert sstr(joule, abbrev=True) == "J"
assert str(second) == "second"
assert str(joule) == "joule"
def test_wild_str():
# Check that expressions containing Wild do not cause infinite recursion
w = Wild('x')
assert str(w + 1) == 'x_ + 1'
assert str(exp(2**w) + 5) == 'exp(2**x_) + 5'
assert str(3*w + 1) == '3*x_ + 1'
assert str(1/w + 1) == '1 + 1/x_'
assert str(w**2 + 1) == 'x_**2 + 1'
assert str(1/(1 - w)) == '1/(1 - x_)'
def test_wild_matchpy():
from sympy.utilities.matchpy_connector import WildDot, WildPlus, WildStar
matchpy = import_module("matchpy")
if matchpy is None:
return
wd = WildDot('w_')
wp = WildPlus('w__')
ws = WildStar('w___')
assert str(wd) == 'w_'
assert str(wp) == 'w__'
assert str(ws) == 'w___'
assert str(wp/ws + 2**wd) == '2**w_ + w__/w___'
assert str(sin(wd)*cos(wp)*sqrt(ws)) == 'sqrt(w___)*sin(w_)*cos(w__)'
def test_zeta():
assert str(zeta(3)) == "zeta(3)"
def test_issue_3101():
e = x - y
a = str(e)
b = str(e)
assert a == b
def test_issue_3103():
e = -2*sqrt(x) - y/sqrt(x)/2
assert str(e) not in ["(-2)*x**1/2(-1/2)*x**(-1/2)*y",
"-2*x**1/2(-1/2)*x**(-1/2)*y", "-2*x**1/2-1/2*x**-1/2*w"]
assert str(e) == "-2*sqrt(x) - y/(2*sqrt(x))"
def test_issue_4021():
e = Integral(x, x) + 1
assert str(e) == 'Integral(x, x) + 1'
def test_sstrrepr():
assert sstr('abc') == 'abc'
assert sstrrepr('abc') == "'abc'"
e = ['a', 'b', 'c', x]
assert sstr(e) == "[a, b, c, x]"
assert sstrrepr(e) == "['a', 'b', 'c', x]"
def test_infinity():
assert sstr(oo*I) == "oo*I"
def test_full_prec():
assert sstr(S("0.3"), full_prec=True) == "0.300000000000000"
assert sstr(S("0.3"), full_prec="auto") == "0.300000000000000"
assert sstr(S("0.3"), full_prec=False) == "0.3"
assert sstr(S("0.3")*x, full_prec=True) in [
"0.300000000000000*x",
"x*0.300000000000000"
]
assert sstr(S("0.3")*x, full_prec="auto") in [
"0.3*x",
"x*0.3"
]
assert sstr(S("0.3")*x, full_prec=False) in [
"0.3*x",
"x*0.3"
]
def test_noncommutative():
A, B, C = symbols('A,B,C', commutative=False)
assert sstr(A*B*C**-1) == "A*B*C**(-1)"
assert sstr(C**-1*A*B) == "C**(-1)*A*B"
assert sstr(A*C**-1*B) == "A*C**(-1)*B"
assert sstr(sqrt(A)) == "sqrt(A)"
assert sstr(1/sqrt(A)) == "A**(-1/2)"
def test_empty_printer():
str_printer = StrPrinter()
assert str_printer.emptyPrinter("foo") == "foo"
assert str_printer.emptyPrinter(x*y) == "x*y"
assert str_printer.emptyPrinter(32) == "32"
def test_settings():
raises(TypeError, lambda: sstr(S(4), method="garbage"))
def test_RandomDomain():
from sympy.stats import Normal, Die, Exponential, pspace, where
X = Normal('x1', 0, 1)
assert str(where(X > 0)) == "Domain: (0 < x1) & (x1 < oo)"
D = Die('d1', 6)
assert str(where(D > 4)) == "Domain: Eq(d1, 5) | Eq(d1, 6)"
A = Exponential('a', 1)
B = Exponential('b', 1)
assert str(pspace(Tuple(A, B)).domain) == "Domain: (0 <= a) & (0 <= b) & (a < oo) & (b < oo)"
def test_FiniteSet():
assert str(FiniteSet(*range(1, 51))) == (
'{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,'
' 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,'
' 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50}'
)
assert str(FiniteSet(*range(1, 6))) == '{1, 2, 3, 4, 5}'
assert str(FiniteSet(*[x*y, x**2])) == '{x**2, x*y}'
assert str(FiniteSet(FiniteSet(FiniteSet(x, y), 5), FiniteSet(x,y), 5)
) == 'FiniteSet(5, FiniteSet(5, {x, y}), {x, y})'
def test_Partition():
assert str(Partition(FiniteSet(x, y), {z})) == 'Partition({z}, {x, y})'
def test_UniversalSet():
assert str(S.UniversalSet) == 'UniversalSet'
def test_PrettyPoly():
F = QQ.frac_field(x, y)
R = QQ[x, y]
assert sstr(F.convert(x/(x + y))) == sstr(x/(x + y))
assert sstr(R.convert(x + y)) == sstr(x + y)
def test_categories():
from sympy.categories import (Object, NamedMorphism,
IdentityMorphism, Category)
A = Object("A")
B = Object("B")
f = NamedMorphism(A, B, "f")
id_A = IdentityMorphism(A)
K = Category("K")
assert str(A) == 'Object("A")'
assert str(f) == 'NamedMorphism(Object("A"), Object("B"), "f")'
assert str(id_A) == 'IdentityMorphism(Object("A"))'
assert str(K) == 'Category("K")'
def test_Tr():
A, B = symbols('A B', commutative=False)
t = Tr(A*B)
assert str(t) == 'Tr(A*B)'
def test_issue_6387():
assert str(factor(-3.0*z + 3)) == '-3.0*(1.0*z - 1.0)'
def test_MatMul_MatAdd():
X, Y = MatrixSymbol("X", 2, 2), MatrixSymbol("Y", 2, 2)
assert str(2*(X + Y)) == "2*X + 2*Y"
assert str(I*X) == "I*X"
assert str(-I*X) == "-I*X"
assert str((1 + I)*X) == '(1 + I)*X'
assert str(-(1 + I)*X) == '(-1 - I)*X'
def test_MatrixSlice():
n = Symbol('n', integer=True)
X = MatrixSymbol('X', n, n)
Y = MatrixSymbol('Y', 10, 10)
Z = MatrixSymbol('Z', 10, 10)
assert str(MatrixSlice(X, (None, None, None), (None, None, None))) == 'X[:, :]'
assert str(X[x:x + 1, y:y + 1]) == 'X[x:x + 1, y:y + 1]'
assert str(X[x:x + 1:2, y:y + 1:2]) == 'X[x:x + 1:2, y:y + 1:2]'
assert str(X[:x, y:]) == 'X[:x, y:]'
assert str(X[x:, :y]) == 'X[x:, :y]'
assert str(X[x:y, z:w]) == 'X[x:y, z:w]'
assert str(X[x:y:t, w:t:x]) == 'X[x:y:t, w:t:x]'
assert str(X[x::y, t::w]) == 'X[x::y, t::w]'
assert str(X[:x:y, :t:w]) == 'X[:x:y, :t:w]'
assert str(X[::x, ::y]) == 'X[::x, ::y]'
assert str(MatrixSlice(X, (0, None, None), (0, None, None))) == 'X[:, :]'
assert str(MatrixSlice(X, (None, n, None), (None, n, None))) == 'X[:, :]'
assert str(MatrixSlice(X, (0, n, None), (0, n, None))) == 'X[:, :]'
assert str(MatrixSlice(X, (0, n, 2), (0, n, 2))) == 'X[::2, ::2]'
assert str(X[1:2:3, 4:5:6]) == 'X[1:2:3, 4:5:6]'
assert str(X[1:3:5, 4:6:8]) == 'X[1:3:5, 4:6:8]'
assert str(X[1:10:2]) == 'X[1:10:2, :]'
assert str(Y[:5, 1:9:2]) == 'Y[:5, 1:9:2]'
assert str(Y[:5, 1:10:2]) == 'Y[:5, 1::2]'
assert str(Y[5, :5:2]) == 'Y[5:6, :5:2]'
assert str(X[0:1, 0:1]) == 'X[:1, :1]'
assert str(X[0:1:2, 0:1:2]) == 'X[:1:2, :1:2]'
assert str((Y + Z)[2:, 2:]) == '(Y + Z)[2:, 2:]'
def test_true_false():
assert str(true) == repr(true) == sstr(true) == "True"
assert str(false) == repr(false) == sstr(false) == "False"
def test_Equivalent():
assert str(Equivalent(y, x)) == "Equivalent(x, y)"
def test_Xor():
assert str(Xor(y, x, evaluate=False)) == "x ^ y"
def test_Complement():
assert str(Complement(S.Reals, S.Naturals)) == 'Complement(Reals, Naturals)'
def test_SymmetricDifference():
assert str(SymmetricDifference(Interval(2, 3), Interval(3, 4),evaluate=False)) == \
'SymmetricDifference(Interval(2, 3), Interval(3, 4))'
def test_UnevaluatedExpr():
a, b = symbols("a b")
expr1 = 2*UnevaluatedExpr(a+b)
assert str(expr1) == "2*(a + b)"
def test_MatrixElement_printing():
# test cases for issue #11821
A = MatrixSymbol("A", 1, 3)
B = MatrixSymbol("B", 1, 3)
C = MatrixSymbol("C", 1, 3)
assert(str(A[0, 0]) == "A[0, 0]")
assert(str(3 * A[0, 0]) == "3*A[0, 0]")
F = C[0, 0].subs(C, A - B)
assert str(F) == "(A - B)[0, 0]"
def test_MatrixSymbol_printing():
A = MatrixSymbol("A", 3, 3)
B = MatrixSymbol("B", 3, 3)
assert str(A - A*B - B) == "A - A*B - B"
assert str(A*B - (A+B)) == "-A + A*B - B"
assert str(A**(-1)) == "A**(-1)"
assert str(A**3) == "A**3"
def test_MatrixExpressions():
n = Symbol('n', integer=True)
X = MatrixSymbol('X', n, n)
assert str(X) == "X"
# Apply function elementwise (`ElementwiseApplyFunc`):
expr = (X.T*X).applyfunc(sin)
assert str(expr) == 'Lambda(_d, sin(_d)).(X.T*X)'
lamda = Lambda(x, 1/x)
expr = (n*X).applyfunc(lamda)
assert str(expr) == 'Lambda(x, 1/x).(n*X)'
def test_Subs_printing():
assert str(Subs(x, (x,), (1,))) == 'Subs(x, x, 1)'
assert str(Subs(x + y, (x, y), (1, 2))) == 'Subs(x + y, (x, y), (1, 2))'
def test_issue_15716():
e = Integral(factorial(x), (x, -oo, oo))
assert e.as_terms() == ([(e, ((1.0, 0.0), (1,), ()))], [e])
def test_str_special_matrices():
from sympy.matrices import Identity, ZeroMatrix, OneMatrix
assert str(Identity(4)) == 'I'
assert str(ZeroMatrix(2, 2)) == '0'
assert str(OneMatrix(2, 2)) == '1'
def test_issue_14567():
assert factorial(Sum(-1, (x, 0, 0))) + y # doesn't raise an error
def test_issue_21823():
assert str(Partition([1, 2])) == 'Partition({1, 2})'
assert str(Partition({1, 2})) == 'Partition({1, 2})'
def test_issue_21119_21460():
ss = lambda x: str(S(x, evaluate=False))
assert ss('4/2') == '4/2'
assert ss('4/-2') == '4/(-2)'
assert ss('-4/2') == '-4/2'
assert ss('-4/-2') == '-4/(-2)'
assert ss('-2*3/-1') == '-2*3/(-1)'
assert ss('-2*3/-1/2') == '-2*3/(-1*2)'
assert ss('4/2/1') == '4/(2*1)'
assert ss('-2/-1/2') == '-2/(-1*2)'
assert ss('2*3*4**(-2*3)') == '2*3/4**(2*3)'
assert ss('2*3*1*4**(-2*3)') == '2*3*1/4**(2*3)'
def test_Str():
from sympy.core.symbol import Str
assert str(Str('x')) == 'x'
assert sstrrepr(Str('x')) == "Str('x')"
def test_diffgeom():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField
x,y = symbols('x y', real=True)
m = Manifold('M', 2)
assert str(m) == "M"
p = Patch('P', m)
assert str(p) == "P"
rect = CoordSystem('rect', p, [x, y])
assert str(rect) == "rect"
b = BaseScalarField(rect, 0)
assert str(b) == "x"
def test_NDimArray():
assert sstr(NDimArray(1.0), full_prec=True) == '1.00000000000000'
assert sstr(NDimArray(1.0), full_prec=False) == '1.0'
assert sstr(NDimArray([1.0, 2.0]), full_prec=True) == '[1.00000000000000, 2.00000000000000]'
assert sstr(NDimArray([1.0, 2.0]), full_prec=False) == '[1.0, 2.0]'
def test_Predicate():
assert sstr(Q.even) == 'Q.even'
def test_AppliedPredicate():
assert sstr(Q.even(x)) == 'Q.even(x)'
def test_printing_str_array_expressions():
assert sstr(ArraySymbol("A", (2, 3, 4))) == "A"
assert sstr(ArrayElement("A", (2, 1/(1-x), 0))) == "A[2, 1/(1 - x), 0]"
M = MatrixSymbol("M", 3, 3)
N = MatrixSymbol("N", 3, 3)
assert sstr(ArrayElement(M*N, [x, 0])) == "(M*N)[x, 0]"
|
py | 1a420d4f96819607a8607443f06a8a9b665ae480 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
import urlparse
from telemetry.core import exceptions
from telemetry.internal.actions.drag import DragAction
from telemetry.internal.actions.javascript_click import ClickElementAction
from telemetry.internal.actions.key_event import KeyPressAction
from telemetry.internal.actions.load_media import LoadMediaAction
from telemetry.internal.actions.loop import LoopAction
from telemetry.internal.actions.mouse_click import MouseClickAction
from telemetry.internal.actions.navigate import NavigateAction
from telemetry.internal.actions.page_action import GESTURE_SOURCE_DEFAULT
from telemetry.internal.actions.page_action import SUPPORTED_GESTURE_SOURCES
from telemetry.internal.actions.pinch import PinchAction
from telemetry.internal.actions.play import PlayAction
from telemetry.internal.actions.repaint_continuously import (
RepaintContinuouslyAction)
from telemetry.internal.actions.repeatable_scroll import RepeatableScrollAction
from telemetry.internal.actions.scroll import ScrollAction
from telemetry.internal.actions.scroll_bounce import ScrollBounceAction
from telemetry.internal.actions.scroll_to_element import ScrollToElementAction
from telemetry.internal.actions.seek import SeekAction
from telemetry.internal.actions.swipe import SwipeAction
from telemetry.internal.actions.tap import TapAction
from telemetry.internal.actions.wait import WaitForElementAction
from telemetry.web_perf import timeline_interaction_record
from py_trace_event import trace_event
import py_utils
_DUMP_WAIT_TIME = 3
class ActionRunner(object):
__metaclass__ = trace_event.TracedMetaClass
def __init__(self, tab, skip_waits=False):
self._tab = tab
self._skip_waits = skip_waits
@property
def tab(self):
"""Returns the tab on which actions are performed."""
return self._tab
def _RunAction(self, action):
action.WillRunAction(self._tab)
action.RunAction(self._tab)
def CreateInteraction(self, label, repeatable=False):
""" Create an action.Interaction object that issues interaction record.
An interaction record is a labeled time period containing
    interactions that developers care about. Each set of metrics
specified in flags will be calculated for this time period.
To mark the start of interaction record, call Begin() method on the returned
object. To mark the finish of interaction record, call End() method on
it. Or better yet, use the with statement to create an
interaction record that covers the actions in the with block.
e.g:
with action_runner.CreateInteraction('Animation-1'):
action_runner.TapElement(...)
action_runner.WaitForJavaScriptCondition(...)
Args:
label: A label for this particular interaction. This can be any
user-defined string, but must not contain '/'.
repeatable: Whether other interactions may use the same logical name
as this interaction. All interactions with the same logical name must
have the same flags.
Returns:
An instance of action_runner.Interaction
"""
flags = []
if repeatable:
flags.append(timeline_interaction_record.REPEATABLE)
return Interaction(self, label, flags)
def CreateGestureInteraction(self, label, repeatable=False):
""" Create an action.Interaction object that issues gesture-based
interaction record.
This is similar to normal interaction record, but it will
auto-narrow the interaction time period to only include the
synthetic gesture event output by Chrome. This is typically use to
reduce noise in gesture-based analysis (e.g., analysis for a
swipe/scroll).
The interaction record label will be prepended with 'Gesture_'.
e.g:
with action_runner.CreateGestureInteraction('Scroll-1'):
action_runner.ScrollPage()
Args:
label: A label for this particular interaction. This can be any
user-defined string, but must not contain '/'.
repeatable: Whether other interactions may use the same logical name
as this interaction. All interactions with the same logical name must
have the same flags.
Returns:
An instance of action_runner.Interaction
"""
return self.CreateInteraction('Gesture_' + label, repeatable)
def WaitForNetworkQuiescence(self, timeout_in_seconds=10):
""" Wait for network quiesence on the page.
Args:
timeout_in_seconds: maximum amount of time (seconds) to wait for network
        quiescence until raising an exception.
Raises:
py_utils.TimeoutException when the timeout is reached but the page's
network is not quiet.
"""
py_utils.WaitFor(self.tab.HasReachedQuiescence, timeout_in_seconds)
def MeasureMemory(self, deterministic_mode=False):
"""Add a memory measurement to the trace being recorded.
Behaves as a no-op if tracing is not enabled.
TODO(perezju): Also behave as a no-op if tracing is enabled but
memory-infra is not.
Args:
deterministic_mode: A boolean indicating whether to attempt or not to
control the environment (force GCs, clear caches) before making the
measurement in an attempt to obtain more deterministic results.
Returns:
GUID of the generated dump if one was triggered, None otherwise.
"""
if not self.tab.browser.platform.tracing_controller.is_tracing_running:
logging.warning('Tracing is off. No memory dumps are being recorded.')
return None
if deterministic_mode:
self.Wait(_DUMP_WAIT_TIME)
self.ForceGarbageCollection()
self.Wait(_DUMP_WAIT_TIME)
dump_id = self.tab.browser.DumpMemory()
if not dump_id:
raise exceptions.StoryActionError('Unable to obtain memory dump')
return dump_id
def Navigate(self, url, script_to_evaluate_on_commit=None,
timeout_in_seconds=60):
"""Navigates to |url|.
If |script_to_evaluate_on_commit| is given, the script source string will be
evaluated when the navigation is committed. This is after the context of
the page exists, but before any script on the page itself has executed.
"""
if urlparse.urlparse(url).scheme == 'file':
url = self._tab.browser.platform.http_server.UrlOf(url[7:])
self._RunAction(NavigateAction(
url=url,
script_to_evaluate_on_commit=script_to_evaluate_on_commit,
timeout_in_seconds=timeout_in_seconds))
def NavigateBack(self):
""" Navigate back to the previous page."""
self.ExecuteJavaScript('window.history.back()')
  def WaitForNavigate(self, timeout_in_seconds=60):
    start_time = time.time()
    self._tab.WaitForNavigate(timeout_in_seconds)
    time_left_in_seconds = (start_time + timeout_in_seconds - time.time())
time_left_in_seconds = max(0, time_left_in_seconds)
self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter(
time_left_in_seconds)
def ReloadPage(self):
"""Reloads the page."""
self._tab.ExecuteJavaScript('window.location.reload()')
self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
def ExecuteJavaScript(self, *args, **kwargs):
"""Executes a given JavaScript statement. Does not return the result.
Example: runner.ExecuteJavaScript('var foo = {{ value }};', value='hi');
Args:
statement: The statement to execute (provided as a string).
Optional keyword args:
timeout: The number of seconds to wait for the statement to execute.
Additional keyword arguments provide values to be interpolated within
the statement. See telemetry.util.js_template for details.
Raises:
EvaluationException: The statement failed to execute.
"""
return self._tab.ExecuteJavaScript(*args, **kwargs)
def EvaluateJavaScript(self, *args, **kwargs):
"""Returns the result of evaluating a given JavaScript expression.
The evaluation results must be convertible to JSON. If the result
is not needed, use ExecuteJavaScript instead.
    Example: runner.EvaluateJavaScript('document.location.href');
Args:
expression: The expression to execute (provided as a string).
Optional keyword args:
timeout: The number of seconds to wait for the expression to evaluate.
Additional keyword arguments provide values to be interpolated within
the expression. See telemetry.util.js_template for details.
Raises:
      EvaluationException: The expression failed to execute or the
        evaluation result cannot be JSON-ized.
"""
return self._tab.EvaluateJavaScript(*args, **kwargs)
def WaitForJavaScriptCondition(self, *args, **kwargs):
"""Wait for a JavaScript condition to become true.
Example: runner.WaitForJavaScriptCondition('window.foo == 10');
Args:
condition: The JavaScript condition (provided as string).
Optional keyword args:
timeout: The number in seconds to wait for the condition to become
True (default to 60).
Additional keyword arguments provide values to be interpolated within
the expression. See telemetry.util.js_template for details.
"""
return self._tab.WaitForJavaScriptCondition(*args, **kwargs)
def Wait(self, seconds):
"""Wait for the number of seconds specified.
Args:
seconds: The number of seconds to wait.
"""
if not self._skip_waits:
time.sleep(seconds)
def WaitForElement(self, selector=None, text=None, element_function=None,
timeout_in_seconds=60):
"""Wait for an element to appear in the document.
The element may be selected via selector, text, or element_function.
    Exactly one of these arguments must be specified.
    Args:
      selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'(function() { return foo.element; })()'.
timeout_in_seconds: The timeout in seconds (default to 60).
"""
self._RunAction(WaitForElementAction(
selector=selector, text=text, element_function=element_function,
timeout_in_seconds=timeout_in_seconds))
def TapElement(self, selector=None, text=None, element_function=None):
"""Tap an element.
The element may be selected via selector, text, or element_function.
    Exactly one of these arguments must be specified.
    Args:
      selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'(function() { return foo.element; })()'.
"""
self._RunAction(TapAction(
selector=selector, text=text, element_function=element_function))
def ClickElement(self, selector=None, text=None, element_function=None):
"""Click an element.
The element may be selected via selector, text, or element_function.
    Exactly one of these arguments must be specified.
    Args:
      selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'(function() { return foo.element; })()'.
"""
self._RunAction(ClickElementAction(
selector=selector, text=text, element_function=element_function))
def DragPage(self, left_start_ratio, top_start_ratio, left_end_ratio,
top_end_ratio, speed_in_pixels_per_second=800, use_touch=False,
selector=None, text=None, element_function=None):
"""Perform a drag gesture on the page.
You should specify a start and an end point in ratios of page width and
height (see drag.js for full implementation).
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
left_end_ratio: The horizontal ending coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_end_ratio: The vertical ending coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
use_touch: Whether dragging should be done with touch input.
"""
self._RunAction(DragAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
left_end_ratio=left_end_ratio, top_end_ratio=top_end_ratio,
speed_in_pixels_per_second=speed_in_pixels_per_second,
use_touch=use_touch, selector=selector, text=text,
element_function=element_function))
def PinchPage(self, left_anchor_ratio=0.5, top_anchor_ratio=0.5,
scale_factor=None, speed_in_pixels_per_second=800):
"""Perform the pinch gesture on the page.
It computes the pinch gesture automatically based on the anchor
    coordinate and the scale factor. The scale factor is the ratio of
    the final span to the initial span of the gesture.
Args:
left_anchor_ratio: The horizontal pinch anchor coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_anchor_ratio: The vertical pinch anchor coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
scale_factor: The ratio of the final span to the initial span.
The default scale factor is
3.0 / (window.outerWidth/window.innerWidth).
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(PinchAction(
left_anchor_ratio=left_anchor_ratio, top_anchor_ratio=top_anchor_ratio,
scale_factor=scale_factor,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def PinchElement(self, selector=None, text=None, element_function=None,
left_anchor_ratio=0.5, top_anchor_ratio=0.5,
scale_factor=None, speed_in_pixels_per_second=800):
"""Perform the pinch gesture on an element.
It computes the pinch gesture automatically based on the anchor
    coordinate and the scale factor. The scale factor is the ratio of
    the final span to the initial span of the gesture.
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
left_anchor_ratio: The horizontal pinch anchor coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
top_anchor_ratio: The vertical pinch anchor coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
scale_factor: The ratio of the final span to the initial span.
The default scale factor is
3.0 / (window.outerWidth/window.innerWidth).
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(PinchAction(
selector=selector, text=text, element_function=element_function,
left_anchor_ratio=left_anchor_ratio, top_anchor_ratio=top_anchor_ratio,
scale_factor=scale_factor,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def ScrollPage(self, left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=None, distance_expr=None,
speed_in_pixels_per_second=800, use_touch=False,
synthetic_gesture_source=GESTURE_SOURCE_DEFAULT):
"""Perform scroll gesture on the page.
You may specify distance or distance_expr, but not both. If
neither is specified, the default scroll distance is variable
depending on direction (see scroll.js for full implementation).
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
      distance: The distance to scroll (in pixels).
distance_expr: A JavaScript expression (as string) that can be
evaluated to compute scroll distance. Example:
'window.scrollTop' or '(function() { return crazyMath(); })()'.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
use_touch: Whether scrolling should be done with touch input.
synthetic_gesture_source: the source input device type for the
synthetic gesture: 'DEFAULT', 'TOUCH' or 'MOUSE'.
"""
assert synthetic_gesture_source in SUPPORTED_GESTURE_SOURCES
self._RunAction(ScrollAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance, distance_expr=distance_expr,
speed_in_pixels_per_second=speed_in_pixels_per_second,
use_touch=use_touch, synthetic_gesture_source=synthetic_gesture_source))
def ScrollPageToElement(self, selector=None, element_function=None,
container_selector=None,
container_element_function=None,
speed_in_pixels_per_second=800):
"""Perform scroll gesture on container until an element is in view.
Both the element and the container can be specified by a CSS selector
xor a JavaScript function, provided as a string, which returns an element.
The element is required so exactly one of selector and element_function
must be provided. The container is optional so at most one of
container_selector and container_element_function can be provided.
The container defaults to document.scrollingElement or document.body if
scrollingElement is not set.
Args:
selector: A CSS selector describing the element.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
container_selector: A CSS selector describing the container element.
container_element_function: A JavaScript function (as a string) that is
used to retrieve the container element.
speed_in_pixels_per_second: Speed to scroll.
"""
self._RunAction(ScrollToElementAction(
selector=selector, element_function=element_function,
container_selector=container_selector,
container_element_function=container_element_function,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def RepeatableBrowserDrivenScroll(self, x_scroll_distance_ratio=0.0,
y_scroll_distance_ratio=0.5,
repeat_count=0,
repeat_delay_ms=250,
timeout=60,
prevent_fling=None,
speed=None):
"""Perform a browser driven repeatable scroll gesture.
    The scroll gesture is driven from the browser; this is useful because the
    main thread often isn't responsive but the browser process usually is, so
    the delay between the scroll gestures should be consistent.
Args:
x_scroll_distance_ratio: The horizontal length of the scroll as a fraction
of the screen width.
y_scroll_distance_ratio: The vertical length of the scroll as a fraction
of the screen height.
repeat_count: The number of additional times to repeat the gesture.
repeat_delay_ms: The delay in milliseconds between each scroll gesture.
prevent_fling: Prevents a fling gesture.
speed: Swipe speed in pixels per second.
"""
self._RunAction(RepeatableScrollAction(
x_scroll_distance_ratio=x_scroll_distance_ratio,
y_scroll_distance_ratio=y_scroll_distance_ratio,
repeat_count=repeat_count,
repeat_delay_ms=repeat_delay_ms, timeout=timeout,
prevent_fling=prevent_fling, speed=speed))
def ScrollElement(self, selector=None, text=None, element_function=None,
left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=None, distance_expr=None,
speed_in_pixels_per_second=800, use_touch=False,
synthetic_gesture_source=GESTURE_SOURCE_DEFAULT):
"""Perform scroll gesture on the element.
The element may be selected via selector, text, or element_function.
    Exactly one of these arguments must be specified.
You may specify distance or distance_expr, but not both. If
neither is specified, the default scroll distance is variable
depending on direction (see scroll.js for full implementation).
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
      distance: The distance to scroll (in pixels).
distance_expr: A JavaScript expression (as string) that can be
evaluated to compute scroll distance. Example:
'window.scrollTop' or '(function() { return crazyMath(); })()'.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
use_touch: Whether scrolling should be done with touch input.
synthetic_gesture_source: the source input device type for the
synthetic gesture: 'DEFAULT', 'TOUCH' or 'MOUSE'.
"""
assert synthetic_gesture_source in SUPPORTED_GESTURE_SOURCES
self._RunAction(ScrollAction(
selector=selector, text=text, element_function=element_function,
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance, distance_expr=distance_expr,
speed_in_pixels_per_second=speed_in_pixels_per_second,
use_touch=use_touch, synthetic_gesture_source=synthetic_gesture_source))
def ScrollBouncePage(self, left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=100,
overscroll=10, repeat_count=10,
speed_in_pixels_per_second=400):
"""Perform scroll bounce gesture on the page.
This gesture scrolls the page by the number of pixels specified in
distance, in the given direction, followed by a scroll by
(distance + overscroll) pixels in the opposite direction.
The above gesture is repeated repeat_count times.
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
      distance: The distance to scroll (in pixels).
overscroll: The number of additional pixels to scroll back, in
        addition to the given distance.
repeat_count: How often we want to repeat the full gesture.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(ScrollBounceAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
overscroll=overscroll, repeat_count=repeat_count,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def ScrollBounceElement(
self, selector=None, text=None, element_function=None,
left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=100,
overscroll=10, repeat_count=10,
speed_in_pixels_per_second=400):
"""Perform scroll bounce gesture on the element.
This gesture scrolls on the element by the number of pixels specified in
distance, in the given direction, followed by a scroll by
(distance + overscroll) pixels in the opposite direction.
The above gesture is repeated repeat_count times.
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
      distance: The distance to scroll (in pixels).
overscroll: The number of additional pixels to scroll back, in
addition to the given distance.
repeat_count: How often we want to repeat the full gesture.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(ScrollBounceAction(
selector=selector, text=text, element_function=element_function,
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
overscroll=overscroll, repeat_count=repeat_count,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def MouseClick(self, selector=None):
"""Mouse click the given element.
Args:
selector: A CSS selector describing the element.
"""
self._RunAction(MouseClickAction(selector=selector))
def SwipePage(self, left_start_ratio=0.5, top_start_ratio=0.5,
direction='left', distance=100, speed_in_pixels_per_second=800):
"""Perform swipe gesture on the page.
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of swipe, either 'left', 'right',
'up', or 'down'
      distance: The distance to swipe (in pixels).
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(SwipeAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def SwipeElement(self, selector=None, text=None, element_function=None,
left_start_ratio=0.5, top_start_ratio=0.5,
direction='left', distance=100,
speed_in_pixels_per_second=800):
"""Perform swipe gesture on the element.
The element may be selected via selector, text, or element_function.
    Exactly one of these arguments must be specified.
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
direction: The direction of swipe, either 'left', 'right',
'up', or 'down'
      distance: The distance to swipe (in pixels).
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(SwipeAction(
selector=selector, text=text, element_function=element_function,
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def PressKey(self, key, repeat_count=1, repeat_delay_ms=100, timeout=60):
"""Perform a key press.
Args:
key: DOM value of the pressed key (e.g. 'PageDown', see
https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/key).
repeat_count: How many times the key should be pressed.
repeat_delay_ms: Delay after each keypress (including the last one) in
milliseconds.
"""
for _ in xrange(repeat_count):
self._RunAction(KeyPressAction(key, timeout=timeout))
self.Wait(repeat_delay_ms / 1000.0)
def EnterText(self, text, character_delay_ms=100, timeout=60):
"""Enter text by performing key presses.
Args:
text: The text to enter.
character_delay_ms: Delay after each keypress (including the last one) in
milliseconds.
"""
for c in text:
self.PressKey(c, repeat_delay_ms=character_delay_ms, timeout=timeout)
def LoadMedia(self, selector=None, event_timeout_in_seconds=0,
event_to_await='canplaythrough'):
"""Invokes load() on media elements and awaits an event.
Args:
      selector: A CSS selector describing the element. If none is
        specified, load the first media element on the page. If the
        selector matches more than 1 media element, all of them will
        be loaded.
event_timeout_in_seconds: Maximum waiting time for the event to be fired.
0 means do not wait.
event_to_await: Which event to await. For example: 'canplaythrough' or
'loadedmetadata'.
Raises:
TimeoutException: If the maximum waiting time is exceeded.
"""
self._RunAction(LoadMediaAction(
selector=selector, timeout_in_seconds=event_timeout_in_seconds,
event_to_await=event_to_await))
def PlayMedia(self, selector=None,
playing_event_timeout_in_seconds=0,
ended_event_timeout_in_seconds=0):
"""Invokes the "play" action on media elements (such as video).
Args:
selector: A CSS selector describing the element. If none is
specified, play the first media element on the page. If the
selector matches more than 1 media element, all of them will
be played.
playing_event_timeout_in_seconds: Maximum waiting time for the "playing"
event (dispatched when the media begins to play) to be fired.
0 means do not wait.
ended_event_timeout_in_seconds: Maximum waiting time for the "ended"
event (dispatched when playback completes) to be fired.
0 means do not wait.
Raises:
TimeoutException: If the maximum waiting time is exceeded.
"""
self._RunAction(PlayAction(
selector=selector,
playing_event_timeout_in_seconds=playing_event_timeout_in_seconds,
ended_event_timeout_in_seconds=ended_event_timeout_in_seconds))
def SeekMedia(self, seconds, selector=None, timeout_in_seconds=0,
log_time=True, label=''):
"""Performs a seek action on media elements (such as video).
Args:
seconds: The media time to seek to.
selector: A CSS selector describing the element. If none is
specified, seek the first media element on the page. If the
selector matches more than 1 media element, all of them will
be seeked.
timeout_in_seconds: Maximum waiting time for the "seeked" event
(dispatched when the seeked operation completes) to be
fired. 0 means do not wait.
log_time: Whether to log the seek time for the perf
        measurement. Useful when performing multiple seeks.
label: A suffix string to name the seek perf measurement.
Raises:
TimeoutException: If the maximum waiting time is exceeded.
"""
self._RunAction(SeekAction(
seconds=seconds, selector=selector,
timeout_in_seconds=timeout_in_seconds,
log_time=log_time, label=label))
def LoopMedia(self, loop_count, selector=None, timeout_in_seconds=None):
"""Loops a media playback.
Args:
loop_count: The number of times to loop the playback.
selector: A CSS selector describing the element. If none is
specified, loop the first media element on the page. If the
selector matches more than 1 media element, all of them will
be looped.
timeout_in_seconds: Maximum waiting time for the looped playback to
complete. 0 means do not wait. None (the default) means to
wait loop_count * 60 seconds.
Raises:
TimeoutException: If the maximum waiting time is exceeded.
"""
self._RunAction(LoopAction(
loop_count=loop_count, selector=selector,
timeout_in_seconds=timeout_in_seconds))
def ForceGarbageCollection(self):
"""Forces garbage collection on all relevant systems.
This includes:
- Java heap for browser and child subprocesses (on Android).
- JavaScript on the current renderer.
- System caches (on supported platforms).
"""
if self._tab.browser.supports_java_heap_garbage_collection:
self._tab.browser.ForceJavaHeapGarbageCollection()
self._tab.CollectGarbage()
if self._tab.browser.platform.SupportFlushEntireSystemCache():
self._tab.browser.platform.FlushEntireSystemCache()
def SimulateMemoryPressureNotification(self, pressure_level):
"""Simulate memory pressure notification.
Args:
pressure_level: 'moderate' or 'critical'.
"""
self._tab.browser.SimulateMemoryPressureNotification(pressure_level)
def PauseInteractive(self):
"""Pause the page execution and wait for terminal interaction.
This is typically used for debugging. You can use this to pause
the page execution and inspect the browser state before
continuing.
"""
raw_input("Interacting... Press Enter to continue.")
def RepaintContinuously(self, seconds):
"""Continuously repaints the visible content.
It does this by requesting animation frames until the given number
of seconds have elapsed AND at least three RAFs have been
fired. Times out after max(60, self.seconds), if less than three
RAFs were fired."""
self._RunAction(RepaintContinuouslyAction(
seconds=0 if self._skip_waits else seconds))
class Interaction(object):
def __init__(self, action_runner, label, flags):
assert action_runner
assert label
assert isinstance(flags, list)
self._action_runner = action_runner
self._label = label
self._flags = flags
self._started = False
def __enter__(self):
self.Begin()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_value is None:
self.End()
else:
logging.warning(
'Exception was raised in the with statement block, the end of '
'interaction record is not marked.')
def Begin(self):
assert not self._started
self._started = True
self._action_runner.ExecuteJavaScript(
'console.time({{ marker }});',
marker=timeline_interaction_record.GetJavaScriptMarker(
self._label, self._flags))
def End(self):
assert self._started
self._started = False
self._action_runner.ExecuteJavaScript(
'console.timeEnd({{ marker }});',
marker=timeline_interaction_record.GetJavaScriptMarker(
self._label, self._flags))
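# Hypothetical usage sketch (added, not part of the original module): shows
# how ActionRunner and Interaction typically combine; `tab` is assumed to be
# a live telemetry tab obtained from a running browser.
#
#   runner = ActionRunner(tab)
#   with runner.CreateGestureInteraction('Scroll-1'):
#     runner.ScrollPage(direction='down', distance=500)
#     runner.Wait(1)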
|
py | 1a420fe05b51a9c02a753ca432a26f81051cd7b9 | import hassapi as hass # pylint: disable=import-error
class cover_tag_scanned(hass.Hass):
""" Opens or closes a cover based on an nfc_tag being scanned """
def initialize(self):
self.listen_event(
self.door_tag_scanned,
"tag_scanned",
tag_id=self.args["tag_id"],
)
def door_tag_scanned(self, event_name, data, kwargs):
"""Open the door if it's closed. Close the door if it's open.
Ignore the event if the door is opening or closing.
If a device list is provided, ignore the scan if it
didn't come from a device in the list.
'data' looks like this:
'data': {'tag_id': 'cae3c8c5-faac-4585-be93-a1199fa98fcd',
'device_id': 'effd5529caba2c3f'}"""
self.log(
"tag_id = " + data["tag_id"] + ". device_id = " + data["device_id"],
level="DEBUG",
)
if "devices" in self.args and data["device_id"] not in self.args["devices"]:
self.log(
"Ignoring scan from unlisted device " + data["device_id"] + ".",
level="INFO",
)
return
if self.get_state(self.args["cover_entity"]) == "open":
self.log(
"Closing garage door due to NFC tag scan by device "
+ data["device_id"]
+ ".",
level="INFO",
)
self.call_service("cover/close_cover", entity_id=self.args["cover_entity"])
elif self.get_state(self.args["cover_entity"]) == "closed":
self.log(
"Opening garage door due to NFC tag scan by device "
+ data["device_id"]
+ ".",
level="INFO",
)
self.call_service("cover/open_cover", entity_id=self.args["cover_entity"])
|
py | 1a42118f65828c7d1d9c6e7df7462c82b849a976 | import json
import time
class Vehicle:
ip = None
brand = None
model = None
vrn = None
rotates = None
gear = None
direction = None
directionAsText = None
speed = None
action = None
actionAsText = None
_lastUpdateAt = None
def update(self, ip, brand, model, vrn, rotates, gear,
direction, directionAsText, speed, action,
actionAsText, t = time.time):
self.ip = ip
self.brand = brand
self.model = model
self.vrn = vrn
self.rotates = rotates
self.gear = gear
self.direction = direction
self.directionAsText = directionAsText
self.speed = speed
self.action = action
self.actionAsText = actionAsText
self._lastUpdateAt = t()
def isReachable(self, time):
return time <= self._lastUpdateAt + 3
def echo(self, name, alt = "Zisťujem..."):
val = getattr(self, name)
        if val is None:
return alt
else:
return val
def fromJson(self, data, requiresAboutMe = False):
data = json.loads(data)
if requiresAboutMe and not data.get("aboutMe"):
return None
        if self is None:
self = Vehicle()
self.update(data.get("ip"),
data.get("brand"),
data.get("model"),
data.get("vrn"),
data.get("rotates"),
data.get("gear"),
data.get("direction"),
data.get("directionAsText"),
data.get("speed"),
data.get("action"),
data.get("actionAsText"))
return self
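# Minimal usage sketch (added; field values are invented for illustration):
if __name__ == "__main__":
    payload = json.dumps({
        "ip": "10.0.0.7", "brand": "Skoda", "model": "Octavia",
        "vrn": "BA-123XY", "rotates": 2100, "gear": 3,
        "direction": 0, "directionAsText": "straight",
        "speed": 42, "action": 1, "actionAsText": "driving",
    })
    v = Vehicle().fromJson(payload)
    print(v.echo("speed"), v.isReachable(time.time()))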
|
py | 1a4212780e69d3ba50820b85ed371494ea2976c1 | from flask_unchained import AppBundle
class AutoRouteAppBundle(AppBundle):
pass
|
py | 1a421285cfec78e51c582bb17de4438222e31ea6 | #############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""catalog package test runner
"""
import re
import unittest
import doctest
from zope.testing import module
import zope.component.testing
import zope.component.factory
import zope.component.interfaces
from zope.testing import renormalizing
import zc.catalog
from zc.catalog import index
from zc.catalog import extentcatalog
from zc.catalog import globber
from zc.catalog import catalogindex
from zc.catalog import stemmer
import zc.catalog.interfaces
import BTrees.Interfaces
import BTrees.LOBTree
import BTrees.OLBTree
import BTrees.LFBTree
class TestAbstractIndex(unittest.TestCase):
def test_family_on_cls(self):
self.assertIsInstance(index.AbstractIndex.family,
index.FamilyProperty)
def test_clear_cruft(self):
i = index.AbstractIndex()
i.__dict__['BTreeAPI'] = None
del i.__dict__['family']
self.assertIn('BTreeAPI', i.__dict__)
getattr(i, 'family')
self.assertNotIn('BTreeAPI', i.__dict__)
def test_family(self):
class Family(object):
class OO(object):
class BTree(object):
pass
IO = OO
i = index.AbstractIndex(family=Family)
self.assertIs(i.family, Family)
def test_empty_values(self):
i = index.AbstractIndex()
res = i.values(doc_id=1)
self.assertEqual((), res)
class TestValueIndex(unittest.TestCase):
def test_empty_values(self):
i = index.ValueIndex()
res = i.values(doc_id=1)
self.assertEqual((), res)
class TestSetIndex(unittest.TestCase):
def test_removed(self):
i = index.SetIndex()
i.index_doc(1, ('foo', 'bar'))
i.index_doc(1, ('foo',))
self.assertEqual(1, i.wordCount.value)
    def test_apply_all_of_empty(self):
i = index.SetIndex()
res = i.apply({'all_of': ()})
self.assertEqual(len(res), 0)
class TestNormalizationWrapper(unittest.TestCase):
def test_pass_to_index(self):
i = index.SetIndex()
        class Normalizer(object):
@classmethod
def value(cls, v):
return v
        n = index.NormalizationWrapper(i, Normalizer)
self.assertEqual(i.documentCount(), n.documentCount())
self.assertEqual(i.wordCount(), n.wordCount())
n.clear()
n.index_doc(1, ('foo',))
self.assertEqual(i.wordCount(), n.wordCount())
self.assertEqual(n.containsValue('foo'), i.containsValue('foo'))
class TestExtent(unittest.TestCase):
def test_BTreeAPI(self):
i = extentcatalog.Extent()
self.assertIsNotNone(i.BTreeAPI)
def test_bool(self):
i = extentcatalog.Extent()
self.assertFalse(i)
i.add(1, None)
self.assertTrue(i)
self.assertEqual(1, len(i))
def test_discard_missing(self):
i = extentcatalog.Extent()
i.discard(0)
self.assertEqual(0, len(i))
def test_catalog_update(self):
from zope.interface.interfaces import ComponentLookupError
c = extentcatalog.Catalog(extentcatalog.Extent())
i = index.SetIndex()
i.__parent__ = None
self.assertRaises(ComponentLookupError, c.updateIndex, i)
class TestGlob(unittest.TestCase):
def test_bad_parse(self):
class Lexicon(object):
pass
res = globber.glob('', Lexicon())
self.assertIsNone(res)
class TestCatalogIndex(unittest.TestCase):
def test_datetimevalueindex(self):
i = catalogindex.DateTimeValueIndex(field_name='foo')
self.assertTrue(zc.catalog.interfaces.IValueIndex.providedBy(i))
def test_datetimesetindex(self):
i = catalogindex.DateTimeSetIndex(field_name='foo')
self.assertTrue(zc.catalog.interfaces.ISetIndex.providedBy(i))
@unittest.skipUnless(stemmer.broken, "Only for broken stemmers")
class TestBrokenStemmer(unittest.TestCase):
def test_broken(self):
s = stemmer.Stemmer()
self.assertIs(stemmer.broken, s.stemmer)
self.assertEqual('word', s.stemmer.stem("word"))
def setUp32bit(test):
zope.component.testing.setUp(test)
test.globs["btrees_family"] = BTrees.family32
def modSetUp32bit(test):
setUp32bit(test)
module.setUp(test, 'zc.catalog.doctest_test')
def setUp64bit(test):
zope.component.testing.setUp(test)
test.globs["btrees_family"] = BTrees.family64
def modSetUp64bit(test):
setUp64bit(test)
module.setUp(test, 'zc.catalog.doctest_test')
def tearDown(test):
zope.component.testing.tearDown(test)
def modTearDown(test):
module.tearDown(test)
zope.component.testing.tearDown(test)
def test_suite():
checker = renormalizing.RENormalizing((
(re.compile(r"<class 'BTrees."), "<type 'BTrees."),
(re.compile(r"<module 'BTrees\._"), "<module 'BTrees."),
))
tests = unittest.TestSuite((
# 32 bits
doctest.DocFileSuite(
'extentcatalog.rst', setUp=modSetUp32bit, tearDown=modTearDown),
doctest.DocFileSuite(
'setindex.rst', setUp=setUp32bit, tearDown=tearDown),
doctest.DocFileSuite(
'valueindex.rst', setUp=setUp32bit, tearDown=tearDown),
doctest.DocFileSuite(
'normalizedindex.rst', setUp=setUp32bit, tearDown=tearDown),
doctest.DocFileSuite(
'globber.rst', setUp=setUp32bit, tearDown=tearDown),
doctest.DocFileSuite(
'callablewrapper.rst', setUp=setUp32bit, tearDown=tearDown),
# 64 bits
doctest.DocFileSuite(
'extentcatalog.rst', setUp=modSetUp64bit, tearDown=modTearDown),
doctest.DocFileSuite('setindex.rst', setUp=setUp64bit,
tearDown=tearDown),
doctest.DocFileSuite('valueindex.rst', setUp=setUp64bit,
tearDown=tearDown),
doctest.DocFileSuite('normalizedindex.rst', setUp=setUp64bit,
tearDown=tearDown),
doctest.DocFileSuite('globber.rst', setUp=setUp64bit,
tearDown=tearDown),
doctest.DocFileSuite('callablewrapper.rst', setUp=setUp64bit,
tearDown=tearDown),
# legacy data support
doctest.DocFileSuite(
'legacy.rst',
optionflags=doctest.ELLIPSIS,
checker=checker),
))
if not stemmer.broken: # pragma: no cover
tests.addTest(doctest.DocFileSuite('stemmer.rst'))
tests.addTest(unittest.defaultTestLoader.loadTestsFromName(__name__))
return tests
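# Added convenience hook (an assumption, not part of the original module):
# the suite follows the classic `test_suite()` protocol used by
# zope.testrunner, so it can also be run directly with the stock runner.
if __name__ == '__main__':
    unittest.TextTestRunner().run(test_suite())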
|
py | 1a4212c9d5489e3279763f946fe9534b4b6e144f | # -*- coding: utf-8 -*-
"""Defining functions and classes for arrhythmia detection through ECG signals.
The contents of this module define functions and classes for analyzing,
visualizing, and making predictions based on data from the
MIT-BIH Arrhythmia Database.
Explore this repository at:
https://github.com/chance-alvarado/arrhythmia-detector
Author:
Chance Alvarado
LinkedIn: https://www.linkedin.com/in/chance-alvarado/
GitHub: https://github.com/chance-alvarado/
"""
# Set random seeds for reproducability
from numpy.random import seed
seed(1)
import tensorflow
tensorflow.random.set_seed(2)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import colors
from sklearn.metrics import confusion_matrix
from matplotlib.animation import FuncAnimation
from keras.models import load_model
def create_dataframe(path):
"""Alias of Pandas' read_csv without an additional import."""
df = pd.read_csv(path, header=None)
return df
def sample_dataframe(path):
"""Preview 5 rows of DataFrame."""
df_sample = pd.read_csv(path, nrows=5, header=None)
return df_sample
class DataVisualization:
"""Class for data exploration through visualization."""
def plot_setup(self, axs):
"""Set up general plot attributes."""
# Loop through all axis objects
for ax in axs:
# Set facecolor to black
ax.set_facecolor('k')
# Remove spines
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
# Add grid
ax.grid(linestyle='-', color='w', alpha=.2)
def save_plot(self, save_location):
"""Save plot based on user's preference."""
# Try to save plot if user speciefies a save location
if save_location:
plt.savefig(save_location, facecolor='k')
plt.close()
# Else show plot
else:
plt.show()
def label_counts(self, df):
"""Create vectors of unique labels and their counts."""
# Find target column
target = df.iloc[:, -1]
# Unique labels
unique_labels = target.unique()
# Count number of unique occurances for each label
unique_count = []
for label in unique_labels:
unique_count.append(target[target == label].count())
return unique_labels, unique_count
def class_bar(self, df, save_location=None):
"""Create bar chart for showing classs balance."""
# Collect necessary data
unique_labels, unique_count = self.label_counts(df)
# Create figure
fig, ax = plt.subplots(1, 1, figsize=(7, 4), facecolor='k')
# General plot setup
self.plot_setup([ax])
# Title
fig.suptitle('Arrhythmia Type Breakdown', c='w', fontsize=18, y=.95)
# Set proper color
ax.tick_params(colors='w')
# Add x label
ax.set_xlabel('Arrhythmia Type', c='w', fontsize=14, alpha=0.8)
# Change scale of y
ax.set_yticks(np.arange(0, sum(unique_count),
sum(unique_count)/10)
)
# Plot with glow
ax.bar(unique_labels, unique_count, width=.9, color='r', alpha=0.75)
ax.bar(unique_labels, unique_count, width=.93, color='r', alpha=0.4)
ax.bar(unique_labels, unique_count, width=.95, color='w', alpha=0.2)
# Save plot
self.save_plot(save_location)
def ecg_scatter(self, df, save_location=None):
"""Create scatter plot of 100 of each type of arrhythmia."""
# Collect necessary data
unique_labels, _ = self.label_counts(df)
target_vect = df.iloc[:, -1]
# Create figure
fig, axs = plt.subplots(nrows=5, ncols=1, figsize=(8, 12),
facecolor='k'
)
# General plot setup
self.plot_setup(axs)
# Add title
fig.suptitle('Averaged ECG Signals', c='w', fontsize=16, y=0.92)
# Iterate through all labels
for col, label in enumerate(unique_labels):
# Plot text box with arrhythmia type
axs[col].text(df.shape[1], .95,
('Arrhythmia Type: %s' % (str(int(label)))),
size=14, ha="right", va="top", c='w',
bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5),
fc='r', alpha=.7
)
)
# Scatter plot for arrhythmia
matching_rows = (target_vect == label)
for i in range(100):
# Dataframe of only relevant rows
temp_df = df.iloc[:, :-1][matching_rows].round(decimals=1)
# Data to plot
data = temp_df.iloc[i, :]
t_span = range(len(data))
# Plot iteration
axs[col].scatter(t_span, data, alpha=0.05, c='r', s=2)
# Save plot
self.save_plot(save_location)
def ecg_line(self, row, viz_type='static', save_location=None):
"""Create a line plot of an individual ecg signal."""
# Get relevant data
signal = row[:-1]
target = row.iloc[-1]
# Create figure
fig, ax = plt.subplots(1, 1, figsize=(7, 3),
facecolor='k')
# Create title
fig.suptitle('ECG Signal',
fontsize=18,
color='white',
)
# General plot setup
self.plot_setup([ax])
# Hide tick labels
ax.set_xticklabels([])
ax.set_yticklabels([])
# Add titles
ax.set_xlabel('Time', c='w', fontsize=14, alpha=0.8)
ax.set_ylabel('Amplitude', c='w', fontsize=14, alpha=0.8,)
# Plot text box with arrhythmia type
plt.text(len(signal), .95,
('Arrhythmia Type: %s' % (str(int(target)))),
size=14, ha="right", va="top", c='w',
bbox=dict(boxstyle="round",
ec=(1., 0.5, 0.5),
fc='r',
alpha=.7
)
)
# Check type
if viz_type == 'static':
# Plot with subtle glow effect
ax.plot(signal, color='r', linewidth=2, alpha=.7)
ax.plot(signal, color='r', linewidth=3, alpha=.4)
ax.plot(signal, color='w', linewidth=5, alpha=.2)
# Save plot
self.save_plot(save_location)
# Check type
elif viz_type == 'dynamic':
# Time vector
time_vect = list(range(len(signal)))
# Create line objects
line, = ax.plot(time_vect, signal, color='r',
linewidth=2, alpha=.7
)
line_g1, = ax.plot(time_vect, signal, color='r',
linewidth=3, alpha=.4
)
line_g2, = ax.plot(time_vect, signal, color='w',
linewidth=5, alpha=.2
)
# Update function
def update(num, time_vect, signal, line):
"""Define function to update plot every frame."""
# Scaling value
scaling_factor = 10
end = num*scaling_factor
if end > 100:
start = end-100
else:
start = 0
for line_obj in [line, line_g1, line_g2]:
line_obj.set_data(time_vect[start:end],
signal[start:end]
)
return [(line,), (line_g1,), (line_g2,)]
# Create animation
anim = FuncAnimation(fig, update, interval=40, frames=40,
fargs=[time_vect, signal, line]
)
# Save animation
anim.save(save_location, writer='imagemagick', fps=20,
savefig_kwargs={'facecolor': 'k', 'transparent': True})
plt.close()
class DataProcessing:
"""Class for processing ecg data before training model."""
def resample(self, num_samples, df):
"""Resample data to have 'num_samples' of each label."""
# New DataFrame
df_resample = pd.DataFrame()
# Define target vector
target = df.iloc[:, -1]
# Resample for each unique value in target
for t in target.unique():
temp_df = df[target == t].sample(num_samples, replace=True)
df_resample = pd.concat([df_resample, temp_df], ignore_index=True)
return df_resample
def shuffle(self, df):
"""Randomly shuffle data."""
df = df.sample(frac=1).reset_index(drop=True)
return df
def add_noise(self, df, noise_level=0.05):
"""Add normal noise with standard deviation 'noise_level'."""
# Get shape
rows, cols = df.shape
# Iterate through rows
for index in range(rows):
# Create new noise
            noise = np.random.normal(0, noise_level, cols-1)
noise = np.append(noise, 0.)
# Add noise
df.iloc[index, :] += noise
# Keep all values between 0 and 1
for ind, val in enumerate(df.iloc[index, :-1]):
if val > 1:
df.iloc[index, ind] = 1
elif val < 0:
df.iloc[index, ind] = 0
return df
def feature_target_split(self, df):
"""Split DataFrame intto a feature matrix and target vector."""
feature_mat = df.iloc[:, :-1].to_numpy()
target_vect = df.iloc[:, -1].to_numpy()
return feature_mat, target_vect
def one_hot_encoder(self, vect):
"""One hot encode categorical numerical values given Pandas Series."""
# New target list
target_vect_enc = []
# Number of columns in encoded vector
num_cols = len(np.unique(vect))
# Iterate through each value in vector
for val in vect:
# Create vector to append
bin_vect = np.zeros(num_cols)
bin_vect[int(val)] = 1
# Append
target_vect_enc.append(bin_vect)
return np.array(target_vect_enc)
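# Illustrative note (added): one_hot_encoder maps integer class labels to
# indicator vectors; e.g. for labels [0, 2, 1] (three classes) it returns
# [[1, 0, 0], [0, 0, 1], [0, 1, 0]].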
class ModelEvaluation:
"""Class for evaluation of predictive model's metrics."""
def undo_encode(self, vect):
"""Undo one hot encoding used in training and predictions."""
# New target list
unencoded_target_vect = []
# Add array index to list
for val in vect:
unencoded_target_vect.append(np.argmax(val))
return unencoded_target_vect
def import_best_model(self):
"""Import best model saved in directory."""
model = load_model('resources/model/best_model.h5')
return model
def best_parameters(self, model):
"""Print the best parameters for each layer of model."""
# Get configuration json
config = model.get_config()
# Iterate through all layers and print relevant info
for layer in config['layers']:
layer_type = layer['class_name']
if layer_type == 'Dense':
print('Dense Layer Nodes: %d' % (layer['config']['units']))
elif layer_type == 'Dropout':
                print('Dropout Rate: %.2f' % (layer['config']['rate']))
elif layer_type == 'InputLayer':
print('Input Layer Nodes: %d'
% (layer['config']['batch_input_shape'][1])
)
def evaluate_model(self, model, test_X, test_y):
"""Evaluate model on the test data."""
acc = model.evaluate(test_X, test_y, verbose=0)[1]
print('Accuracy on testing data: ', acc)
def plot_confusion_matrix(self, model, test_X, y_true):
"""Plot confusion matrix with custom colormap."""
# List of target labels
labels = [0, 1, 2, 3, 4]
# Make predictions
y_pred = model.predict(test_X)
# Unencode target vector
y_pred = self.undo_encode(y_pred)
# Get number of samples
num_samples = len(y_pred)
# Create confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Normalize confusion matrix and round
cm_norm = np.zeros(shape=cm.shape)
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
val = round((cm[i][j] / num_samples), ndigits=2)
cm_norm[i, j] = val
# Create figure
fig, ax = plt.subplots(facecolor='k', figsize=(7, 6))
# Create black to red color gradient
# Thanks to SpghttCd on stackoverflow for this code
def NonLinCdict(steps, hexcol_array):
cdict = {'red': (), 'green': (), 'blue': ()}
for s, hexcol in zip(steps, hexcol_array):
rgb = colors.hex2color(hexcol)
cdict['red'] = cdict['red'] + ((s, rgb[0], rgb[0]),)
cdict['green'] = cdict['green'] + ((s, rgb[1], rgb[1]),)
cdict['blue'] = cdict['blue'] + ((s, rgb[2], rgb[2]),)
return cdict
hc = ['#000000', '#5b0000', '#ac0000', '#c80000', '#ff0000']
th = [0, 0.01, 0.03, 0.05, 1]
cdict = NonLinCdict(th, hc)
black_red_cmap = colors.LinearSegmentedColormap('black_red_cmap',
cdict
)
# Plot
sns.heatmap(cm_norm, annot=True, cmap=black_red_cmap,
ax=ax, fmt="g", cbar=False,
annot_kws={"size": 14},
linewidths=1, linecolor='w'
)
# Add suptitle
fig.suptitle('Confusion Matrix', c='w', y=.95, fontsize=18)
# Set axis labels
ax.set_xlabel('Predicted Arrhythmia Type', fontsize=14, c='w')
ax.set_ylabel('Actual Arrhythmia Type', fontsize=14, c='w')
# Set tick parameters
ax.tick_params(axis='both', which='major', labelsize=12, )
ax.set_xticklabels(labels=labels, color='w')
ax.set_yticklabels(labels=labels, color='w', rotation=0)
# Show plot
plt.show()
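# Hypothetical end-to-end sketch (added; the CSV path is an assumption):
#
#   df = create_dataframe('resources/data/mitbih_train.csv')
#   dp = DataProcessing()
#   df = dp.shuffle(dp.resample(2000, df))
#   X, y = dp.feature_target_split(df)
#   y_enc = dp.one_hot_encoder(y)
#   me = ModelEvaluation()
#   model = me.import_best_model()
#   me.evaluate_model(model, X, y_enc)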
|
py | 1a4212e6b5e42e4d4d93849f18d3f81a18bdcefa | import json
from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.views.decorators.http import require_POST
from requests import RequestException
from rootnroll import RootnRollClient
from rootnroll.constants import ServerStatus
from games.models import Game
"""
Session structure:
game_id -> {'server_id': '123...',
'terminal_id': '234...',}
"""
def _terminal_response_ok(terminal):
return JsonResponse({
'status': 'ok',
'terminal_id': terminal['id'],
'kaylee_url': terminal['config']['kaylee_url'],
})
def _terminal_response_creating():
return JsonResponse({
'status': 'creating',
})
def _terminal_response_error(info=None):
return JsonResponse({
'status': 'error',
'info': info,
})
def get_rnr_client():
return RootnRollClient(username=settings.ROOTNROLL_USERNAME,
password=settings.ROOTNROLL_PASSWORD,
api_url=settings.RNR_API_URL)
@require_POST
def terminals(request):
"""
The main API endpoint for getting a terminal.
TODO: split into several endpoints?
"""
data = json.loads(request.body.decode())
game_id = data.get('id')
game = get_object_or_404(Game, id=game_id)
rnr_client = get_rnr_client()
terminals_map = request.session.get('terminals_map', {})
game_dict = terminals_map.get(str(game_id), {})
server_id = game_dict.get('server_id')
terminal_id = game_dict.get('terminal_id')
if terminal_id:
# Active terminal exists
terminal = rnr_client.get_terminal(terminal_id)
if terminal:
return _terminal_response_ok(terminal)
if server_id:
# Server exists?
server = rnr_client.get_server(server_id)
if server and server['status'] == ServerStatus.ACTIVE:
# Server is ready, create a terminal
terminal = rnr_client.create_terminal(server)
if terminal:
game_dict['terminal_id'] = terminal['id']
terminals_map[str(game_id)] = game_dict
request.session['terminals_map'] = terminals_map
return _terminal_response_ok(terminal)
elif server and server['status'] != ServerStatus.ERROR:
# Waiting for server to come up
return _terminal_response_creating()
# Server does not exist or invalid
try:
# Compute the current number of servers
        servers_count = rnr_client.list_servers().get("count", 0)
if servers_count >= settings.SERVERS_NUMBER_HARD_LIMIT:
return _terminal_response_error("No servers available")
server = rnr_client.create_server(game.rnr_image_id)
    except RequestException:
return _terminal_response_error()
if server and 'id' in server:
game_dict['server_id'] = server['id']
terminals_map[str(game_id)] = game_dict
request.session['terminals_map'] = terminals_map
return _terminal_response_creating()
else:
# Cannot create the server
return _terminal_response_error()
|
py | 1a421370795e72b64bdee5b1656abb6380709b89 | import random
import numpy as np
#import matplotlib.pyplot as plt
# parameters
N = 100 # No. of training points
D = 2 # 2-dimension
# area between f & g
area = 0
cnt0 = 0
for irun in range(1000):
# training data
x1, x2 = np.zeros((N, 1)), np.zeros((N, 1))
for iN in range(N):
x1[iN] = random.uniform(-1, 1)
x2[iN] = random.uniform(-1, 1)
xtrain = np.c_[np.ones((N, 1)), x1, x2]
# target function: passes through two points
x11, x21 = random.uniform(-1, 1), random.uniform(-1, 1) # 1st point
x12, x22 = random.uniform(-1, 1), random.uniform(-1, 1) # 2nd point
x0 = np.arange(-1, 1, .1) # for plotting purpose
y0 = (x22 - x21)/(x12 - x11) * (x0 - x11) + x21
# target: expected output
y = np.zeros(N)
for iN in range(N):
f = (x22 - x21)/(x12 - x11) * (x1[iN] - x11) + x21
if f < x2[iN]: y[iN] = 1
elif f > x2[iN]: y[iN] = -1
# # visualize
# plt.plot(x0, y0)
# plt.scatter(x1, x2)
# weight vector
w = np.zeros(D+1) # initially, all points mis-classified
# estimated label through Perceptron
yp = np.zeros(N) # initially all 0
cnt = 0
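    # PLA update rule: while any point is misclassified, pick the first
    # misclassified point (x, y) and set w <- w + y * x.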
    while not np.array_equal(yp, y):
        evlt = list(np.equal(yp, y))
        iN = evlt.index(False)  # index of the first misclassified point
w += y[iN] * xtrain[iN, :] # update the weight factor
yp = np.sign(w.dot(xtrain.T))
cnt += 1
# # visualize
# g0 = -(w[0] + w[1]*x0)/w[2]
# plt.plot(x0, g0)
# for i in range(N):
# plt.text(x1[i], x2[i], str(yp[i]))
# plt.pause(1)
# input('Press enter to continue')
cnt0 += cnt # No. of iterations to converge
# estimate the difference between f & g using Monte Carlo
ntest = 1000
count = 0
for itest in range(ntest):
x1test, x2test = random.uniform(-1, 1), random.uniform(-1, 1)
# target
fx2 = x21 + (x22-x21)/(x12-x11) * (x1test-x11)
if fx2 < x2test: target = 1
elif fx2 > x2test: target = -1
else: target = 0
# estimate
estimate = np.sign(w.dot([1, x1test, x2test]))
if estimate != target: count += 1
area += count / ntest
print(area / 1000)  # average disagreement P[f(x) != g(x)] over the 1000 runs
print(cnt0 / 1000)  # average number of PLA updates until convergence |
py | 1a42154dca787ac5a6505a6cee3c316e2c305d26 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1beta1.types import specialist_pool
from google.cloud.aiplatform_v1beta1.types import specialist_pool_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class SpecialistPoolServiceTransport(abc.ABC):
"""Abstract transport class for SpecialistPoolService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials is service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_specialist_pool: gapic_v1.method.wrap_method(
self.create_specialist_pool,
default_timeout=5.0,
client_info=client_info,
),
self.get_specialist_pool: gapic_v1.method.wrap_method(
self.get_specialist_pool, default_timeout=5.0, client_info=client_info,
),
self.list_specialist_pools: gapic_v1.method.wrap_method(
self.list_specialist_pools,
default_timeout=5.0,
client_info=client_info,
),
self.delete_specialist_pool: gapic_v1.method.wrap_method(
self.delete_specialist_pool,
default_timeout=5.0,
client_info=client_info,
),
self.update_specialist_pool: gapic_v1.method.wrap_method(
self.update_specialist_pool,
default_timeout=5.0,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.CreateSpecialistPoolRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.GetSpecialistPoolRequest],
Union[
specialist_pool.SpecialistPool, Awaitable[specialist_pool.SpecialistPool]
],
]:
raise NotImplementedError()
@property
def list_specialist_pools(
self,
) -> Callable[
[specialist_pool_service.ListSpecialistPoolsRequest],
Union[
specialist_pool_service.ListSpecialistPoolsResponse,
Awaitable[specialist_pool_service.ListSpecialistPoolsResponse],
],
]:
raise NotImplementedError()
@property
def delete_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.DeleteSpecialistPoolRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def update_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.UpdateSpecialistPoolRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
__all__ = ("SpecialistPoolServiceTransport",)
|
py | 1a4215e074d382e78b7759cdf16f3b8228f04a84 | #!/usr/bin/env python3
import os
import sys
from subprocess import call, run, PIPE
from getFiles import get_files, get_main
from colorama import Fore, Style
answ_linestart = 'Answer: '
def compile_java(task, output_path='.', source_path=''):
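    # Invokes: javac -d <output_path> <source files for the task>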
cmd = ['javac', '-d', output_path]
cmd.extend(get_files(task, source_path))
call(cmd)
def run_tests(task, out_path='out', src_path='src', test_path='test'):
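    # Each *.theotest file contains the program's stdin, followed by a final
    # line of the form "Answer: <expected stdout>".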
compile_java(task, out_path, src_path)
for fn in os.listdir(test_path):
if fn.endswith('.theotest'):
with open(os.path.join(test_path, fn), 'r') as f:
test_case = f.read().split('\n')
answer = test_case.pop()
p = run(['java', get_main(task)], cwd=out_path, input='\n'.join(test_case) + '\n', stdout=PIPE,
universal_newlines=True)
if answer.startswith(answ_linestart):
output = str(p.stdout)[:-1]
if output == answer[len(answ_linestart):]:
print(f'{Fore.GREEN}pass {fn}{Style.RESET_ALL}')
else:
print(
f'{Fore.RED}fail {fn}: expected {answer[len(answ_linestart):]}, got {output}{Style.RESET_ALL}')
run_tests(*sys.argv[1:])
|
py | 1a42162ec426319fe48144c4e7638b6a76da94aa | # -*- coding: utf-8 -*-
from .data_context import DataContext
|
py | 1a4216b70ba8366f9720a96e52daa958cb169d47 | # Copyright 2021 Alexey Tochin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from collections.abc import Callable
from typing import Any, Dict, Optional
import tensorflow as tf
from tf_dataclass.get_type import get_output_type, get_input_type_dict
from tf_dataclass.modified_dataclass import is_dataclass
def unpack(value: Any, temple: Optional[type] = None) -> Any:
if temple is None:
temple = type(value)
if is_dataclass(temple):
return value.as_tuple
elif temple == tuple or (hasattr(temple, "__origin__") and temple.__origin__ == tuple):
return tuple(map(lambda sub_value, sub_temple: unpack(sub_value, sub_temple), value, temple.__args__))
else:
return value
def pack(unpacked_value: Any, temple: type) -> Any:
if is_dataclass(temple):
return temple.from_tuple(data_tuple=unpacked_value)
else:
return unpacked_value
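# Round-trip illustration (a sketch; `Pair` is a hypothetical @dataclass with
# as_tuple / from_tuple, as used above):
#   unpack(Pair(x=a, y=b))      -> (a, b)
#   pack((a, b), temple=Pair)   -> Pair(x=a, y=b)
# so pack(unpack(v), type(v)) == v for supported values.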
def pack_function(func: Callable, input_type_dict: Dict[str, type], output_type: type):
"""
    Returns a version of @param func whose inputs and outputs are replaced by their unpacked versions.
    func -> unpack . func . pack  (arguments are packed before the call, the result is unpacked after)
@param func: input1, input2, ... -> output
@return: input1_tuple, input2_tuple, ... -> output_tuple
"""
def dictorized_func(**kwargs):
if kwargs.keys() != input_type_dict.keys():
raise ValueError(
f"The keyword arguments set from type annotation does not coincide with actual arguments.\n"
f"From type annotations: {set(input_type_dict.keys())}\n"
f"Actual arguments: {set(kwargs.keys())}"
)
packed_arg_dict = {
arg_name: pack(unpacked_value=kwargs[arg_name], temple=type_val)
for arg_name, type_val in input_type_dict.items()
}
output = func(**packed_arg_dict)
unpacked_output = unpack(value=output, temple=output_type)
return unpacked_output
return dictorized_func
def unpack_function(packed_func: Callable, input_type_dict: Dict[str, type], output_type: type):
"""
    Returns a version of @param packed_func whose inputs and outputs are replaced by their packed versions.
    func -> pack . func . unpack  (arguments are unpacked before the call, the result is packed after)
@param packed_func: input1_tuple, input2_tuple, ... -> output_tuple
@return: input1, input2, ... -> output
"""
def undictorized_func(*args, **kwargs):
if args:
raise ValueError("Only keyword arguments are currently supported.")
if kwargs.keys() != input_type_dict.keys():
raise ValueError(
f"The arguments set from type annotation does not coincide with actual arguments.\n"
f"From type annotations: {set(input_type_dict.keys())}\n"
f"Actual arguments: {set(kwargs.keys())}"
)
input_kwargs = {}
for arg_name, arg_value in kwargs.items():
unpacked_arg = unpack(value=arg_value, temple=input_type_dict[arg_name])
input_kwargs[arg_name] = unpacked_arg
output_dict = packed_func(**input_kwargs)
output = pack(unpacked_value=output_dict, temple=output_type)
return output
return undictorized_func
def function(func: Callable, **kwargs) -> Callable:
"""
Modification of tensorflow.function for dataclass input/output support.
1. dataclass decorator must be imported form tf_dataclass module
2. Type hint for @parm func return type is mandatory
3. Only keword arguments for the returned function are currently supported.
4. Other arguments are the same as for tensorlfow.function
See https://github.com/alexeytochin/tf-dataclass/blob/main/README.md for further details.
@param func: the same as for tensorflow.function but requires typehints for the return type.
@param kwargs: this argumets are pathed to tensorflow.function
@return: callable object that accepts dataclass objects as input and/or output.
Only keyword arguments for the decorated function are currently supported
Example 1:
>>> from tf_dataclass import dataclass, function
>>> @dataclass
>>> class Sequential:
>>> feature: tf.Tensor # shape = [batch, length, channels], dtype = tf.float32
>>> length: tf.Tensor # shape = [batch], dtype = tf.int32
>>> input = Sequential(
>>> feature = tf.random.normal(shape=[2, 6, 3]),
>>> length = tf.constant([6, 4], dtype=tf.int32),
>>> )
>>> @function
>>> def convolution(input: Sequential, filters: tf.Tensor, stride: int) -> Sequential:
>>> return Sequential(
>>> feature = tf.nn.conv1d(input.feature, filters, stride),
>>> length = tf.math.floordiv(input.length, stride),
>>> )
>>> output = convolution(
>>> input = input,
>>> filters = tf.random.normal(shape=[1, 3, 7]),
>>> stride = 2,
>>> )
>>> assert isinstance(output, Sequential)
>>> print(output.length) # -> tf.Tensor([3 2], shape=(2,), dtype=int32)
Example 2:
>>> from typing import Tuple
>>> from tf_dataclass import dataclass, function
>>> @dataclass
>>> class MyDataclass:
>>> ...
>>> @function
>>> def my_func(...) -> Tuple[tf.Tensor, MyDataclass]:
>>> ...
>>> return some_tensor, my_dataclass_instance
"""
input_type_dict = get_input_type_dict(func)
output_type = get_output_type(func)
dictorized_func = pack_function(func, input_type_dict, output_type)
tf_func = tf.function(func=dictorized_func, **kwargs)
undictorized_func = unpack_function(tf_func, input_type_dict, output_type)
return undictorized_func
|
py | 1a4216dccc4c7cfe61ec0497e05a6f68ec974221 | # pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `utils.py`."""
from absl.testing import absltest
from dm_c19_modelling.modelling import definitions
from dm_c19_modelling.modelling.models import utils
import numpy as np
class RolloutFeaturesTest(absltest.TestCase):
def test_expected_rollout(self):
feature_names = ["feature1", definitions.SITE_ID_INTEGER, "feature2",
"feature3", definitions.WEEK_DAY_INTEGER]
target_names = ["feature3", "feature1"]
constant_features = ["feature2"]
cadence = 2
features = np.array([
# First date. Day #2.
[
# First site.
[10.1, 25., 30., 40.1, 2],
# Second site.
[10.2, 27., 30., 40.2, 2],
],
# Second date. Day #4.
[
# First site.
[11.1, 25., 30., 41.1, 4],
# Second site.
[11.2, 27., 30., 41.2, 4],
],
])
next_steps_targets = np.array([
# Third date. Day #6.
[
# First site.
[42.1, 12.1],
# Second site.
[42.2, 12.2],
],
# Fourth date. Day #8.
[
# First site.
[43.1, 13.1],
# Second site.
[43.2, 13.2],
],
])
output = utils.rollout_features_with_predictions(
features=features,
next_steps_targets=next_steps_targets,
feature_names=feature_names,
target_names=target_names,
cadence=cadence,
constant_features=constant_features)
expected_additional_features = np.array([
# Third date. Day #6.
[
# First site.
[12.1, 25., 30., 42.1, 6],
# Second site.
[12.2, 27., 30., 42.2, 6],
],
        # Fourth date. Day #8 (the week-day feature wraps back to 1).
[
# First site.
[13.1, 25., 30., 43.1, 1],
# Second site.
[13.2, 27., 30., 43.2, 1],
],
])
expected_output = np.concatenate(
[features, expected_additional_features], axis=0)
np.testing.assert_allclose(output, expected_output)
if __name__ == "__main__":
absltest.main()
|
py | 1a42175bd80b7e02bafead0a99f5fc8665c0f6b6 | import torch
import torch.nn as nn
def dice_loss(input, target):
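    # Soft Dice loss: 1 - (2*|A∩B| + smooth) / (|A| + |B| + smooth), computed
    # on the flattened sigmoid probabilities.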
input = torch.sigmoid(input)
smooth = 1e-5
iflat = input.view(-1)
tflat = target.view(-1)
intersection = (iflat * tflat).sum()
return 1 - ((2. * intersection + smooth) /
(iflat.sum() + tflat.sum() + smooth))
def focal_loss(input,
target,
reduction='mean',
beta=0.5,
gamma=2.,
eps=1e-7,
**kwargs):
"""
Focal loss, see arXiv:1708.02002
input: [B, 1, H, W] tensor that contains predictions to compare
target: [B, 1, H, W] tensor that contains targets to compare to
reduction: one of mean, sum or none. Used to choose how loss is reduced
over batches
beta: weight in [0; 1] to give to positive targets. The higher it is, the
more true positive and false negative are important. Negative targets
have weight 1-beta
gamma: parameter that reduces the loss contribution from easy examples and
extends the range in which an example receives low loss. It also
gives more weight to misclassified examples
eps: constant used for numerical stability
return: [1] or [B] (if reduction='none') tensor containing loss between
input and target
"""
n = input.size(0)
iflat = torch.sigmoid(input).view(n, -1).clamp(eps, 1 - eps)
tflat = target.view(n, -1)
focal = -(beta * tflat * (1 - iflat).pow(gamma) * iflat.log() + (1 - beta) *
(1 - tflat) * iflat.pow(gamma) * (1 - iflat).log()).mean(-1)
if reduction == 'mean':
return focal.mean()
elif reduction == 'sum':
return focal.sum()
else:
return focal
class FocalDiceLoss(nn.Module):
"""
Weighted linear combination of focal and dice losses
a: weight of binary cross-entropy
b: weight of dice
smooth: value added to both numerator and denominator of dice to avoid
division by zero and smooth gradient around 0
beta: weight in [0; 1] to give to positive targets. The higher it is,
the more true positive and false negative are important. Negative
targets have weight 1-beta
gamma: parameter that reduces the loss contribution from easy examples
and extends the range in which an example receives low loss. It
also gives more weight to misclassified examples
reduction: one of mean, sum or none. Used to choose how loss is reduced
over batches
"""
def __init__(self,
a=0.5,
b=0.5,
smooth=1.,
beta=0.5,
gamma=2.,
reduction='mean'):
super().__init__()
self.a = a
self.b = b
self.smooth = smooth
self.beta = beta
self.gamma = gamma
self.reduction = reduction
def forward(self, input, target):
"""
input: [B, 1, H, W] tensor that contains predictions to compare
target: [B, 1, H, W] tensor that contains targets to compare to
return: [1] or [B] (if self.reduction='none') tensor containing loss
between input and target
"""
focal = focal_loss(
input,
target,
beta=self.beta,
gamma=self.gamma,
reduction=self.reduction)
dice = dice_loss(input, target)
return self.a * focal + self.b * dice
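# Minimal usage sketch (shapes are illustrative, not part of the original module):
if __name__ == "__main__":
    torch.manual_seed(0)
    criterion = FocalDiceLoss(a=0.5, b=0.5)
    logits = torch.randn(4, 1, 64, 64)                    # raw network outputs
    masks = torch.randint(0, 2, (4, 1, 64, 64)).float()   # binary targets
    print(criterion(logits, masks).item())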
|
py | 1a42187eb7ea04b29a2c2dea31ac9d60a32342ad | # Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ************************************** DefaultControlMechanism ************************************************
"""
The DefaultControlMechanism is created for a `System` if no other controller type is specified. The
DefaultControlMechanism creates a `ControlSignal` for each `ControlProjection` it is assigned, and uses
`defaultControlAllocation` as the `value <ControlSignal.value>` for the ControlSignal. By default,
`defaultControlAllocation` = 1, so that ControlProjections from the DefaultControlMechanism have no effect on their
parameters. However, it can be used to uniformly control the parameters that receive ControlProjections from it,
by manually changing the value of `defaultControlAllocation`. See `ControlMechanism <ControlMechanism>` for additional
details of how ControlMechanisms are created and executed, and of their attributes.
COMMENT:
ADD LINK FOR defaultControlAllocation
TEST FOR defaultControlAllocation: |defaultControlAllocation|
ANOTHER TEST FOR defaultControlAllocation: :py:print:`defaultControlAllocation`
AND YET ANOTHER TEST FOR defaultControlAllocation: :py:print:|defaultControlAllocation|
LINK TO DEFAULTS: :doc:`Defaults`
COMMENT
"""
import numpy as np
import typecheck as tc
from psyneulink.components.mechanisms.adaptive.control.controlmechanism import ControlMechanism
from psyneulink.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism
from psyneulink.components.states.inputstate import InputState
from psyneulink.globals.defaults import defaultControlAllocation
from psyneulink.globals.keywords import CONTROL, FUNCTION, FUNCTION_PARAMS, INPUT_STATES, INTERCEPT, MODULATION, NAME, OBJECTIVE_MECHANISM, SLOPE
from psyneulink.globals.preferences.componentpreferenceset import is_pref_set
from psyneulink.globals.preferences.preferenceset import PreferenceLevel
from psyneulink.globals.utilities import ContentAddressableList
from psyneulink.scheduling.time import TimeScale
__all__ = [
'DefaultControlMechanism', 'DefaultControlMechanismError'
]
class DefaultControlMechanismError(Exception):
def __init__(self, error_value):
self.error_value = error_value
class DefaultControlMechanism(ControlMechanism):
"""Subclass of `ControlMechanism <ControlMechanism>` that implements a DefaultControlMechanism.
COMMENT:
Description:
Implements default source of control signals, with one inputState and outputState for each.
            Uses defaultControlAllocation as input(s) and passes value(s) unchanged to outputState(s) and
ControlProjection(s)
Every ControlProjection is assigned this Mechanism as its sender by default (i.e., unless a sender is
explicitly specified in its constructor).
An inputState and outputState is created for each ControlProjection assigned:
the inputState is assigned the
:py:constant:`defaultControlAllocation <Defaults.defaultControlAllocation>` value;
when the DefaultControlMechanism executes, it simply assigns the same value to the ControlProjection.
Class attributes:
+ componentType (str): System Default Mechanism
+ paramClassDefaults (dict):
+ FUNCTION: Linear
COMMENT
"""
componentType = "DefaultControlMechanism"
classPreferenceLevel = PreferenceLevel.SUBTYPE
# classPreferenceLevel = PreferenceLevel.TYPE
# Any preferences specified below will override those specified in TypeDefaultPreferences
# Note: only need to specify setting; level will be assigned to Type automatically
# classPreferences = {
# kwPreferenceSetName: 'DefaultControlMechanismCustomClassPreferences',
# kp<pref>: <setting>...}
from psyneulink.components.functions.function import Linear
paramClassDefaults = ControlMechanism.paramClassDefaults.copy()
paramClassDefaults.update({FUNCTION:Linear,
FUNCTION_PARAMS:{SLOPE:1, INTERCEPT:0},
OBJECTIVE_MECHANISM:None,
MODULATION:None,
})
@tc.typecheck
def __init__(self,
# default_variable=None,
# size=None,
system=None,
objective_mechanism:tc.optional(tc.any(ObjectiveMechanism, list))=None,
control_signals:tc.optional(list)=None,
params=None,
name=None,
prefs:is_pref_set=None):
super(DefaultControlMechanism, self).__init__(# default_variable=default_variable,
# size=size,
objective_mechanism=objective_mechanism,
control_signals=control_signals,
params=params,
name=name,
prefs=prefs,
context=self)
def _instantiate_input_states(self, context=None):
"""Instantiate input_value attribute
Instantiate input_states and monitored_output_states attributes (in case they are referenced)
and assign any OutputStates that project to the input_states to monitored_output_states
IMPLEMENTATION NOTE: At present, these are dummy assignments, simply to satisfy the requirements for
subclasses of ControlMechanism; in the future, an _instantiate_objective_mechanism()
method should be implemented that also implements an _instantiate_monitored_output_states
method, and that can be used to add OutputStates/Mechanisms to be monitored.
"""
if not hasattr(self, INPUT_STATES):
self._input_states = None
elif self.input_states:
for input_state in self.input_states:
for projection in input_state.path_afferents:
self.monitored_output_states.append(projection.sender)
def _instantiate_control_signal(self, control_signal, context=None):
"""Instantiate requested ControlSignal, ControlProjection and associated InputState
"""
from psyneulink.components.states.parameterstate import ParameterState
if isinstance(control_signal, dict):
if CONTROL in control_signal:
projection = control_signal[CONTROL][0]
input_name = 'DefaultControlAllocation for ' + projection.receiver.name + '_ControlSignal'
elif NAME in control_signal:
input_name = 'DefaultControlAllocation for ' + control_signal[NAME] + '_ControlSignal'
elif isinstance(control_signal, tuple):
input_name = 'DefaultControlAllocation for ' + control_signal[0] + '_ControlSignal'
elif isinstance(control_signal, ParameterState):
input_name = 'DefaultControlAllocation for ' + control_signal.name + '_ControlSignal'
else:
raise DefaultControlMechanismError("control signal ({}) was not a dict, tuple, or ParameterState".
format(control_signal))
# Instantiate input_states and allocation_policy attribute for control_signal allocations
self._instantiate_default_input_state(input_name, defaultControlAllocation, context=context)
self.allocation_policy = self.input_values
# Call super to instantiate ControlSignal
# Note: any params specified with ControlProjection for the control_signal
# should be in PARAMS entry of dict passed in control_signal arg
control_signal = super()._instantiate_control_signal(control_signal=control_signal, context=context)
def _instantiate_default_input_state(self, input_state_name, input_state_value, context=None):
"""Instantiate inputState for ControlMechanism
        NOTE: This parallels ObjectiveMechanism._instantiate_input_state_for_monitored_state()
It is implemented here to spare having to instantiate a "dummy" (and superfluous) ObjectiveMechanism
for the sole purpose of creating input_states for each value of defaultControlAllocation to assign
to the ControlProjections.
Extend self.instance_defaults.variable by one item to accommodate new inputState
Instantiate the inputState using input_state_name and input_state_value
Update self.input_state and self.input_states
Args:
input_state_name (str):
input_state_value (2D np.array):
context:
Returns:
input_state (InputState):
"""
# First, test for initialization conditions:
# This is for generality (in case, for any subclass in the future, variable is assigned to None on init)
if self.instance_defaults.variable is None:
self.instance_defaults.variable = np.atleast_2d(input_state_value)
# If there is a single item in self.instance_defaults.variable, it could be the one assigned on initialization
# (in order to validate ``function`` and get its return value as a template for self.value);
# in that case, there should be no input_states yet, so pass
# (i.e., don't bother to extend self.instance_defaults.variable): it will be used for the new inputState
elif len(self.instance_defaults.variable) == 1:
if self.input_states:
self.instance_defaults.variable = np.append(self.instance_defaults.variable, np.atleast_2d(input_state_value), 0)
else:
# If there are no input_states, this is the usual initialization condition;
# Pass to create a new inputState that will be assigned to existing the first item of self.instance_defaults.variable
pass
# Other than on initialization (handled above), it is a PROGRAM ERROR if
# the number of input_states is not equal to the number of items in self.instance_defaults.variable
elif len(self.instance_defaults.variable) != len(self.input_states):
raise DefaultControlMechanismError(
"PROGRAM ERROR: The number of input_states ({}) does not match "
"the number of items found for the variable attribute ({}) of {}"
"when creating {}".format(
len(self.input_states),
len(self.instance_defaults.variable),
self.name,
input_state_name,
)
)
# Extend self.instance_defaults.variable to accommodate new inputState
else:
self.instance_defaults.variable = np.append(self.instance_defaults.variable, np.atleast_2d(input_state_value), 0)
variable_item_index = self.instance_defaults.variable.size-1
# Instantiate inputState
from psyneulink.components.states.state import _instantiate_state
from psyneulink.components.states.inputstate import InputState
input_state = _instantiate_state(owner=self,
state_type=InputState,
name=input_state_name,
# state_spec=defaultControlAllocation,
reference_value=np.array(self.instance_defaults.variable[variable_item_index]),
reference_value_name='Default control allocation',
params=None,
context=context)
# Update inputState and input_states
if self.input_states:
self._input_states[input_state.name] = input_state
else:
from psyneulink.components.states.state import State_Base
self._input_states = ContentAddressableList(component_type=State_Base,
list=[input_state],
name=self.name+'.input_states')
# self.input_value = [state.value for state in self.input_states]
return input_state
|
py | 1a4219a0ac2ba501097e94052000961e20e0e0de | # -*- coding: utf8 -*-
import sys
import os
import unittest
import platform
from pygame.tests.test_utils import example_path, AssertRaisesRegexMixin
import pygame
from pygame import mixer
from pygame.compat import unicode_, as_bytes, bytes_
IS_PYPY = "PyPy" == platform.python_implementation()
################################### CONSTANTS ##################################
FREQUENCIES = [11025, 22050, 44100, 48000]
SIZES = [-16, -8, 8, 16]
if pygame.get_sdl_version()[0] >= 2:
SIZES.append(32)
CHANNELS = [1, 2]
BUFFERS = [3024]
CONFIGS = [
{"frequency": f, "size": s, "channels": c}
for f in FREQUENCIES
for s in SIZES
for c in CHANNELS
]
# Using all CONFIGS fails on a Mac; probably older SDL_mixer; we could do:
# if platform.system() == 'Darwin':
# But using all CONFIGS is very slow (> 10 sec for example)
# And probably, we don't need to be so exhaustive, hence:
CONFIG = {"frequency": 22050, "size": -16, "channels": 2} # base config
if pygame.get_sdl_version()[0] >= 2:
CONFIG = {"frequency": 44100, "size": 32, "channels": 2} # base config
class InvalidBool(object):
"""To help test invalid bool values."""
__nonzero__ = None
__bool__ = None
############################## MODULE LEVEL TESTS ##############################
class MixerModuleTest(unittest.TestCase):
def tearDown(self):
mixer.quit()
mixer.pre_init(0, 0, 0, 0)
def test_init__keyword_args(self):
# note: this test used to loop over all CONFIGS, but it's very slow..
mixer.init(**CONFIG)
mixer_conf = mixer.get_init()
self.assertEqual(mixer_conf[0], CONFIG["frequency"])
# Not all "sizes" are supported on all systems, hence "abs".
self.assertEqual(abs(mixer_conf[1]), abs(CONFIG["size"]))
self.assertEqual(mixer_conf[2], CONFIG["channels"])
def test_pre_init__keyword_args(self):
# note: this test used to loop over all CONFIGS, but it's very slow..
mixer.pre_init(**CONFIG)
mixer.init()
mixer_conf = mixer.get_init()
self.assertEqual(mixer_conf[0], CONFIG["frequency"])
# Not all "sizes" are supported on all systems, hence "abs".
self.assertEqual(abs(mixer_conf[1]), abs(CONFIG["size"]))
self.assertEqual(mixer_conf[2], CONFIG["channels"])
def test_pre_init__zero_values(self):
# Ensure that argument values of 0 are replaced with
# default values. No way to check buffer size though.
mixer.pre_init(22050, -8, 1) # Non default values
mixer.pre_init(0, 0, 0) # Should reset to default values
mixer.init()
self.assertEqual(mixer.get_init(), (44100, -16, 2))
def test_init__zero_values(self):
# Ensure that argument values of 0 are replaced with
# preset values. No way to check buffer size though.
        mixer.pre_init(44100, 8, 1, allowedchanges=0)  # Non-default values
mixer.init(0, 0, 0)
self.assertEqual(mixer.get_init(), (44100, 8, 1))
@unittest.skip("SDL_mixer bug")
def test_get_init__returns_exact_values_used_for_init(self):
# fix in 1.9 - I think it's a SDL_mixer bug.
# TODO: When this bug is fixed, testing through every combination
# will be too slow so adjust as necessary, at the moment it
# breaks the loop after first failure
for init_conf in CONFIGS:
            frequency = init_conf["frequency"]
            size = init_conf["size"]
            channels = init_conf["channels"]
if (frequency, size) == (22050, 16):
continue
mixer.init(frequency, size, channels)
mixer_conf = mixer.get_init()
            self.assertEqual((frequency, size, channels), mixer_conf)
mixer.quit()
def test_get_init__returns_None_if_mixer_not_initialized(self):
self.assertIsNone(mixer.get_init())
def test_get_num_channels__defaults_eight_after_init(self):
mixer.init()
self.assertEqual(mixer.get_num_channels(), 8)
def test_set_num_channels(self):
mixer.init()
default_num_channels = mixer.get_num_channels()
for i in range(1, default_num_channels + 1):
mixer.set_num_channels(i)
self.assertEqual(mixer.get_num_channels(), i)
def test_quit(self):
""" get_num_channels() Should throw pygame.error if uninitialized
after mixer.quit() """
mixer.init()
mixer.quit()
self.assertRaises(pygame.error, mixer.get_num_channels)
# TODO: FIXME: appveyor fails here sometimes.
@unittest.expectedFailure
def test_sound_args(self):
def get_bytes(snd):
return snd.get_raw()
mixer.init()
sample = as_bytes("\x00\xff") * 24
wave_path = example_path(os.path.join("data", "house_lo.wav"))
uwave_path = unicode_(wave_path)
bwave_path = uwave_path.encode(sys.getfilesystemencoding())
snd = mixer.Sound(file=wave_path)
self.assertTrue(snd.get_length() > 0.5)
snd_bytes = get_bytes(snd)
self.assertTrue(len(snd_bytes) > 1000)
self.assertEqual(get_bytes(mixer.Sound(wave_path)), snd_bytes)
self.assertEqual(get_bytes(mixer.Sound(file=uwave_path)), snd_bytes)
self.assertEqual(get_bytes(mixer.Sound(uwave_path)), snd_bytes)
arg_emsg = "Sound takes either 1 positional or 1 keyword argument"
with self.assertRaises(TypeError) as cm:
mixer.Sound()
self.assertEqual(str(cm.exception), arg_emsg)
with self.assertRaises(TypeError) as cm:
mixer.Sound(wave_path, buffer=sample)
self.assertEqual(str(cm.exception), arg_emsg)
with self.assertRaises(TypeError) as cm:
mixer.Sound(sample, file=wave_path)
self.assertEqual(str(cm.exception), arg_emsg)
with self.assertRaises(TypeError) as cm:
mixer.Sound(buffer=sample, file=wave_path)
self.assertEqual(str(cm.exception), arg_emsg)
with self.assertRaises(TypeError) as cm:
mixer.Sound(foobar=sample)
self.assertEqual(str(cm.exception), "Unrecognized keyword argument 'foobar'")
snd = mixer.Sound(wave_path, **{})
self.assertEqual(get_bytes(snd), snd_bytes)
snd = mixer.Sound(*[], **{"file": wave_path})
with self.assertRaises(TypeError) as cm:
mixer.Sound([])
self.assertEqual(str(cm.exception), "Unrecognized argument (type list)")
with self.assertRaises(TypeError) as cm:
snd = mixer.Sound(buffer=[])
emsg = "Expected object with buffer interface: got a list"
self.assertEqual(str(cm.exception), emsg)
ufake_path = unicode_("12345678")
self.assertRaises(IOError, mixer.Sound, ufake_path)
self.assertRaises(IOError, mixer.Sound, "12345678")
with self.assertRaises(TypeError) as cm:
mixer.Sound(buffer=unicode_("something"))
emsg = "Unicode object not allowed as buffer object"
self.assertEqual(str(cm.exception), emsg)
self.assertEqual(get_bytes(mixer.Sound(buffer=sample)), sample)
if type(sample) != str:
somebytes = get_bytes(mixer.Sound(sample))
# on python 2 we do not allow using string except as file name.
self.assertEqual(somebytes, sample)
self.assertEqual(get_bytes(mixer.Sound(file=bwave_path)), snd_bytes)
self.assertEqual(get_bytes(mixer.Sound(bwave_path)), snd_bytes)
snd = mixer.Sound(wave_path)
with self.assertRaises(TypeError) as cm:
mixer.Sound(wave_path, array=snd)
self.assertEqual(str(cm.exception), arg_emsg)
with self.assertRaises(TypeError) as cm:
mixer.Sound(buffer=sample, array=snd)
self.assertEqual(str(cm.exception), arg_emsg)
snd2 = mixer.Sound(array=snd)
self.assertEqual(snd.get_raw(), snd2.get_raw())
def test_sound_unicode(self):
"""test non-ASCII unicode path"""
mixer.init()
import shutil
ep = unicode_(example_path("data"))
temp_file = os.path.join(ep, u"你好.wav")
org_file = os.path.join(ep, u"house_lo.wav")
shutil.copy(org_file, temp_file)
try:
with open(temp_file, "rb") as f:
pass
except IOError:
raise unittest.SkipTest("the path cannot be opened")
try:
sound = mixer.Sound(temp_file)
del sound
finally:
os.remove(temp_file)
@unittest.skipIf(
os.environ.get("SDL_AUDIODRIVER") == "disk",
"this test fails without real sound card",
)
def test_array_keyword(self):
try:
from numpy import (
array,
arange,
zeros,
int8,
uint8,
int16,
uint16,
int32,
uint32,
)
except ImportError:
self.skipTest("requires numpy")
freq = 22050
format_list = [-8, 8, -16, 16]
channels_list = [1, 2]
a_lists = dict((f, []) for f in format_list)
a32u_mono = arange(0, 256, 1, uint32)
a16u_mono = a32u_mono.astype(uint16)
a8u_mono = a32u_mono.astype(uint8)
au_list_mono = [(1, a) for a in [a8u_mono, a16u_mono, a32u_mono]]
for format in format_list:
if format > 0:
a_lists[format].extend(au_list_mono)
a32s_mono = arange(-128, 128, 1, int32)
a16s_mono = a32s_mono.astype(int16)
a8s_mono = a32s_mono.astype(int8)
as_list_mono = [(1, a) for a in [a8s_mono, a16s_mono, a32s_mono]]
for format in format_list:
if format < 0:
a_lists[format].extend(as_list_mono)
a32u_stereo = zeros([a32u_mono.shape[0], 2], uint32)
a32u_stereo[:, 0] = a32u_mono
a32u_stereo[:, 1] = 255 - a32u_mono
a16u_stereo = a32u_stereo.astype(uint16)
a8u_stereo = a32u_stereo.astype(uint8)
au_list_stereo = [(2, a) for a in [a8u_stereo, a16u_stereo, a32u_stereo]]
for format in format_list:
if format > 0:
a_lists[format].extend(au_list_stereo)
a32s_stereo = zeros([a32s_mono.shape[0], 2], int32)
a32s_stereo[:, 0] = a32s_mono
a32s_stereo[:, 1] = -1 - a32s_mono
a16s_stereo = a32s_stereo.astype(int16)
a8s_stereo = a32s_stereo.astype(int8)
as_list_stereo = [(2, a) for a in [a8s_stereo, a16s_stereo, a32s_stereo]]
for format in format_list:
if format < 0:
a_lists[format].extend(as_list_stereo)
for format in format_list:
for channels in channels_list:
try:
mixer.init(freq, format, channels)
except pygame.error:
# Some formats (e.g. 16) may not be supported.
continue
try:
__, f, c = mixer.get_init()
if f != format or c != channels:
# Some formats (e.g. -8) may not be supported.
continue
for c, a in a_lists[format]:
self._test_array_argument(format, a, c == channels)
finally:
mixer.quit()
def _test_array_argument(self, format, a, test_pass):
from numpy import array, all as all_
try:
snd = mixer.Sound(array=a)
except ValueError:
if not test_pass:
return
self.fail("Raised ValueError: Format %i, dtype %s" % (format, a.dtype))
if not test_pass:
self.fail(
"Did not raise ValueError: Format %i, dtype %s" % (format, a.dtype)
)
a2 = array(snd)
a3 = a.astype(a2.dtype)
lshift = abs(format) - 8 * a.itemsize
if lshift >= 0:
# This is asymmetric with respect to downcasting.
a3 <<= lshift
self.assertTrue(all_(a2 == a3), "Format %i, dtype %s" % (format, a.dtype))
def _test_array_interface_fail(self, a):
self.assertRaises(ValueError, mixer.Sound, array=a)
def test_array_interface(self):
mixer.init(22050, -16, 1, allowedchanges=0)
snd = mixer.Sound(buffer=as_bytes("\x00\x7f") * 20)
d = snd.__array_interface__
self.assertTrue(isinstance(d, dict))
if pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN:
typestr = "<i2"
else:
typestr = ">i2"
self.assertEqual(d["typestr"], typestr)
self.assertEqual(d["shape"], (20,))
self.assertEqual(d["strides"], (2,))
self.assertEqual(d["data"], (snd._samples_address, False))
@unittest.skipIf(not pygame.HAVE_NEWBUF, "newbuf not implemented")
def test_newbuf__one_channel(self):
mixer.init(22050, -16, 1)
self._NEWBUF_export_check()
@unittest.skipIf(not pygame.HAVE_NEWBUF, "newbuf not implemented")
def test_newbuf__twho_channel(self):
mixer.init(22050, -16, 2)
self._NEWBUF_export_check()
def _NEWBUF_export_check(self):
freq, fmt, channels = mixer.get_init()
ndim = 1 if (channels == 1) else 2
itemsize = abs(fmt) // 8
formats = {
8: "B",
-8: "b",
16: "=H",
-16: "=h",
32: "=I",
-32: "=i", # 32 and 64 for future consideration
64: "=Q",
-64: "=q",
}
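        # struct-style format codes: the sign of the mixer format selects a
        # signed or unsigned sample type; '=' means standard size, native order.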
format = formats[fmt]
from pygame.tests.test_utils import buftools
Exporter = buftools.Exporter
Importer = buftools.Importer
is_lil_endian = pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN
fsys, frev = ("<", ">") if is_lil_endian else (">", "<")
shape = (10, channels)[:ndim]
strides = (channels * itemsize, itemsize)[2 - ndim :]
exp = Exporter(shape, format=frev + "i")
snd = mixer.Sound(array=exp)
buflen = len(exp) * itemsize * channels
imp = Importer(snd, buftools.PyBUF_SIMPLE)
self.assertEqual(imp.ndim, 0)
self.assertTrue(imp.format is None)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertTrue(imp.shape is None)
self.assertTrue(imp.strides is None)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_WRITABLE)
self.assertEqual(imp.ndim, 0)
self.assertTrue(imp.format is None)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertTrue(imp.shape is None)
self.assertTrue(imp.strides is None)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_FORMAT)
self.assertEqual(imp.ndim, 0)
self.assertEqual(imp.format, format)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertTrue(imp.shape is None)
self.assertTrue(imp.strides is None)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_ND)
self.assertEqual(imp.ndim, ndim)
self.assertTrue(imp.format is None)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertEqual(imp.shape, shape)
self.assertTrue(imp.strides is None)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_STRIDES)
self.assertEqual(imp.ndim, ndim)
self.assertTrue(imp.format is None)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertEqual(imp.shape, shape)
self.assertEqual(imp.strides, strides)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_FULL_RO)
self.assertEqual(imp.ndim, ndim)
self.assertEqual(imp.format, format)
self.assertEqual(imp.len, buflen)
        self.assertEqual(imp.itemsize, itemsize)
self.assertEqual(imp.shape, shape)
self.assertEqual(imp.strides, strides)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_FULL_RO)
self.assertEqual(imp.ndim, ndim)
self.assertEqual(imp.format, format)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertEqual(imp.shape, exp.shape)
self.assertEqual(imp.strides, strides)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_C_CONTIGUOUS)
self.assertEqual(imp.ndim, ndim)
self.assertTrue(imp.format is None)
self.assertEqual(imp.strides, strides)
imp = Importer(snd, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertEqual(imp.ndim, ndim)
self.assertTrue(imp.format is None)
self.assertEqual(imp.strides, strides)
if ndim == 1:
imp = Importer(snd, buftools.PyBUF_F_CONTIGUOUS)
self.assertEqual(imp.ndim, 1)
self.assertTrue(imp.format is None)
self.assertEqual(imp.strides, strides)
else:
self.assertRaises(BufferError, Importer, snd, buftools.PyBUF_F_CONTIGUOUS)
def todo_test_fadeout(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.fadeout:
# pygame.mixer.fadeout(time): return None
# fade out the volume on all sounds before stopping
#
# This will fade out the volume on all active channels over the time
# argument in milliseconds. After the sound is muted the playback will
# stop.
#
self.fail()
def todo_test_find_channel(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.find_channel:
# pygame.mixer.find_channel(force=False): return Channel
# find an unused channel
#
# This will find and return an inactive Channel object. If there are
# no inactive Channels this function will return None. If there are no
# inactive channels and the force argument is True, this will find the
# Channel with the longest running Sound and return it.
#
# If the mixer has reserved channels from pygame.mixer.set_reserved()
# then those channels will not be returned here.
#
self.fail()
def todo_test_get_busy(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.get_busy:
# pygame.mixer.get_busy(): return bool
# test if any sound is being mixed
#
# Returns True if the mixer is busy mixing any channels. If the mixer
# is idle then this return False.
#
self.fail()
def todo_test_pause(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.pause:
# pygame.mixer.pause(): return None
# temporarily stop playback of all sound channels
#
# This will temporarily stop all playback on the active mixer
# channels. The playback can later be resumed with
# pygame.mixer.unpause()
#
self.fail()
def todo_test_set_reserved(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.set_reserved:
# pygame.mixer.set_reserved(count): return None
# reserve channels from being automatically used
#
# The mixer can reserve any number of channels that will not be
# automatically selected for playback by Sounds. If sounds are
# currently playing on the reserved channels they will not be stopped.
#
# This allows the application to reserve a specific number of channels
# for important sounds that must not be dropped or have a guaranteed
# channel to play on.
#
self.fail()
def todo_test_stop(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.stop:
# pygame.mixer.stop(): return None
# stop playback of all sound channels
#
# This will stop all playback of all active mixer channels.
self.fail()
def todo_test_unpause(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.unpause:
# pygame.mixer.unpause(): return None
# resume paused playback of sound channels
#
# This will resume all active sound channels after they have been paused.
self.fail()
def test_get_sdl_mixer_version(self):
"""Ensures get_sdl_mixer_version works correctly with no args."""
expected_length = 3
expected_type = tuple
expected_item_type = int
version = pygame.mixer.get_sdl_mixer_version()
self.assertIsInstance(version, expected_type)
self.assertEqual(len(version), expected_length)
for item in version:
self.assertIsInstance(item, expected_item_type)
def test_get_sdl_mixer_version__args(self):
"""Ensures get_sdl_mixer_version works correctly using args."""
expected_length = 3
expected_type = tuple
expected_item_type = int
for value in (True, False):
version = pygame.mixer.get_sdl_mixer_version(value)
self.assertIsInstance(version, expected_type)
self.assertEqual(len(version), expected_length)
for item in version:
self.assertIsInstance(item, expected_item_type)
def test_get_sdl_mixer_version__kwargs(self):
"""Ensures get_sdl_mixer_version works correctly using kwargs."""
expected_length = 3
expected_type = tuple
expected_item_type = int
for value in (True, False):
version = pygame.mixer.get_sdl_mixer_version(linked=value)
self.assertIsInstance(version, expected_type)
self.assertEqual(len(version), expected_length)
for item in version:
self.assertIsInstance(item, expected_item_type)
def test_get_sdl_mixer_version__invalid_args_kwargs(self):
"""Ensures get_sdl_mixer_version handles invalid args and kwargs."""
invalid_bool = InvalidBool()
with self.assertRaises(TypeError):
version = pygame.mixer.get_sdl_mixer_version(invalid_bool)
with self.assertRaises(TypeError):
version = pygame.mixer.get_sdl_mixer_version(linked=invalid_bool)
def test_get_sdl_mixer_version__linked_equals_compiled(self):
"""Ensures get_sdl_mixer_version's linked/compiled versions are equal.
"""
linked_version = pygame.mixer.get_sdl_mixer_version(linked=True)
        compiled_version = pygame.mixer.get_sdl_mixer_version(linked=False)
        self.assertTupleEqual(linked_version, compiled_version)
############################## CHANNEL CLASS TESTS #############################
class ChannelTypeTest(AssertRaisesRegexMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
# Initializing the mixer is slow, so minimize the times it is called.
mixer.init()
@classmethod
def tearDownClass(cls):
mixer.quit()
    def setUp(self):
# This makes sure the mixer is always initialized before each test (in
# case a test calls pygame.mixer.quit()).
if mixer.get_init() is None:
mixer.init()
def test_channel(self):
"""Ensure Channel() creation works."""
channel = mixer.Channel(0)
self.assertIsInstance(channel, mixer.ChannelType)
self.assertEqual(channel.__class__.__name__, "Channel")
def test_channel__without_arg(self):
"""Ensure exception for Channel() creation with no argument."""
with self.assertRaises(TypeError):
mixer.Channel()
def test_channel__invalid_id(self):
"""Ensure exception for Channel() creation with an invalid id."""
with self.assertRaises(IndexError):
mixer.Channel(-1)
def test_channel__before_init(self):
"""Ensure exception for Channel() creation with non-init mixer."""
mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
mixer.Channel(0)
def todo_test_fadeout(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.fadeout:
# Channel.fadeout(time): return None
# stop playback after fading channel out
#
# Stop playback of a channel after fading out the sound over the given
# time argument in milliseconds.
#
self.fail()
def test_get_busy(self):
"""Ensure an idle channel's busy state is correct."""
expected_busy = False
channel = mixer.Channel(0)
busy = channel.get_busy()
self.assertEqual(busy, expected_busy)
def todo_test_get_busy__active(self):
"""Ensure an active channel's busy state is correct."""
self.fail()
def todo_test_get_endevent(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.get_endevent:
# Channel.get_endevent(): return type
# get the event a channel sends when playback stops
#
# Returns the event type to be sent every time the Channel finishes
# playback of a Sound. If there is no endevent the function returns
# pygame.NOEVENT.
#
self.fail()
def todo_test_get_queue(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.get_queue:
# Channel.get_queue(): return Sound
# return any Sound that is queued
#
# If a Sound is already queued on this channel it will be returned.
# Once the queued sound begins playback it will no longer be on the
# queue.
#
self.fail()
def todo_test_get_sound(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.get_sound:
# Channel.get_sound(): return Sound
# get the currently playing Sound
#
# Return the actual Sound object currently playing on this channel. If
# the channel is idle None is returned.
#
self.fail()
def test_get_volume(self):
"""Ensure a channel's volume can be retrieved."""
expected_volume = 1.0 # default
channel = mixer.Channel(0)
volume = channel.get_volume()
self.assertAlmostEqual(volume, expected_volume)
def todo_test_get_volume__while_playing(self):
"""Ensure a channel's volume can be retrieved while playing."""
self.fail()
def todo_test_pause(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.pause:
# Channel.pause(): return None
# temporarily stop playback of a channel
#
# Temporarily stop the playback of sound on a channel. It can be
# resumed at a later time with Channel.unpause()
#
self.fail()
def todo_test_play(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.play:
# Channel.play(Sound, loops=0, maxtime=0, fade_ms=0): return None
# play a Sound on a specific Channel
#
# This will begin playback of a Sound on a specific Channel. If the
# Channel is currently playing any other Sound it will be stopped.
#
# The loops argument has the same meaning as in Sound.play(): it is
# the number of times to repeat the sound after the first time. If it
# is 3, the sound will be played 4 times (the first time, then three
# more). If loops is -1 then the playback will repeat indefinitely.
#
# As in Sound.play(), the maxtime argument can be used to stop
# playback of the Sound after a given number of milliseconds.
#
# As in Sound.play(), the fade_ms argument can be used fade in the sound.
self.fail()
def todo_test_queue(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.queue:
# Channel.queue(Sound): return None
# queue a Sound object to follow the current
#
# When a Sound is queued on a Channel, it will begin playing
# immediately after the current Sound is finished. Each channel can
# only have a single Sound queued at a time. The queued Sound will
# only play if the current playback finished automatically. It is
# cleared on any other call to Channel.stop() or Channel.play().
#
# If there is no sound actively playing on the Channel then the Sound
# will begin playing immediately.
#
self.fail()
def todo_test_set_endevent(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.set_endevent:
# Channel.set_endevent(): return None
# Channel.set_endevent(type): return None
# have the channel send an event when playback stops
#
# When an endevent is set for a channel, it will send an event to the
# pygame queue every time a sound finishes playing on that channel
# (not just the first time). Use pygame.event.get() to retrieve the
# endevent once it's sent.
#
# Note that if you called Sound.play(n) or Channel.play(sound,n), the
# end event is sent only once: after the sound has been played "n+1"
# times (see the documentation of Sound.play).
#
# If Channel.stop() or Channel.play() is called while the sound was
# still playing, the event will be posted immediately.
#
# The type argument will be the event id sent to the queue. This can
# be any valid event type, but a good choice would be a value between
# pygame.locals.USEREVENT and pygame.locals.NUMEVENTS. If no type
# argument is given then the Channel will stop sending endevents.
#
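        # A hedged sketch (the event id is an arbitrary choice in the user range):
        #   SONG_END = pygame.USEREVENT + 1
        #   channel.set_endevent(SONG_END)
        #   for event in pygame.event.get():
        #       if event.type == SONG_END:
        #           pass  # react to playback finishing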
self.fail()
def todo_test_set_volume(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.set_volume:
# Channel.set_volume(value): return None
# Channel.set_volume(left, right): return None
# set the volume of a playing channel
#
# Set the volume (loudness) of a playing sound. When a channel starts
# to play its volume value is reset. This only affects the current
# sound. The value argument is between 0.0 and 1.0.
#
# If one argument is passed, it will be the volume of both speakers.
# If two arguments are passed and the mixer is in stereo mode, the
# first argument will be the volume of the left speaker and the second
# will be the volume of the right speaker. (If the second argument is
# None, the first argument will be the volume of both speakers.)
#
# If the channel is playing a Sound on which set_volume() has also
# been called, both calls are taken into account. For example:
#
# sound = pygame.mixer.Sound("s.wav")
# channel = s.play() # Sound plays at full volume by default
# sound.set_volume(0.9) # Now plays at 90% of full volume.
# sound.set_volume(0.6) # Now plays at 60% (previous value replaced).
# channel.set_volume(0.5) # Now plays at 30% (0.6 * 0.5).
self.fail()
def todo_test_stop(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.stop:
# Channel.stop(): return None
# stop playback on a Channel
#
# Stop sound playback on a channel. After playback is stopped the
# channel becomes available for new Sounds to play on it.
#
self.fail()
def todo_test_unpause(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.unpause:
# Channel.unpause(): return None
        # resume paused playback of a channel
#
# Resume the playback on a paused channel.
self.fail()
############################### SOUND CLASS TESTS ##############################
class SoundTypeTest(AssertRaisesRegexMixin, unittest.TestCase):
@classmethod
def tearDownClass(cls):
mixer.quit()
    def setUp(self):
# This makes sure the mixer is always initialized before each test (in
# case a test calls pygame.mixer.quit()).
if mixer.get_init() is None:
mixer.init()
# See MixerModuleTest's methods test_sound_args(), test_sound_unicode(),
# and test_array_keyword() for additional testing of Sound() creation.
def test_sound(self):
"""Ensure Sound() creation with a filename works."""
filename = example_path(os.path.join("data", "house_lo.wav"))
sound1 = mixer.Sound(filename)
sound2 = mixer.Sound(file=filename)
self.assertIsInstance(sound1, mixer.Sound)
self.assertIsInstance(sound2, mixer.Sound)
def test_sound__from_file_object(self):
"""Ensure Sound() creation with a file object works."""
filename = example_path(os.path.join("data", "house_lo.wav"))
# Using 'with' ensures the file is closed even if test fails.
with open(filename, "rb") as file_obj:
sound = mixer.Sound(file_obj)
self.assertIsInstance(sound, mixer.Sound)
def test_sound__from_sound_object(self):
"""Ensure Sound() creation with a Sound() object works."""
filename = example_path(os.path.join("data", "house_lo.wav"))
sound_obj = mixer.Sound(file=filename)
sound = mixer.Sound(sound_obj)
self.assertIsInstance(sound, mixer.Sound)
def todo_test_sound__from_buffer(self):
"""Ensure Sound() creation with a buffer works."""
self.fail()
def todo_test_sound__from_array(self):
"""Ensure Sound() creation with an array works."""
self.fail()
def test_sound__without_arg(self):
"""Ensure exception raised for Sound() creation with no argument."""
with self.assertRaises(TypeError):
mixer.Sound()
def test_sound__before_init(self):
"""Ensure exception raised for Sound() creation with non-init mixer."""
mixer.quit()
filename = example_path(os.path.join("data", "house_lo.wav"))
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
mixer.Sound(file=filename)
@unittest.skipIf(IS_PYPY, "pypy skip")
def test_samples_address(self):
"""Test the _samples_address getter."""
try:
from ctypes import pythonapi, c_void_p, py_object
try:
Bytes_FromString = pythonapi.PyBytes_FromString # python 3
except:
Bytes_FromString = pythonapi.PyString_FromString # python 2
Bytes_FromString.restype = c_void_p
Bytes_FromString.argtypes = [py_object]
samples = as_bytes("abcdefgh") # keep byte size a multiple of 4
sample_bytes = Bytes_FromString(samples)
snd = mixer.Sound(buffer=samples)
self.assertNotEqual(snd._samples_address, sample_bytes)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
snd._samples_address
def todo_test_fadeout(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Sound.fadeout:
# Sound.fadeout(time): return None
# stop sound playback after fading out
#
# This will stop playback of the sound after fading it out over the
# time argument in milliseconds. The Sound will fade and stop on all
# actively playing channels.
#
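        # A hedged sketch (values are illustrative):
        #   sound.play(loops=-1)   # loop indefinitely
        #   sound.fadeout(2000)    # fade to silence over 2 seconds, then stop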
self.fail()
def test_get_length(self):
"""Tests if get_length returns a correct length."""
try:
for size in SIZES:
pygame.mixer.quit()
pygame.mixer.init(size=size)
filename = example_path(os.path.join("data", "punch.wav"))
sound = mixer.Sound(file=filename)
# The sound data is in the mixer output format. So dividing the
# length of the raw sound data by the mixer settings gives
# the expected length of the sound.
sound_bytes = sound.get_raw()
mix_freq, mix_bits, mix_channels = pygame.mixer.get_init()
mix_bytes = abs(mix_bits) / 8
expected_length = float(len(sound_bytes)) / mix_freq / mix_bytes / mix_channels
self.assertAlmostEqual(expected_length, sound.get_length())
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
sound.get_length()
def test_get_num_channels(self):
"""
Tests if Sound.get_num_channels returns the correct number
of channels playing a specific sound.
"""
try:
filename = example_path(os.path.join("data", "house_lo.wav"))
sound = mixer.Sound(file=filename)
self.assertEqual(sound.get_num_channels(), 0)
sound.play()
self.assertEqual(sound.get_num_channels(), 1)
sound.play()
self.assertEqual(sound.get_num_channels(), 2)
sound.stop()
self.assertEqual(sound.get_num_channels(), 0)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
sound.get_num_channels()
def test_get_volume(self):
"""Ensure a sound's volume can be retrieved."""
try:
expected_volume = 1.0 # default
filename = example_path(os.path.join("data", "house_lo.wav"))
sound = mixer.Sound(file=filename)
volume = sound.get_volume()
self.assertAlmostEqual(volume, expected_volume)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
sound.get_volume()
def todo_test_get_volume__while_playing(self):
"""Ensure a sound's volume can be retrieved while playing."""
self.fail()
def todo_test_play(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Sound.play:
# Sound.play(loops=0, maxtime=0, fade_ms=0): return Channel
# begin sound playback
#
# Begin playback of the Sound (i.e., on the computer's speakers) on an
# available Channel. This will forcibly select a Channel, so playback
# may cut off a currently playing sound if necessary.
#
# The loops argument controls how many times the sample will be
# repeated after being played the first time. A value of 5 means that
# the sound will be played once, then repeated five times, and so is
# played a total of six times. The default value (zero) means the
# Sound is not repeated, and so is only played once. If loops is set
# to -1 the Sound will loop indefinitely (though you can still call
# stop() to stop it).
#
# The maxtime argument can be used to stop playback after a given
# number of milliseconds.
#
# The fade_ms argument will make the sound start playing at 0 volume
# and fade up to full volume over the time given. The sample may end
# before the fade-in is complete.
#
# This returns the Channel object for the channel that was selected.
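        #
        # A hedged sketch (argument values are illustrative):
        #   channel = sound.play(loops=1, fade_ms=500)
        #   # plays twice in total, fading in over 500 ms; `channel` can then be polled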
self.fail()
def test_set_volume(self):
"""Ensure a sound's volume can be set."""
try:
float_delta = 1.0 / 128 # SDL volume range is 0 to 128
filename = example_path(os.path.join("data", "house_lo.wav"))
sound = mixer.Sound(file=filename)
current_volume = sound.get_volume()
# (volume_set_value : expected_volume)
volumes = (
(-1, current_volume), # value < 0 won't change volume
(0, 0.0),
(0.01, 0.01),
(0.1, 0.1),
(0.5, 0.5),
(0.9, 0.9),
(0.99, 0.99),
(1, 1.0),
(1.1, 1.0),
(2.0, 1.0),
)
for volume_set_value, expected_volume in volumes:
sound.set_volume(volume_set_value)
self.assertAlmostEqual(
sound.get_volume(), expected_volume, delta=float_delta
)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
sound.set_volume(1)
def todo_test_set_volume__while_playing(self):
"""Ensure a sound's volume can be set while playing."""
self.fail()
def test_stop(self):
"""Ensure stop can be called while not playing a sound."""
try:
expected_channels = 0
filename = example_path(os.path.join("data", "house_lo.wav"))
sound = mixer.Sound(file=filename)
sound.stop()
self.assertEqual(sound.get_num_channels(), expected_channels)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
sound.stop()
def todo_test_stop__while_playing(self):
"""Ensure stop stops a playing sound."""
self.fail()
def test_get_raw(self):
"""Ensure get_raw returns the correct bytestring."""
try:
samples = as_bytes("abcdefgh") # keep byte size a multiple of 4
snd = mixer.Sound(buffer=samples)
raw = snd.get_raw()
self.assertIsInstance(raw, bytes_)
self.assertEqual(raw, samples)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
snd.get_raw()
##################################### MAIN #####################################
if __name__ == "__main__":
unittest.main()
|
py | 1a421bba515f9ae03ee43a2c586e2cdd3b7e6ead | # coding=utf-8
__author__ = 'lxn3032'
import os
import requests
import time
import warnings
import threading
import atexit
from airtest.core.api import connect_device, device as current_device
from airtest.core.android.ime import YosemiteIme
from hrpc.client import RpcClient
from hrpc.transport.http import HttpTransport
from poco.pocofw import Poco
from poco.agent import PocoAgent
from poco.sdk.Attributor import Attributor
from poco.sdk.interfaces.screen import ScreenInterface
from poco.utils.hrpc.hierarchy import RemotePocoHierarchy
from poco.utils.airtest.input import AirtestInput
from poco.utils import six
from poco.drivers.android.utils.installation import install, uninstall
__all__ = ['AndroidUiautomationPoco', 'AndroidUiautomationHelper']
this_dir = os.path.dirname(os.path.realpath(__file__))
PocoServicePackage = 'com.netease.open.pocoservice'
PocoServicePackageTest = 'com.netease.open.pocoservice.test'
class AndroidRpcClient(RpcClient):
def __init__(self, endpoint):
self.endpoint = endpoint
super(AndroidRpcClient, self).__init__(HttpTransport)
def initialize_transport(self):
return HttpTransport(self.endpoint, self)
# deprecated
class AttributorWrapper(Attributor):
"""
    Some devices still do not support Accessibility.ACTION_SET_TEXT, so YosemiteIme
    remains the most compatible option. This class hooks set_text and routes the
    value through the IME's text method instead.
"""
def __init__(self, remote, ime):
self.remote = remote
self.ime = ime
def getAttr(self, node, attrName):
return self.remote.getAttr(node, attrName)
def setAttr(self, node, attrName, attrVal):
if attrName == 'text' and attrVal != '':
            # Clear the field first, then set the new text. Not as clean as using the
            # IME directly, but it works well enough.
current_val = self.remote.getAttr(node, 'text')
if current_val:
self.remote.setAttr(node, 'text', '')
self.ime.text(attrVal)
else:
self.remote.setAttr(node, attrName, attrVal)
class ScreenWrapper(ScreenInterface):
def __init__(self, screen):
super(ScreenWrapper, self).__init__()
self.screen = screen
def getScreen(self, width):
        # On Android, PocoService returns only a base64-encoded image; the format is fixed to jpg.
b64img = self.screen.getScreen(width)
return b64img, 'jpg'
def getPortSize(self):
return self.screen.getPortSize()
class AndroidPocoAgent(PocoAgent):
def __init__(self, endpoint, ime, use_airtest_input=False):
self.client = AndroidRpcClient(endpoint)
remote_poco = self.client.remote('poco-uiautomation-framework')
dumper = remote_poco.dumper
selector = remote_poco.selector
attributor = remote_poco.attributor
hierarchy = RemotePocoHierarchy(dumper, selector, attributor)
if use_airtest_input:
inputer = AirtestInput()
else:
inputer = remote_poco.inputer
super(AndroidPocoAgent, self).__init__(hierarchy, inputer, ScreenWrapper(remote_poco.screen), None)
def on_bind_driver(self, driver):
super(AndroidPocoAgent, self).on_bind_driver(driver)
if isinstance(self.input, AirtestInput):
self.input.add_preaction_cb(driver)
class KeepRunningInstrumentationThread(threading.Thread):
"""Keep pocoservice running"""
def __init__(self, poco, port_to_ping):
super(KeepRunningInstrumentationThread, self).__init__()
self._stop_event = threading.Event()
self.poco = poco
self.port_to_ping = port_to_ping
self.daemon = True
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
def run(self):
while not self.stopped():
if getattr(self.poco, "_instrument_proc", None) is not None:
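                # communicate() blocks until the instrumentation process exits, so the
                # code below only runs once the service has died.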
stdout, stderr = self.poco._instrument_proc.communicate()
print('[pocoservice.apk] stdout: {}'.format(stdout))
print('[pocoservice.apk] stderr: {}'.format(stderr))
if not self.stopped():
                    self.poco._start_instrument(self.port_to_ping)  # try to restart it
time.sleep(1)
class AndroidUiautomationPoco(Poco):
"""
Poco Android implementation for testing **Android native apps**.
Args:
device (:py:obj:`Device`): :py:obj:`airtest.core.device.Device` instance provided by ``airtest``. leave the
parameter default and the default device will be chosen. more details refer to ``airtest doc``
        using_proxy (:py:obj:`bool`): whether to use adb forward to connect to the Android device
        force_restart (:py:obj:`bool`): whether to always restart the poco-service-demo running on the Android device
options: see :py:class:`poco.pocofw.Poco`
Examples:
        The simplest way to initialize an AndroidUiautomationPoco instance, regardless of your device's network status::
from poco.drivers.android.uiautomation import AndroidUiautomationPoco
poco = AndroidUiautomationPoco()
poco('android:id/title').click()
...
"""
def __init__(self, device=None, using_proxy=True, force_restart=False, use_airtest_input=False, **options):
        # This flag exists so that the latest pocounit workflow does not take a screenshot on every single step.
self.screenshot_each_action = True
if options.get('screenshot_each_action') is False:
self.screenshot_each_action = False
self.device = device or current_device()
if not self.device:
self.device = connect_device("Android:///")
self.adb_client = self.device.adb
if using_proxy:
self.device_ip = self.adb_client.host or "127.0.0.1"
else:
self.device_ip = self.device.get_ip_address()
# save current top activity (@nullable)
current_top_activity_package = self.device.get_top_activity_name()
if current_top_activity_package is not None:
current_top_activity_package = current_top_activity_package.split('/')[0]
# install ime
self.ime = YosemiteIme(self.adb_client)
self.ime.start()
# install
self._instrument_proc = None
self._install_service()
# forward
if using_proxy:
p0, _ = self.adb_client.setup_forward("tcp:10080")
p1, _ = self.adb_client.setup_forward("tcp:10081")
else:
p0 = 10080
p1 = 10081
# start
if self._is_running('com.github.uiautomator'):
warnings.warn('{} should not run together with "uiautomator". "uiautomator" will be killed.'
.format(self.__class__.__name__))
self.adb_client.shell(['am', 'force-stop', 'com.github.uiautomator'])
ready = self._start_instrument(p0, force_restart=force_restart)
if not ready:
            # If startup failed, uninstall and reinstall before retrying; an oddity of the instrumentation mechanism.
uninstall(self.adb_client, PocoServicePackage)
self._install_service()
ready = self._start_instrument(p0)
if current_top_activity_package is not None:
current_top_activity2 = self.device.get_top_activity_name()
if current_top_activity2 is None or current_top_activity_package not in current_top_activity2:
self.device.start_app(current_top_activity_package, activity=True)
if not ready:
raise RuntimeError("unable to launch AndroidUiautomationPoco")
if ready:
            # After the first successful start, watch the process from a background thread and keep it from exiting.
self._keep_running_thread = KeepRunningInstrumentationThread(self, p0)
self._keep_running_thread.start()
endpoint = "http://{}:{}".format(self.device_ip, p1)
agent = AndroidPocoAgent(endpoint, self.ime, use_airtest_input)
super(AndroidUiautomationPoco, self).__init__(agent, **options)
def _install_service(self):
updated = install(self.adb_client, os.path.join(this_dir, 'lib', 'pocoservice-debug.apk'))
install(self.adb_client, os.path.join(this_dir, 'lib', 'pocoservice-debug-androidTest.apk'), updated)
return updated
def _is_running(self, package_name):
processes = self.adb_client.shell(['ps']).splitlines()
for ps in processes:
ps = ps.strip()
if ps.endswith(package_name):
return True
return False
def _start_instrument(self, port_to_ping, force_restart=False):
if not force_restart:
try:
state = requests.get('http://{}:{}/uiautomation/connectionState'.format(self.device_ip, port_to_ping),
timeout=10)
state = state.json()
if state.get('connected'):
# skip starting instrumentation if UiAutomation Service already connected.
return True
except:
pass
if self._instrument_proc is not None:
if self._instrument_proc.poll() is None:
self._instrument_proc.kill()
self._instrument_proc = None
ready = False
self.adb_client.shell(['am', 'force-stop', PocoServicePackage])
        # Start the main activity before launching the instrumentation, otherwise instrumentation may fail.
self.adb_client.shell('am start -n {}/.TestActivity'.format(PocoServicePackage))
instrumentation_cmd = [
'am', 'instrument', '-w', '-e', 'debug', 'false', '-e', 'class',
'{}.InstrumentedTestAsLauncher'.format(PocoServicePackage),
'{}.test/android.support.test.runner.AndroidJUnitRunner'.format(PocoServicePackage)]
self._instrument_proc = self.adb_client.start_shell(instrumentation_cmd)
def cleanup_proc(proc):
def wrapped():
try:
proc.kill()
except:
pass
return wrapped
atexit.register(cleanup_proc(self._instrument_proc))
time.sleep(2)
for i in range(10):
try:
requests.get('http://{}:{}'.format(self.device_ip, port_to_ping), timeout=10)
ready = True
break
except requests.exceptions.Timeout:
break
except requests.exceptions.ConnectionError:
if self._instrument_proc.poll() is not None:
warnings.warn("[pocoservice.apk] instrumentation test server process is no longer alive")
stdout = self._instrument_proc.stdout.read()
stderr = self._instrument_proc.stderr.read()
print('[pocoservice.apk] stdout: {}'.format(stdout))
print('[pocoservice.apk] stderr: {}'.format(stderr))
time.sleep(1)
print("still waiting for uiautomation ready.")
continue
return ready
def on_pre_action(self, action, ui, args):
if self.screenshot_each_action:
            # used by the airtest log
from airtest.core.api import snapshot
msg = repr(ui)
if not isinstance(msg, six.text_type):
msg = msg.decode('utf-8')
snapshot(msg=msg)
def stop_running(self):
print('[pocoservice.apk] stopping PocoService')
self._keep_running_thread.stop()
self._keep_running_thread.join(3)
self.adb_client.shell(['am', 'force-stop', PocoServicePackage])
class AndroidUiautomationHelper(object):
_nuis = {}
@classmethod
def get_instance(cls, device):
"""
        This is only a slot to store and retrieve an already-initialized poco instance rather than
        initializing one again. You can simply pass the ``current device instance`` provided by
        ``airtest`` to get the AndroidUiautomationPoco instance. If no such instance exists, a new
        one will be created and stored.
Args:
device (:py:obj:`airtest.core.device.Device`): more details refer to ``airtest doc``
Returns:
poco instance
"""
if cls._nuis.get(device) is None:
cls._nuis[device] = AndroidUiautomationPoco(device)
return cls._nuis[device]
|
py | 1a421cc6fa60e40abbcbb2759d75398420057d52 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from test_framework.test_framework import RavenTestFramework
from test_framework.util import satoshi_round, assert_raises_rpc_error, assert_equal, Decimal
from test_framework.script import CScript
from test_framework.mininode import COIN, CTransaction, CTxIn, COutPoint, CTxOut
MAX_REPLACEMENT_LIMIT = 100
def tx_to_hex(tx):
return tx.serialize().hex()
def make_utxo(node, amount, confirmed=True, script_pub_key=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1 * COIN
while node.getbalance() < satoshi_round((amount + fee) / COIN):
node.generate(100)
new_addr = node.getnewaddress()
txid = node.sendtoaddress(new_addr, satoshi_round((amount + fee) / COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
if txout['scriptPubKey']['addresses'] == [new_addr]:
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, script_pub_key)]
tx2.rehash()
signed_tx = node.signrawtransaction(tx_to_hex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert (new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(RavenTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-maxorphantx=1000",
"-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"],
["-mempoolreplacement=0"]]
def run_test(self):
# Leave IBD
self.nodes[0].generate(1)
make_utxo(self.nodes[0], 1 * COIN)
# Ensure nodes are synced
self.sync_all()
self.log.info("Running test simple doublespend...")
self.test_simple_doublespend()
self.log.info("Running test doublespend chain...")
self.test_doublespend_chain()
self.log.info("Running test doublespend tree...")
self.test_doublespend_tree()
self.log.info("Running test replacement feeperkb...")
self.test_replacement_fee_per_kb()
self.log.info("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
self.log.info("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
self.log.info("Running test too many replacements...")
self.test_too_many_replacements()
self.log.info("Running test opt-in...")
self.test_opt_in()
self.log.info("Running test RPC...")
self.test_rpc()
self.log.info("Running test prioritised transactions...")
self.test_prioritised_transactions()
self.log.info("All Tests Passed")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
# make_utxo may have generated a bunch of blocks, so we need to sync
# before we can spend the coins generated, or else the resulting
# transactions might not be accepted by our peers.
self.sync_all()
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1a_hex = tx_to_hex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
self.sync_all()
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1b.vout = [CTxOut(1 * COIN, CScript([b'b']))]
tx1b_hex = tx_to_hex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
# This will raise an exception due to transaction replacement being disabled
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
# Extra 0.1 RVN fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b']))]
tx1b_hex = tx_to_hex(tx1b)
# Replacement still disabled even with "enough fee"
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
# Works when enabled
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
# Second node is running mempoolreplacement=0, will not replace originally-seen txn
mempool = self.nodes[1].getrawmempool()
assert tx1a_txid in mempool
assert tx1b_txid not in mempool
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_n_value = 5000 * COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_n_value)
prevout = tx0_outpoint
remaining_value = initial_n_value
chain_txids = []
while remaining_value > 1000 * COIN:
remaining_value -= 100 * COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, n_sequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = tx_to_hex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 RVN - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
dbl_tx.vout = [CTxOut(initial_n_value - 30 * COIN, CScript([1]))]
dbl_tx_hex = tx_to_hex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
dbl_tx.vout = [CTxOut(1 * COIN, CScript([1]))]
dbl_tx_hex = tx_to_hex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert (doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_n_value = 50 * COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_n_value)
def branch(prevout, initial_value, max_txs, tree_width=5, fee_val=0.0001 * COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee_val) // tree_width
if txout_value < fee_val:
return
vout = [CTxOut(txout_value, CScript([i + 1]))
for i in range(tree_width)]
tx_data = CTransaction()
tx_data.vin = [CTxIn(prevout, n_sequence=0)]
tx_data.vout = vout
tx_hex = tx_to_hex(tx_data)
assert (len(tx_data.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx_data
_total_txs[0] += 1
txid = int(txid, 16)
for i, _ in enumerate(tx_data.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee_val=fee_val,
_total_txs=_total_txs):
yield x
fee = int(0.0001 * COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_n_value, n, fee_val=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
dbl_tx.vout = [CTxOut(initial_n_value - fee * n, CScript([1]))]
dbl_tx_hex = tx_to_hex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
# 1 RVN fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
dbl_tx.vout = [CTxOut(initial_n_value - fee * n - 1 * COIN, CScript([1]))]
dbl_tx_hex = tx_to_hex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT + 1, MAX_REPLACEMENT_LIMIT * 2):
fee = int(0.0001 * COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_n_value)
tree_txs = list(branch(tx0_outpoint, initial_n_value, n, fee_val=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
dbl_tx.vout = [CTxOut(initial_n_value - 2 * fee * n, CScript([1]))]
dbl_tx_hex = tx_to_hex(dbl_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_fee_per_kb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1a_hex = tx_to_hex(tx1a)
self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 999000]))]
tx1b_hex = tx_to_hex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2 * COIN))
utxo2 = make_utxo(self.nodes[0], 3 * COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, n_sequence=0)]
tx1a.vout = [CTxOut(int(1.1 * COIN), CScript([b'a']))]
tx1a_hex = tx_to_hex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, n_sequence=0), CTxIn(utxo2, n_sequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0))
tx2.vout = tx1a.vout
tx2_hex = tx_to_hex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0)]
tx1b.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1b_hex = tx_to_hex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, n_sequence=0), CTxIn(utxo2, n_sequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = tx_to_hex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1 * COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1 * COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1_hex = tx_to_hex(tx1)
self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = tx_to_hex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_n_value = 10 * COIN
utxo = make_utxo(self.nodes[0], initial_n_value)
fee = int(0.0001 * COIN)
split_value = int((initial_n_value - fee) / (MAX_REPLACEMENT_LIMIT + 1))
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT + 1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, n_sequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = tx_to_hex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT + 1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), n_sequence=0)]
tx_i.vout = [CTxOut(split_value - fee, CScript([b'a']))]
tx_i_hex = tx_to_hex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value - 100 * fee) * (MAX_REPLACEMENT_LIMIT + 1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT + 1):
inputs.append(CTxIn(COutPoint(txid, i), n_sequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = tx_to_hex(double_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = tx_to_hex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
"""Replacing should only work if orig tx opted in"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0xffffffff)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1a_hex = tx_to_hex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b']))]
tx1b_hex = tx_to_hex(tx1b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, n_sequence=0xfffffffe)]
tx2a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx2a_hex = tx_to_hex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, n_sequence=0)]
tx2b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b']))]
tx2b_hex = tx_to_hex(tx2b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), n_sequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9 * COIN), CScript([b'c'])), CTxOut(int(0.9 * COIN), CScript([b'd']))]
tx3a_hex = tx_to_hex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0)]
tx3b.vout = [CTxOut(int(0.5 * COIN), CScript([b'e']))]
tx3b_hex = tx_to_hex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), n_sequence=0)]
tx3c.vout = [CTxOut(int(0.5 * COIN), CScript([b'f']))]
tx3c_hex = tx_to_hex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1a_hex = tx_to_hex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 740000]))]
tx1b_hex = tx_to_hex(tx1b)
# Verify tx1b cannot replace tx1a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1 * COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert (tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, n_sequence=0)]
tx2a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx2a_hex = tx_to_hex(tx2a)
self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, n_sequence=0)]
tx2b.vout = [CTxOut(int(1.01 * COIN), CScript([b'a']))]
tx2b.rehash()
tx2b_hex = tx_to_hex(tx2b)
# Verify tx2b cannot replace tx2a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1 * COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert (tx2b_txid in self.nodes[0].getrawmempool())
def test_rpc(self):
us0 = self.nodes[0].listunspent()[0]
ins = [us0]
outs = {self.nodes[0].getnewaddress(): Decimal(1.0000000)}
rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True)
rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False)
json0 = self.nodes[0].decoderawtransaction(rawtx0)
json1 = self.nodes[0].decoderawtransaction(rawtx1)
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967295)
rawtx2 = self.nodes[0].createrawtransaction([], outs)
f_raw_tx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True})
f_raw_tx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False})
json0 = self.nodes[0].decoderawtransaction(f_raw_tx2a['hex'])
json1 = self.nodes[0].decoderawtransaction(f_raw_tx2b['hex'])
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967294)
if __name__ == '__main__':
ReplaceByFeeTest().main()
|
py | 1a421dc31a45f4563be2dc1228be0a4ace6f9229 | """Setup script for shreddit.
"""
from setuptools import setup
from codecs import open
from os import path
VERSION = "6.1.0"
DESCRIPTION = " Remove your comment history on Reddit as deleting an account does not do so."
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding='utf-8') as filein:
long_description = filein.read()
setup(
name="shreddit",
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
url="https://github.com/niktheblak/Shreddit",
author="David John",
author_email="[email protected]",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python"],
license="FreeBSD License",
packages=["shreddit"],
install_requires=[
"arrow",
"praw>=4",
"PyYAML",
"requests",
"six",
"loremipsum"],
package_data={
"shreddit": ["*.example"]},
entry_points={
"console_scripts": ["shreddit=shreddit.app:main"]})
|
py | 1a421e3d81f2a0bc9d150e26a043d47a72a643fb | import psycopg2, sqlite3, sys
from dotenv import load_dotenv
import os
from psycopg2.extras import execute_values
#Change these values as needed
load_dotenv() #> loads contents of the .env file into the script's environment
DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")
DB_HOST = os.getenv("DB_HOST")
pgschema="""CREATE TABLE armory_item (
item_id SERIAL PRIMARY KEY,
name VARCHAR(30),
value INTEGER,
weight INTEGER);"""
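# NOTE: pgschema is defined but never executed below; the target table is assumed
# to exist already. Run it once beforehand if needed, e.g.:
#   curpg.execute(pgschema); conpg.commit()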
consq=sqlite3.connect('rpg_db.sqlite3')
cursq=consq.cursor()
conpg = psycopg2.connect(database=DB_NAME, user=DB_USER, password=DB_PASSWORD,
host=DB_HOST)
curpg = conpg.cursor()
query = """
SELECT * FROM armory_item
"""
armory_data = consq.execute(query).fetchall()
insertion_query = f"""
INSERT INTO armory_item (item_id, name, value, weight) VALUES %s
"""
execute_values(curpg, insertion_query, armory_data)
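# Persist the inserted rows. A fuller script would also close the cursors and
# connections afterwards, e.g. curpg.close(); conpg.close(); cursq.close(); consq.close()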
conpg.commit() |
py | 1a421f099b6359247cba3d3f1d685609c6043dc1 | import json
import math
import os.path
from src.cleaning.clean_drinks_4 import main as clean_drinks
from pathlib import Path
# clean_json_5.py
def main():
print("Cleaning json from cleaned Drink Data")
if not os.path.isfile(Path("../Savefiles/drinks_C2.txt")):
print("Cleaned Drinks Savefiles not found. Creating one")
clean_drinks()
Drinks = []
before = ("gin", "rum", "vodka", "tequila", "tonic", "coke", "orange juice", "grenadine", "mate", "cola")
after = ("gin", "rum", "vodka", "tequila", "tonic", "coke", "oj", "gren", "mate", "coke")
with open(Path("../Savefiles/drinks_C2.txt"), "r") as f:
items = json.loads(f.read())["items"]
for item in items:
Drink = {}
print(item["drink_name"])
Drink['name'] = item["drink_name"].strip()
Drink['color'] = "black"
full_ammount = 0
for ing in item["ingredients"]:
full_ammount = full_ammount + int(math.ceil(float(ing["ing_ammount"])))
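            # Scale the recipe so its ingredient amounts sum to 200 units (assumed to be ml).
            # An empty recipe would leave full_ammount at 0 and raise ZeroDivisionError below.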
multiplicator = float(200) / float(full_ammount)
Drink["recipe"] = []
for ing in item["ingredients"]:
Drink["recipe"].append({"name": after[before.index(ing["ing_name"])], "amt": int(float(ing["ing_ammount"]) * multiplicator)})
Drinks.append(Drink)
with open(Path("../Savefiles/Drinks.drk"), "w+") as f:
f.write(json.dumps({"Drinks": Drinks}, indent=4, sort_keys=True))
print("Cleaned JSON from cleaned Drink Data")
if __name__ == "__main__":
main()
|
py | 1a421f336125d9bdbb1d0b3f7032dc56152ddc2b | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_instance_request(
resource_group_name: str,
managed_instance_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-05-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
managed_instance_name: str,
distributed_availability_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-05-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups/{distributedAvailabilityGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
"distributedAvailabilityGroupName": _SERIALIZER.url("distributed_availability_group_name", distributed_availability_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
resource_group_name: str,
managed_instance_name: str,
distributed_availability_group_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-05-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups/{distributedAvailabilityGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
"distributedAvailabilityGroupName": _SERIALIZER.url("distributed_availability_group_name", distributed_availability_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
resource_group_name: str,
managed_instance_name: str,
distributed_availability_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-05-01-preview"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups/{distributedAvailabilityGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
"distributedAvailabilityGroupName": _SERIALIZER.url("distributed_availability_group_name", distributed_availability_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_update_request_initial(
resource_group_name: str,
managed_instance_name: str,
distributed_availability_group_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-05-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups/{distributedAvailabilityGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
"distributedAvailabilityGroupName": _SERIALIZER.url("distributed_availability_group_name", distributed_availability_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class DistributedAvailabilityGroupsOperations(object):
"""DistributedAvailabilityGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_instance(
self,
resource_group_name: str,
managed_instance_name: str,
**kwargs: Any
) -> Iterable["_models.DistributedAvailabilityGroupsListResult"]:
"""Gets a list of a distributed availability groups in instance.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DistributedAvailabilityGroupsListResult or the
result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.DistributedAvailabilityGroupsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DistributedAvailabilityGroupsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_instance_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_instance.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_instance_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DistributedAvailabilityGroupsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_instance.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
managed_instance_name: str,
distributed_availability_group_name: str,
**kwargs: Any
) -> "_models.DistributedAvailabilityGroup":
"""Gets a distributed availability group info.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param distributed_availability_group_name: The distributed availability group name.
:type distributed_availability_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DistributedAvailabilityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.DistributedAvailabilityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DistributedAvailabilityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
distributed_availability_group_name=distributed_availability_group_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DistributedAvailabilityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups/{distributedAvailabilityGroupName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
managed_instance_name: str,
distributed_availability_group_name: str,
parameters: "_models.DistributedAvailabilityGroup",
**kwargs: Any
) -> Optional["_models.DistributedAvailabilityGroup"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.DistributedAvailabilityGroup"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'DistributedAvailabilityGroup')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
distributed_availability_group_name=distributed_availability_group_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DistributedAvailabilityGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DistributedAvailabilityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups/{distributedAvailabilityGroupName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
managed_instance_name: str,
distributed_availability_group_name: str,
parameters: "_models.DistributedAvailabilityGroup",
**kwargs: Any
) -> LROPoller["_models.DistributedAvailabilityGroup"]:
"""Creates a distributed availability group between Sql On-Prem and Sql Managed Instance.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param distributed_availability_group_name: The distributed availability group name.
:type distributed_availability_group_name: str
:param parameters: The distributed availability group info.
:type parameters: ~azure.mgmt.sql.models.DistributedAvailabilityGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DistributedAvailabilityGroup or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.sql.models.DistributedAvailabilityGroup]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DistributedAvailabilityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
distributed_availability_group_name=distributed_availability_group_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DistributedAvailabilityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups/{distributedAvailabilityGroupName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
managed_instance_name: str,
distributed_availability_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
distributed_availability_group_name=distributed_availability_group_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups/{distributedAvailabilityGroupName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
managed_instance_name: str,
distributed_availability_group_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Drops a distributed availability group between Sql On-Prem and Sql Managed Instance.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param distributed_availability_group_name: The distributed availability group name.
:type distributed_availability_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
distributed_availability_group_name=distributed_availability_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups/{distributedAvailabilityGroupName}'} # type: ignore
def _update_initial(
self,
resource_group_name: str,
managed_instance_name: str,
distributed_availability_group_name: str,
parameters: "_models.DistributedAvailabilityGroup",
**kwargs: Any
) -> Optional["_models.DistributedAvailabilityGroup"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.DistributedAvailabilityGroup"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'DistributedAvailabilityGroup')
request = build_update_request_initial(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
distributed_availability_group_name=distributed_availability_group_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DistributedAvailabilityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups/{distributedAvailabilityGroupName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
managed_instance_name: str,
distributed_availability_group_name: str,
parameters: "_models.DistributedAvailabilityGroup",
**kwargs: Any
) -> LROPoller["_models.DistributedAvailabilityGroup"]:
"""Updates a distributed availability group replication mode.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param distributed_availability_group_name: The distributed availability group name.
:type distributed_availability_group_name: str
:param parameters: The distributed availability group info.
:type parameters: ~azure.mgmt.sql.models.DistributedAvailabilityGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DistributedAvailabilityGroup or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.sql.models.DistributedAvailabilityGroup]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DistributedAvailabilityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
distributed_availability_group_name=distributed_availability_group_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DistributedAvailabilityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/distributedAvailabilityGroups/{distributedAvailabilityGroupName}'} # type: ignore
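# --- Usage sketch (illustrative only, not generated code) -------------------
# A minimal, hedged example of driving these operations through
# SqlManagementClient. The credential type, subscription id, and all resource
# names below are placeholders/assumptions, not values taken from this file.
def _example_begin_create_or_update():  # pragma: no cover
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.sql import SqlManagementClient

    client = SqlManagementClient(DefaultAzureCredential(), "<subscription-id>")
    poller = client.distributed_availability_groups.begin_create_or_update(
        resource_group_name="example-rg",                    # placeholder
        managed_instance_name="example-mi",                  # placeholder
        distributed_availability_group_name="example-dag",   # placeholder
        parameters=_models.DistributedAvailabilityGroup(),   # fill in link properties
    )
    return poller.result()  # blocks until the long-running operation completes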
|
py | 1a421f9ec3e4f962e51d6cbcd9e9b1fd1bb644c6 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ComputeCapacityReservationSummary(object):
"""
Summary information for a compute capacity reservation.
"""
def __init__(self, **kwargs):
"""
Initializes a new ComputeCapacityReservationSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this ComputeCapacityReservationSummary.
:type id: str
:param compartment_id:
The value to assign to the compartment_id property of this ComputeCapacityReservationSummary.
:type compartment_id: str
:param display_name:
The value to assign to the display_name property of this ComputeCapacityReservationSummary.
:type display_name: str
:param defined_tags:
The value to assign to the defined_tags property of this ComputeCapacityReservationSummary.
:type defined_tags: dict(str, dict(str, object))
:param freeform_tags:
The value to assign to the freeform_tags property of this ComputeCapacityReservationSummary.
:type freeform_tags: dict(str, str)
:param lifecycle_state:
The value to assign to the lifecycle_state property of this ComputeCapacityReservationSummary.
:type lifecycle_state: str
:param availability_domain:
The value to assign to the availability_domain property of this ComputeCapacityReservationSummary.
:type availability_domain: str
:param reserved_instance_count:
The value to assign to the reserved_instance_count property of this ComputeCapacityReservationSummary.
:type reserved_instance_count: int
:param used_instance_count:
The value to assign to the used_instance_count property of this ComputeCapacityReservationSummary.
:type used_instance_count: int
:param is_default_reservation:
The value to assign to the is_default_reservation property of this ComputeCapacityReservationSummary.
:type is_default_reservation: bool
:param time_created:
The value to assign to the time_created property of this ComputeCapacityReservationSummary.
:type time_created: datetime
"""
self.swagger_types = {
'id': 'str',
'compartment_id': 'str',
'display_name': 'str',
'defined_tags': 'dict(str, dict(str, object))',
'freeform_tags': 'dict(str, str)',
'lifecycle_state': 'str',
'availability_domain': 'str',
'reserved_instance_count': 'int',
'used_instance_count': 'int',
'is_default_reservation': 'bool',
'time_created': 'datetime'
}
self.attribute_map = {
'id': 'id',
'compartment_id': 'compartmentId',
'display_name': 'displayName',
'defined_tags': 'definedTags',
'freeform_tags': 'freeformTags',
'lifecycle_state': 'lifecycleState',
'availability_domain': 'availabilityDomain',
'reserved_instance_count': 'reservedInstanceCount',
'used_instance_count': 'usedInstanceCount',
'is_default_reservation': 'isDefaultReservation',
'time_created': 'timeCreated'
}
self._id = None
self._compartment_id = None
self._display_name = None
self._defined_tags = None
self._freeform_tags = None
self._lifecycle_state = None
self._availability_domain = None
self._reserved_instance_count = None
self._used_instance_count = None
self._is_default_reservation = None
self._time_created = None
@property
def id(self):
"""
**[Required]** Gets the id of this ComputeCapacityReservationSummary.
The OCID of the instance reservation configuration.
:return: The id of this ComputeCapacityReservationSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ComputeCapacityReservationSummary.
The OCID of the instance reservation configuration.
:param id: The id of this ComputeCapacityReservationSummary.
:type: str
"""
self._id = id
@property
def compartment_id(self):
"""
Gets the compartment_id of this ComputeCapacityReservationSummary.
The OCID of the compartment.
:return: The compartment_id of this ComputeCapacityReservationSummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this ComputeCapacityReservationSummary.
The OCID of the compartment.
:param compartment_id: The compartment_id of this ComputeCapacityReservationSummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def display_name(self):
"""
Gets the display_name of this ComputeCapacityReservationSummary.
A user-friendly name for the capacity reservation. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
Example: `My Reservation`
:return: The display_name of this ComputeCapacityReservationSummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this ComputeCapacityReservationSummary.
A user-friendly name for the capacity reservation. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
Example: `My Reservation`
:param display_name: The display_name of this ComputeCapacityReservationSummary.
:type: str
"""
self._display_name = display_name
@property
def defined_tags(self):
"""
Gets the defined_tags of this ComputeCapacityReservationSummary.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this ComputeCapacityReservationSummary.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this ComputeCapacityReservationSummary.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this ComputeCapacityReservationSummary.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this ComputeCapacityReservationSummary.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this ComputeCapacityReservationSummary.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this ComputeCapacityReservationSummary.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this ComputeCapacityReservationSummary.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def lifecycle_state(self):
"""
Gets the lifecycle_state of this ComputeCapacityReservationSummary.
The current state of the capacity reservation.
:return: The lifecycle_state of this ComputeCapacityReservationSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this ComputeCapacityReservationSummary.
The current state of the capacity reservation.
:param lifecycle_state: The lifecycle_state of this ComputeCapacityReservationSummary.
:type: str
"""
self._lifecycle_state = lifecycle_state
@property
def availability_domain(self):
"""
**[Required]** Gets the availability_domain of this ComputeCapacityReservationSummary.
The availability domain of the capacity reservation.
:return: The availability_domain of this ComputeCapacityReservationSummary.
:rtype: str
"""
return self._availability_domain
@availability_domain.setter
def availability_domain(self, availability_domain):
"""
Sets the availability_domain of this ComputeCapacityReservationSummary.
The availability domain of the capacity reservation.
:param availability_domain: The availability_domain of this ComputeCapacityReservationSummary.
:type: str
"""
self._availability_domain = availability_domain
@property
def reserved_instance_count(self):
"""
Gets the reserved_instance_count of this ComputeCapacityReservationSummary.
The number of instances for which capacity will be held in this
compute capacity reservation. This number is the sum of the values of the `reservedCount` fields
for all of the instance reservation configurations under this reservation.
The purpose of this field is to calculate the percentage usage of the reservation.
:return: The reserved_instance_count of this ComputeCapacityReservationSummary.
:rtype: int
"""
return self._reserved_instance_count
@reserved_instance_count.setter
def reserved_instance_count(self, reserved_instance_count):
"""
Sets the reserved_instance_count of this ComputeCapacityReservationSummary.
The number of instances for which capacity will be held in this
compute capacity reservation. This number is the sum of the values of the `reservedCount` fields
for all of the instance reservation configurations under this reservation.
The purpose of this field is to calculate the percentage usage of the reservation.
:param reserved_instance_count: The reserved_instance_count of this ComputeCapacityReservationSummary.
:type: int
"""
self._reserved_instance_count = reserved_instance_count
@property
def used_instance_count(self):
"""
Gets the used_instance_count of this ComputeCapacityReservationSummary.
The total number of instances currently consuming space in
this compute capacity reservation. This number is the sum of the values of the `usedCount` fields
for all of the instance reservation configurations under this reservation.
The purpose of this field is to calculate the percentage usage of the reservation.
:return: The used_instance_count of this ComputeCapacityReservationSummary.
:rtype: int
"""
return self._used_instance_count
@used_instance_count.setter
def used_instance_count(self, used_instance_count):
"""
Sets the used_instance_count of this ComputeCapacityReservationSummary.
The total number of instances currently consuming space in
this compute capacity reservation. This number is the sum of the values of the `usedCount` fields
for all of the instance reservation configurations under this reservation.
The purpose of this field is to calculate the percentage usage of the reservation.
:param used_instance_count: The used_instance_count of this ComputeCapacityReservationSummary.
:type: int
"""
self._used_instance_count = used_instance_count
@property
def is_default_reservation(self):
"""
Gets the is_default_reservation of this ComputeCapacityReservationSummary.
Whether this capacity reservation is the default.
For more information, see `Capacity Reservations`__.
__ https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/reserve-capacity.htm#default
:return: The is_default_reservation of this ComputeCapacityReservationSummary.
:rtype: bool
"""
return self._is_default_reservation
@is_default_reservation.setter
def is_default_reservation(self, is_default_reservation):
"""
Sets the is_default_reservation of this ComputeCapacityReservationSummary.
Whether this capacity reservation is the default.
For more information, see `Capacity Reservations`__.
__ https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/reserve-capacity.htm#default
:param is_default_reservation: The is_default_reservation of this ComputeCapacityReservationSummary.
:type: bool
"""
self._is_default_reservation = is_default_reservation
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this ComputeCapacityReservationSummary.
The date and time the capacity reservation was created, in the format defined by `RFC3339`__.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:return: The time_created of this ComputeCapacityReservationSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this ComputeCapacityReservationSummary.
The date and time the capacity reservation was created, in the format defined by `RFC3339`__.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:param time_created: The time_created of this ComputeCapacityReservationSummary.
:type: datetime
"""
self._time_created = time_created
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
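# Usage sketch (illustrative values only; the kwargs-to-attribute wiring is
# supplied by the @init_model_state_from_kwargs decorator above):
def _example_usage():  # pragma: no cover
    summary = ComputeCapacityReservationSummary(
        id="ocid1.capacityreservation.oc1..exampleuniqueid",  # placeholder OCID
        availability_domain="Uocm:PHX-AD-1",                  # placeholder AD
        reserved_instance_count=10,
        used_instance_count=4,
    )
    # The two count fields exist so percentage usage can be computed:
    return summary.used_instance_count / summary.reserved_instance_count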
|
py | 1a42206bd171383e16f17d1d397cdd9b0b375e79 | import io
from setuptools import setup
NAME = 'plex-lastfm-scrobbler'
VERSION = '4.1.1'
description = 'Scrobble audio tracks played via Plex Media Center'
try:
with io.open('README.rst', encoding="utf-8") as fh:
long_description = fh.read()
except IOError:
long_description = description
setup(
name='plex-scrobble',
version=VERSION,
author='Jesse Ward',
author_email='[email protected]',
description=description,
long_description=long_description,
license='MIT',
url='https://github.com/jesseward/plex-lastfm-scrobbler',
packages=['plex_scrobble'],
entry_points={
'console_scripts': [
'plex-scrobble = plex_scrobble.__main__:main'
]
},
install_requires=[
'click>=6.2',
'pylast>=1.6.0',
'toml>=0.9.1',
'requests>=2.12.0',
],
classifiers=[
'Environment :: Console',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
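# Usage sketch (assumes the package is published under the name given to
# setup() above; the PyPI name is an assumption):
#     $ pip install plex-scrobble
#     $ plex-scrobble --help    # console entry point declared in entry_points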
|
py | 1a4221280a384e381b05ebd726bc34004a2ef6d2 | '''
MIT License
Copyright (c) 2021 Chen Guojun
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
bl_info = {
"name": "Grab Frame",
"author": "GuoJun Chen ([email protected])",
"version": (1, 0, 0),
"blender": (2, 80, 0),
"location": "3D View >Tool> Grab Frame",
"description": '''Use keyboard shortcuts to change the current frame by a specified number of frames.ctrl + left arrow : back frame,ctrl + right arrow : forward frame''',
"category": "Animation"}
from . import grab_frame
def register():
grab_frame.register()
def unregister():
grab_frame.unregister()
if __name__ == "__main__":
register()
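# The grab_frame module itself is not included here. As an illustrative
# assumption (not a copy of grab_frame.py), the advertised shortcuts could be
# wired to Blender's built-in frame_offset operator like this:
def _example_keymap():
    import bpy
    kc = bpy.context.window_manager.keyconfigs.addon
    km = kc.keymaps.new(name="3D View", space_type="VIEW_3D")
    back = km.keymap_items.new("screen.frame_offset", "LEFT_ARROW", "PRESS", ctrl=True)
    back.properties.delta = -1  # Ctrl + Left Arrow: back one frame
    fwd = km.keymap_items.new("screen.frame_offset", "RIGHT_ARROW", "PRESS", ctrl=True)
    fwd.properties.delta = 1    # Ctrl + Right Arrow: forward one frame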
|
py | 1a4221cec2b1c6e554fde828393e4686693879fd | import numpy as np
import matplotlib.pyplot as plt
# set width of bar
barWidth = 0.25
# fig = plt.subplots(figsize =(12, 8))
# set height of bar
PDR=[0.633136094675,0.7,0.846153846154,0.990990990991,0.021822849807445]
Filter=[0.723032069970845,0.71,0.88,0.976909413854352,0.217672413793103]
# Set position of bar on X axis
br1 = np.arange(2,len(PDR)+2)
print(br1)
br2 = [x + barWidth for x in br1]
br3 = [x + barWidth for x in br2]
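# Each series is shifted right by one bar width so the grouped bars for a
# given neighbor count sit side by side instead of overlapping.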
colors = iter([plt.cm.tab20(i) for i in range(20)])
# next(colors)
# next(colors)
# next(colors)
# plt.grid(color='#95a5a6', linestyle='--', linewidth=1, axis='y', alpha=0.5)
# Make the plot
plt.bar(br1, Filter, width = barWidth,
edgecolor ='black', label ='Control', color=[next(colors)])
next(colors)
plt.bar(br2, PDR, width = barWidth,
edgecolor ='black', label ='Data',color=[next(colors)],hatch = '/')
# plt.bar(br3, RPL, width = barWidth,
# edgecolor ='black', label ='RPL PDR [#]',color=[next(colors)],hatch = '/')
# Adding Xticks
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel('Number of neighbors', fontweight ='bold', fontsize=15)
plt.ylabel('Reception Ratio [%]', fontweight ='bold', fontsize=15)
# plt.yticks(fontsize=15)
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),
ncol=3, fancybox=True, shadow=True, fontsize=15)
plt.show()
RMSE=[1.43394533652394,1.44394533652394,1.45860840464733,1.47824637203261,2.45796016765343]
# fig = plt.subplots(figsize =(14, 8))
# plt.grid(color='#95a5a6', linestyle='--', linewidth=1, axis='y', alpha=0.5)
plt.bar(br1, RMSE, width = barWidth,
edgecolor ='black', label ='RMSE [m]', color=[next(colors)])
plt.xlabel('Number of neighbors', fontweight ='bold', fontsize=15)
plt.ylabel('Localization Error [m]', fontweight ='bold', fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
# fig = plt.subplots(figsize =(14, 8))
# plt.grid(color='#95a5a6', linestyle='--', linewidth=1, axis='y', alpha=0.5)
delay=[191.863,200.1,209.961,218.996, 861.0759]
plt.bar(br1, delay, width = barWidth,
edgecolor ='black', label ='E2E delay [ms]', color=[next(colors)])
plt.xlabel('Number of neighbors', fontweight ='bold', fontsize=15)
plt.ylabel('E2E delay [ms]', fontweight ='bold', fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
|
py | 1a4221daf3c42c8535877258082ffe7bbac722f4 | from django.apps import AppConfig
class NoticeConfig(AppConfig):
name = 'notice'
    verbose_name = '通知管理'  # "Notice management"
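# Sketch: this config would typically be referenced from settings.py, e.g.
#     INSTALLED_APPS = [..., "notice.apps.NoticeConfig"]
# (the "notice.apps" module path is an assumption about the project layout).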
|
py | 1a42222788bda191298cd84ef7e33440bd273d2e | #!/usr/bin/env python3
import os
import random # Discuss: random module
import sys
# Constants
# Discuss: set data structure
NSFW = {'bong', 'sodomized', 'kiss', 'head-in', 'telebears'}
# Main Execution
def main():
characters = [] # Discuss: os.popen
for index, line in enumerate(os.popen('cowsay -l')):
if not index: # Discuss: enumerate
continue
for character in line.split(): # Review: str.split
if character not in NSFW: # Review: searching collection
characters.append(character) # Review: list.append
selected = random.choice(characters)
    os.system(f'cowsay -f {selected}')              # Variant: check exit status
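    # subprocess.run(['cowsay', '-f', selected], check=True) is a shell-free
    # alternative that raises CalledProcessError on a non-zero exit status.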
if __name__ == '__main__':
main()
|
py | 1a4222956adbbcb678d8e64e28683272b508be4c | def test_eth_sendTransaction(rpc_client, accounts):
for _ in range(3):
rpc_client(
method="eth_sendTransaction",
params=[{
"from": accounts[0],
"to": accounts[1],
"value": 1,
}],
)
for _ in range(5):
rpc_client(
method="eth_sendTransaction",
params=[{
"from": accounts[1],
"to": accounts[0],
"value": 1,
}],
)
account_0_txn_count = rpc_client(
method="eth_getTransactionCount",
params=[accounts[0]],
)
assert account_0_txn_count == "0x3"
account_1_txn_count = rpc_client(
method="eth_getTransactionCount",
params=[accounts[1]],
)
assert account_1_txn_count == "0x5"
account_2_txn_count = rpc_client(
method="eth_getTransactionCount",
params=[accounts[2]],
)
assert account_2_txn_count == "0x0"
|
py | 1a422432391ebe953a4283cb30ec55eaf7253829 | from django.shortcuts import render
from .models import *
from .serializers import *
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import generics
from rest_framework import authentication, permissions
# View all users
class UserListAPIView(generics.ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
# Create a new user
class UserCreateAPIView(generics.CreateAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
# Edit a specific user
class UserUpdateAPIView(generics.RetrieveUpdateAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
lookup_field = 'pk'
# View all newspapers
class NewspaperAPIView(generics.ListAPIView):
queryset = Newspaper.objects.all()
serializer_class = NewspaperSerializer
# Edit a specific newspaper
class NewspaperUpdateAPIView(generics.RetrieveUpdateAPIView):
queryset = Newspaper.objects.all()
serializer_class = NewspaperSerializer
lookup_field = 'pk'
# Create a new newspaper
class NewspaperCreateAPIView(generics.CreateAPIView):
queryset = Newspaper.objects.all()
serializer_class = NewspaperSerializer
# View all post offices
class PostOfficeAPIView(generics.ListAPIView):
queryset = PostOffice.objects.all()
serializer_class = PostOfficeSerializer
# Edit a specific post office
class PostOfficeUpdateAPIView(generics.RetrieveUpdateAPIView):
queryset = PostOffice.objects.all()
serializer_class = PostOfficeSerializer
lookup_field = 'pk'
# Create a new post office
class PostOfficeCreateAPIView(generics.CreateAPIView):
queryset = PostOffice.objects.all()
serializer_class = PostOfficeSerializer
# View all printeries
class PrinteryAPIView(generics.ListAPIView):
queryset = Printery.objects.all()
serializer_class = PrinterySerializer
# Edit a specific printery
class PrinteryUpdateAPIView(generics.RetrieveUpdateAPIView):
queryset = Printery.objects.all()
serializer_class = PrinterySerializer
lookup_field = 'pk'
# Create a new printery
class PrinteryCreateAPIView(generics.CreateAPIView):
queryset = Printery.objects.all()
serializer_class = PrinterySerializer
# View all prints
class PrintAPIView(generics.ListAPIView):
queryset = Print.objects.all()
serializer_class = PrintSerializer
# Edit a specific print
class PrintUpdateAPIView(generics.RetrieveUpdateAPIView):
queryset = Print.objects.all()
serializer_class = PrintSerializer
lookup_field = 'pk'
# Create a new print
class PrintCreateAPIView(generics.CreateAPIView):
queryset = Print.objects.all()
serializer_class = PrintSerializer
# View all newspaper batches
class NewspapersPartyAPIView(generics.ListAPIView):
queryset = NewspapersParty.objects.all()
serializer_class = NewspapersPartySerializer
# Edit a specific newspaper batch
class NewspapersPartyUpdateAPIView(generics.RetrieveUpdateAPIView):
queryset = NewspapersParty.objects.all()
serializer_class = NewspapersPartySerializer
lookup_field = 'pk'
# Create a new newspaper batch
class NewspapersPartyCreateAPIView(generics.CreateAPIView):
queryset = NewspapersParty.objects.all()
serializer_class = NewspapersPartySerializer
# View all reports
class DistributionReportAPIView(generics.ListAPIView):
queryset = DistributionReport.objects.all()
serializer_class = DistributionReportSerializer
# Edit a specific report
class DistributionReportUpdateAPIView(generics.RetrieveUpdateAPIView):
queryset = DistributionReport.objects.all()
serializer_class = DistributionReportSerializer
lookup_field = 'pk'
# Create a new report
class DistributionReportCreateAPIView(generics.CreateAPIView):
queryset = DistributionReport.objects.all()
serializer_class = DistributionReportSerializer
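# A hedged routing sketch; the URL patterns below are assumptions, not taken
# from this project's actual urls.py:
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('users/', views.UserListAPIView.as_view()),
#         path('users/create/', views.UserCreateAPIView.as_view()),
#         path('users/<int:pk>/', views.UserUpdateAPIView.as_view()),
#         path('newspapers/', views.NewspaperAPIView.as_view()),
#         path('newspapers/<int:pk>/', views.NewspaperUpdateAPIView.as_view()),
#     ]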
|
py | 1a422445503559f8f99277bfdb4c1af71d054fc8 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.types import is_compatible_type
from coremltools.converters.mil.mil.types.symbolic import is_symbolic, any_symbolic
from . import SPACES
from .block import curr_block
from .input_type import TupleInputType, DefaultInputs
from .var import Var, InternalVar, ListVar
VALUE = 1
SYMBOL = 2
NONE = 4
ALL = 7
def _is_compatible_symbolic_array(a, b):
"""
    A helper function that checks whether two numpy arrays with symbolic values are compatible.
For instance, a = np.array([is0, is2])
b = np.array([is1, 1])
are considered compatible.
a = np.array([is0, 1])
b = np.array([is1, -1])
are not.
"""
if not a.shape == b.shape:
return False
a = a.flatten()
b = b.flatten()
for t, v in zip(a, b):
if not is_symbolic(t) and not is_symbolic(v):
if t != v:
return False
return True
def precondition(allow=ALL):
"""
A helper decorator for value_inference method.
Decorate value_inference with parameter VALUE/SYMBOL/NONE or ALL.
For VALUE/SYMBOL/NONE use logical or ( | ) for multiple allowance.
Note that:
1. ALL == VALUE | SYMBOL | NONE
2. Chosen flag (some or all VALUE/SYMBOL/NONE) must be satisfied
    by EVERY INPUT for the precondition to be satisfied.
The meaning for each flag is:
VALUE: value that can be materialized during compile time
    SYMBOL: value that cannot be materialized but exists as a symbolic value
NONE: a None value
Usage:
@precondition(allow=VALUE|SYMBOL)
def value_inference(self):
'''some value_inference implementation'''
"""
ALLOW_VALUE = allow & VALUE
ALLOW_SYMBOL = allow & SYMBOL
ALLOW_NONE = allow & NONE
def process(v, has_value, has_symbol, has_none):
"""
v: Var
Return updated has_value, has_symbol, has_none
"""
if any_symbolic(v.sym_val):
return has_value, True, has_none
elif v.val is None:
return has_value, has_symbol, True
return True, has_symbol, has_none
def decorator(func):
def wrapper(self):
HAS_VALUE = False
HAS_SYMBOL = False
HAS_NONE = False
for in_name, in_type in self._input_types.items():
if in_type.optional:
# Optional inputs are not required to invoke value_inference()
continue
if isinstance(in_type, TupleInputType):
for v in self._input_vars[in_name]:
HAS_VALUE, HAS_SYMBOL, HAS_NONE = process(
v, HAS_VALUE, HAS_SYMBOL, HAS_NONE
)
else:
HAS_VALUE, HAS_SYMBOL, HAS_NONE = process(
self._input_vars[in_name], HAS_VALUE, HAS_SYMBOL, HAS_NONE
)
if HAS_VALUE and not ALLOW_VALUE:
msg = "Implementation of value_inference() for op {} doesn't support input with VALUE"
raise NotImplementedError(msg.format(self.op_type))
elif HAS_SYMBOL and not ALLOW_SYMBOL:
msg = "Implementation of value_inference() for op {} doesn't support input with SYMBOL"
raise NotImplementedError(msg.format(self.op_type))
elif HAS_NONE and not ALLOW_NONE:
msg = "Implementation of value_inference() for op {} doesn't support input with NONE"
raise NotImplementedError(msg.format(self.op_type))
else:
return func(self)
return wrapper
return decorator
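# Illustrative use (schematic sketch, not an op defined in this file): an op
# whose value_inference should run only when every required input carries a
# materialized value, never a symbol or None:
#
#     @precondition(allow=VALUE)
#     def value_inference(self):
#         return self.x.val + self.y.val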
def is_internal_input(arg_name):
return arg_name[0] == "_"
class mil_list(object):
'''
A wrapper around python list
'''
def __init__(self, ls=None):
self.ls = ls if ls is not None else []
if not isinstance(self.ls, list):
raise TypeError("Type of 'ls' must be list in the 'mil_list' class")
class Operation(object):
"""
Represents Operation in MIL.
# Properties
name (str):
The name of the operation
input_types (InputSpec, class attr):
Read-only named input types from all subclasses. Input types are used
to validate `inputs`.
inputs [_input_vars] (dict of str --> Var):
An Operation (subclass of Operation) only has access to input Var,
which is already validated against `input_spec`.
outputs [_output_vars] (list of Var):
List of output var based on type inference. Read-only
"""
def __init__(self, **kwargs):
self._input_types = self.input_spec.input_types
self.name = kwargs.get("name", None)
self._output_vars = None
self._input_vars = {}
self.blocks = []
self.enclosing_block = curr_block()
# Initialize inputs as object attributes (all None)
for k in self._input_types.keys():
setattr(self, k, None)
self._input_vars[k] = None
self._check_expected_inputs(kwargs)
# Set inputs from kwargs
input_kv = {k: v for k, v in kwargs.items() \
if k in self._input_types and v is not None}
self._validate_and_set_inputs(input_kv)
self._ensure_required_inputs()
def _check_expected_inputs(self, kwargs):
"""
Check that all kwargs inputs are one of the followings:
- system inputs (non-attributes)
- op inputs (self._input_types.keys())
"""
non_attributes = [
"name",
"symbolic_datatype",
"datatype",
"symbolic_value",
"value",
"version",
"before_op",
"no_check_var_visibility", # no_check_var_visibility==True to deviate from SSA
"no_check_var_types", # no_check_var_types==True to force set inputs, even if type does not match with earlier ones
]
for k in kwargs.keys():
if k not in non_attributes and k not in self._input_types:
raise ValueError(
"Unknown input '{}' for op '{}'".format(
k, self.op_type)
)
def set_inputs(self,
no_check_var_types=False,
type_inference=False,
**input_kvs):
"""
Parameters
----------
- input_kvs: Dict[str, Var]
Value cannot be None
- type_inference: bool
True to perform type inference and recreate output Var.
"""
self._validate_and_set_inputs(input_kvs,
no_check_var_types=no_check_var_types)
if type_inference and not no_check_var_types:
self.type_value_inference()
self._ensure_required_inputs()
def get_flattened_inputs(self):
"""
Returns:
list[Var]. Flatten all tuple inputs
"""
flat_inputs = []
for v in self.inputs.values():
if isinstance(v, (list, tuple)):
flat_inputs.extend(v)
else:
flat_inputs.append(v)
return flat_inputs
def type_value_inference(self, overwrite_output=False):
"""
Perform type inference and auto_val computation based on new input Vars
in kwargs. If self._output_vars is None then we generate _output_vars;
otherwise no new Var is created, but type inference result is verified
against existing _output_vars, if overwrite_output is False.
If overwrite_output is True, then the type inference result overwrites the
existing _output_vars
"""
output_types = self.type_inference()
if not isinstance(output_types, tuple):
output_types = (output_types,)
output_vals = self._auto_val(output_types)
try:
output_names = self.output_names()
if not isinstance(output_names, tuple):
output_names = (output_names,)
except NotImplementedError as e:
if len(output_types) > 1:
output_names = tuple(str(i) for i, _ in enumerate(output_types))
else:
output_names = ("",) # output name same as op name.
# Combine (output_names, output_types, output_vals) to create output
# Vars.
if self._output_vars is None:
self._output_vars = []
for i, (n, sym_type, sym_val) in enumerate(
zip(output_names, output_types, output_vals)
):
name = self.name + ":" + n if n != "" else self.name
if types.is_list(sym_type):
new_var = ListVar(
name,
elem_type=sym_type.T[0],
init_length=sym_type.T[1],
dynamic_length=sym_type.T[2],
sym_val=sym_val if (sym_val is not None and isinstance(sym_val.val, list)) else None,
op=self,
op_output_idx=i,
)
else:
new_var = Var(name, sym_type, sym_val, op=self, op_output_idx=i)
self._output_vars.append(new_var)
else:
# Check new inference result against existing self._output_vars.
for i, (n, sym_type, sym_val) in enumerate(
zip(output_names, output_types, output_vals)
):
out_var = self._output_vars[i]
# Check type inference
if overwrite_output:
out_var._sym_type = sym_type
elif not is_compatible_type(sym_type, out_var.sym_type):
msg = "Output Var {} in op {} type changes with new input Vars"
raise ValueError(msg.format(out_var.name, self.name))
# Check value inference
if overwrite_output:
out_var._sym_val = sym_val
if sym_val is not None and out_var.sym_val is not None:
if np.any(sym_val.val != out_var.sym_val):
if overwrite_output:
out_var._sym_val = sym_val
else:
msg = 'value_inference differs for var {} in op {}'
if not _is_compatible_symbolic_array(sym_val.val, out_var.sym_val):
raise ValueError(msg.format(out_var.name, self.name))
def _auto_val(self, output_types):
"""
# Evaluation is two stage:
#
# Stage 1: Check whether the method value_inference() is implemented
#
# Stage 2: Check if there's an value_inference() implementation
# for given input types.
#
# Suppose input are all SYMBOL:
# Case 1: No value_inference() implemented => fail at stage 1
# Case 2: If value_inference() implemented, but requires all VALUE not
# SYMBOL => fail at stage 2
# Case 3: If value_inference() implemented, and has no restriction on
# input types => Success
#
# If either stage fails, outputs[i].val is None.
# Otherwise, output[i].sym_val is not None.
output_types: tuple of builtin types
Returns:
output_vals: tuple of builtin type with value, or tuple of None
"""
do_auto_val = True
if do_auto_val:
# Is self.value_inference implemented for corresponding input?
try:
vals = self.value_inference()
except NotImplementedError as e:
do_auto_val = False
if not do_auto_val:
# No auto_val possible.
return tuple(None for _ in output_types)
if not isinstance(vals, (tuple, list)):
vals = (vals,)
for val in vals:
if val is None:
do_auto_val = False
if not do_auto_val:
# No auto_val possible.
return tuple(None for _ in output_types)
auto_val = []
for t, v in zip(output_types, vals):
builtin_val = t()
if isinstance(v, mil_list):
builtin_val.val = v.ls
else:
builtin_val.val = v
auto_val.append(builtin_val)
return auto_val
def value_inference(self):
"""
Optional Python implementation of the op based on (materialized) values
in `self.input_var`. Return a builtin value (single output) or a tuple of
builtin values (multi-outputs) of the same length as returned by `
type_inference`
"""
msg = "value_inference() is not implemented by op {}"
raise NotImplementedError(msg.format(self.op_type))
def default_inputs(self):
"""
Optional. Returns default values for optional inputs. The
function is guaranteed to have access to all required inputs and
possibly some optional inputs should the user supply them.
They may be used to construct default values, such as
`strides=[1]*num_spatial_dims` in conv, where
`num_spatial_dims` may be inferred from the rank of
required inputs
"""
return DefaultInputs()
def output_names(self):
"""
Optional. If implemented, we set the output var i name as
self.name + "/" + output_names[i]
Returns a string (single output) or tuple of strings
"""
msg = "output_names() is not implemented by op {}"
raise NotImplementedError(msg.format(self.op_type))
def type_inference(self):
"""
Return (builtin_type, builtin_val) pair from type inference.
builtin_val may be None if symbolic_value is not attainable at compile
time.
"""
raise NotImplementedError("This function must be implemented by each op")
def build_nested_blocks(self):
"""
Build nested blocks (for cond and while_loop and other composite
blocks)
"""
pass
def _ensure_required_inputs(self):
"""
Raise value error if required inputs aren't present
"""
for name, input_type in self._input_types.items():
if not input_type.optional and \
self._input_vars[name] is None:
msg_prefix = 'Op \"{}\" (op_type: {}) '.format(self.name,
self.op_type)
raise ValueError(msg_prefix + \
"Required input {} is missing".format(name))
def _validate_and_set_inputs(self, input_kvs,
no_check_var_types=False):
"""
For each k, v in `input_kvs`, perform the followings:
- Check k exists in `self.input_specs`
        - Check that v satisfies the corresponding `InputType`
- Set input, possibly replacing existing input.
Note that it does not ensure all required inputs are satisfied.
Use _ensure_required_inputs() for that.
Parameters
----------
- input_kvs: Dict[str, Var]
Each key in input_kvs must exist in `self.input_specs`. Its values
must be a Var.
- no_check_var_types: bool
True to check var types against input_specs only, but not
enforcing new input vars to be a subtype of existing input vars
"""
for key in input_kvs.keys():
if key not in self._input_types:
raise RuntimeError(
"Unknown input '{}' for op '{}'".format(key, self.op_type)
)
def check_and_detach(v_new, v_old, op, no_check_var_types):
# Check new var's sym_type is compatible with the
# existing's sym_type.
if (
not is_compatible_type(v_new.sym_type, v_old.sym_type)
and not no_check_var_types
):
msg = "New var type {} not a subtype of " + "existing var type {}"
raise ValueError(msg.format(v_new.sym_type, v_old.sym_type))
v_old.remove_child_op(op, no_check_var_types)
self.input_spec.validate_inputs(self.name, self.op_type, input_kvs)
for name, var in input_kvs.items():
# TODO: remove InternalVar check
#if not isinstance(var, InternalVar):
# Remove this operation itself from existing input
# Var's child_ops
existing_input_var = self._input_vars[name]
if existing_input_var is not None:
if isinstance(existing_input_var, (list, tuple)):
for v_old, v_new in zip(existing_input_var, var):
check_and_detach(v_new, v_old, self, no_check_var_types)
else:
check_and_detach(
var, existing_input_var, self, no_check_var_types
)
# Set var as input_var
if isinstance(var, Var):
var.add_child_op(self)
elif isinstance(var, (tuple, list)):
for v in var:
v.add_child_op(self)
# ignore function inputs
self._input_vars[name] = var
setattr(self, name, var)
@property
def inputs(self):
"""
Returns
-------
- inputs: Dict[str, Union[Var, Tuple[Var]]]
"""
# Filter out InternalVar
return {k: v for k, v in self._input_vars.items() if not
isinstance(v, InternalVar) and v is not None}
@property
def outputs(self):
return self._output_vars
@property
def op_type(self):
return type(self).__name__
def remove_from_block(self):
"""
Remove / detach itself from the enclosing block. See Block.remove_ops
for details.
"""
self.enclosing_block.remove_ops([self])
@staticmethod
def var_to_str(v):
if isinstance(v, (tuple, list)):
return "(" + ", ".join(["%" + s.name for s in v]) + ")"
else:
return "%" + v.name
def indented_str(self, indent=""):
s = indent
if self.outputs is not None:
s += ", ".join([str(o) for o in self.outputs])
s += " = " + self.op_type + "("
if self.op_type == "const":
if self.mode.val == "immediate_value":
if isinstance(self.val.sym_val, (np.generic, np.ndarray)):
val_str = str(self.val.sym_val.tolist())
else:
val_str = (
'"' + self.val.sym_val + '"'
if isinstance(self.val.sym_val, str)
else str(self.val.sym_val)
)
s += "val=" + val_str
else:
s += "val=(file_value)"
else:
s += ", ".join(
[
k + "=" + Operation.var_to_str(self.inputs[k])
for k in self._input_types.keys()
if k in self.inputs and not is_internal_input(k)
]
)
s += ', name="{}")\n'.format(self.name)
for b in self.blocks:
s += b.indented_str(indent=indent + SPACES)
return s
def __repr__(self):
return str(self)
def __str__(self):
return self.indented_str(SPACES)
|
py | 1a4225c6adaf386d00dc65d192ceaef145c4b3a5 | import sqlite3
from abc import abstractmethod
from ipaddress import IPv4Address
from ipaddress import IPv6Address as IPv6AddressPython
from typing import (Callable, FrozenSet, Generic, Optional, Set, Sized,
TypeVar, Union)
from .blockchain import Miner, Node, Version
from .db import Cursor, Database, ForeignKey, Model, Table
from .geolocation import Geolocation
from .serialization import DateTime, IntFlag, IPv6Address
N = TypeVar("N", bound=Node)
class HostInfo(Model):
ip: IPv6Address
isp: str
os: str
timestamp: DateTime
def __hash__(self):
return hash(self.ip)
class CrawlState(IntFlag):
UNKNOWN = 0
DISCOVERED = 1
GEOLOCATED = 2
ATTEMPTED_CONNECTION = DISCOVERED | 4
CONNECTION_FAILED = ATTEMPTED_CONNECTION | 8
CONNECTED = ATTEMPTED_CONNECTION | 16
CONNECTION_RESET = CONNECTED | 32
REQUESTED_NEIGHBORS = CONNECTED | 64
GOT_NEIGHBORS = REQUESTED_NEIGHBORS | 128
REQUESTED_VERSION = CONNECTED | 256
GOT_VERSION = REQUESTED_VERSION | 512
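    # The composed values make later states imply earlier ones, e.g.
    # (illustrative):
    #   state = CrawlState.GOT_NEIGHBORS
    #   bool(state & CrawlState.CONNECTED)  # True: implied by GOT_NEIGHBORS
    #   (state & CrawlState.REQUESTED_VERSION) == CrawlState.REQUESTED_VERSION
    #   # -> False here: the version flags were never set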
class CrawledNode(Model["CrawlDatabase"]):
ip: IPv6Address
port: int
is_miner: Miner
state: CrawlState
source: str
def __hash__(self):
return hash((self.ip, self.port))
def get_events(self) -> Cursor["CrawlEvent"]:
return self.db.events.select(
node=self.rowid, order_by="timestamp", order_direction="DESC"
)
def get_version(self) -> Optional[Version]:
for version_event in self.db.events.select(
node=self.rowid,
order_by="timestamp",
order_direction="DESC",
limit=1,
event="version",
):
return Version(version_event.description, version_event.timestamp)
return None
def get_location(self) -> Optional[Geolocation]:
return self.db.locations.select(
ip=self.ip, order_by="timestamp DESC", limit=1
).fetchone()
def last_crawled(self) -> Optional[DateTime]:
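        # Anti-join: keep only the edge with no later edge for the same
        # from_node, i.e. the most recently recorded crawl of this node.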
max_edge = Cursor(
self.db.edges,
"SELECT a.* FROM edges a LEFT OUTER JOIN edges b ON a.rowid = b.rowid AND a.timestamp < b.timestamp "
"WHERE b.rowid is NULL AND a.from_node = ? LIMIT 1",
(self.rowid,),
).fetchone()
if max_edge is None:
return None
return max_edge.timestamp
def get_latest_edges(self) -> Set["CrawledNode"]:
return {
edge.to_node.row
for edge in Cursor(
self.db.edges,
"SELECT a.* FROM edges a LEFT OUTER JOIN edges b ON a.rowid = b.rowid AND a.timestamp < b.timestamp "
"WHERE b.rowid is NULL AND a.from_node = ?",
(self.rowid,),
)
}
def out_degree(self) -> int:
cur = self.db.con.cursor()
try:
result = cur.execute(
"SELECT count(a.*) FROM edges a "
"LEFT OUTER JOIN edges b ON a.rowid = b.rowid AND a.timestamp < b.timestamp "
"WHERE b.rowid is NULL AND a.from_node = ?",
(self.rowid,),
)
return result.fetchone()[0]
finally:
cur.close()
class Edge(Model):
from_node: ForeignKey["nodes", CrawledNode] # noqa: F821
to_node: ForeignKey["nodes", CrawledNode] # noqa: F821
timestamp: DateTime
class CrawlEvent(Model):
node: ForeignKey["nodes", CrawledNode] # noqa: F821
timestamp: DateTime
event: str
description: str
class CrawlDatabase(Database):
nodes: Table[CrawledNode]
events: Table[CrawlEvent]
locations: Table[Geolocation]
edges: Table[Edge]
hosts: Table[HostInfo]
def __init__(self, path: str = ":memory:"):
super().__init__(path)
@property
def crawled_nodes(self) -> Cursor[CrawledNode]:
return Cursor(
self.nodes,
f"SELECT DISTINCT n.*, n.rowid FROM {self.nodes.name} n WHERE n.state >= ?",
(CrawlState.CONNECTED,),
)
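# Example usage sketch ("crawl.db" is a hypothetical file name; the API is the
# one defined above):
#
#     db = CrawlDatabase("crawl.db")
#     for node in db.crawled_nodes:   # every node we successfully connected to
#         print(node.ip, node.port, node.state)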
class Crawl(Generic[N], Sized):
@abstractmethod
def __contains__(self, node: N) -> bool:
raise NotImplementedError()
@abstractmethod
def __getitem__(self, node: N) -> CrawledNode:
raise NotImplementedError()
@abstractmethod
def get_node(self, node: N) -> CrawledNode:
raise NotImplementedError()
@abstractmethod
def add_event(
self,
node: CrawledNode,
event: str,
description: str,
timestamp: Optional[DateTime] = None,
):
raise NotImplementedError()
@abstractmethod
def set_location(self, ip: IPv6Address, location: Geolocation):
raise NotImplementedError()
@abstractmethod
def get_neighbors(self, node: N) -> FrozenSet[N]:
raise NotImplementedError()
@abstractmethod
def set_neighbors(self, node: N, neighbors: FrozenSet[N]):
raise NotImplementedError()
@abstractmethod
def set_miner(self, node: N, miner: Miner):
raise NotImplementedError()
@abstractmethod
def set_host_info(self, host_info: HostInfo):
raise NotImplementedError()
@abstractmethod
def add_state(self, node: Union[N, CrawledNode], state: CrawlState):
raise NotImplementedError()
@abstractmethod
def update_node(self, node: CrawledNode):
raise NotImplementedError()
def commit(self):
pass
class DatabaseCrawl(Generic[N], Crawl[N]):
def __init__(
self,
constructor: Callable[[Union[str, IPv4Address, IPv6AddressPython], int], N],
db: CrawlDatabase,
):
super().__init__()
self.constructor: Callable[
[Union[str, IPv4Address, IPv6AddressPython], int], N
] = constructor
self.db: CrawlDatabase = db
def __contains__(self, node: N) -> bool:
        return self.db.nodes.select(ip=node.address, port=node.port).fetchone() is not None
def __getitem__(self, node: N) -> CrawledNode:
try:
return next(iter(self.db.nodes.select(ip=node.address, port=node.port)))
except StopIteration:
pass
raise KeyError(node)
def commit(self):
self.db.con.commit()
def get_node(self, node: N) -> CrawledNode:
try:
return self[node]
except KeyError:
# this is a new node
pass
ret = CrawledNode(ip=node.address, port=node.port, source=node.source)
self.db.nodes.append(ret)
return ret
def update_node(self, node: CrawledNode):
with self.db:
self.db.nodes.update(node)
def add_event(
self,
node: CrawledNode,
event: str,
description: str,
timestamp: Optional[DateTime] = None,
):
with self.db:
if timestamp is None:
timestamp = DateTime()
self.db.events.append(
CrawlEvent(
node=node.rowid,
event=event,
description=description,
timestamp=timestamp,
)
)
def get_neighbors(self, node: N) -> FrozenSet[N]:
return frozenset(
{
self.constructor(neighbor.ip, neighbor.port)
for neighbor in self.get_node(node).get_latest_edges()
}
)
def set_neighbors(self, node: N, neighbors: FrozenSet[N]):
with self.db:
crawled_node = self.get_node(node)
timestamp = DateTime()
self.db.edges.extend(
[
Edge(
from_node=crawled_node,
to_node=self.get_node(neighbor),
timestamp=timestamp,
)
for neighbor in neighbors
]
)
self.add_state(node, CrawlState.GOT_NEIGHBORS)
for neighbor in neighbors:
# Make sure we record that we discovered the neighbor
_ = self.get_node(neighbor)
# (simply getting the node for the neighbor will ensure that its state's "discovered" flag is set)
def set_location(self, ip: IPv6Address, location: Geolocation):
with self.db:
self.db.locations.append(location)
def set_miner(self, node: N, miner: Miner):
with self.db:
crawled_node = self.get_node(node)
crawled_node.is_miner = miner
self.db.nodes.update(crawled_node)
def set_host_info(self, host_info: HostInfo):
with self.db:
self.db.hosts.append(host_info)
def add_state(self, node: Union[N, CrawledNode], state: CrawlState):
with self.db:
if isinstance(node, CrawledNode):
crawled_node = node
else:
crawled_node = self.get_node(node)
if crawled_node.state & state != state:
crawled_node.state = crawled_node.state | state
self.db.nodes.update(crawled_node)
def __len__(self) -> int:
return len(self.db.nodes)
|
py | 1a422615d6606ba6772779d8e28d859a0ac1cde7 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import numpy as np
from scipy.constants import mu_0, pi, epsilon_0
from scipy.special import erf
from SimPEG import Utils
def Qfun(R, L, f, alpha=None):
if alpha is None:
omega = np.pi*2*f
tau = L/R
alpha = omega*tau
Q = (alpha**2+1j*alpha) / (1+alpha**2)
return alpha, Q
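# A quick sanity sketch (values chosen purely for illustration): with R = 1 ohm
# and L = 0.1 H, tau = L/R = 0.1 s, so picking f = 1/(2*pi*tau) gives alpha = 1
# and hence Q = (1 + 1j)/2, i.e. equal in-phase and quadrature parts:
#
#     alpha, Q = Qfun(1., 0.1, 1./(2*np.pi*0.1))   # alpha == 1.0, Q == 0.5+0.5j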
def Mijfun(x,y,z,incl,decl,x1,y1,z1,incl1,decl1, area=1.,area0=1.):
"""
    Compute the mutual inductance between two dipole loops.
    Parameters
    ----------
    x : array
        x location of the Tx loop
    y : array
        y location of the Tx loop
    z : array
        z location of the Tx loop
    incl : float
        inclination of the Tx loop's dipole moment, in degrees
    decl : float
        declination of the Tx loop's dipole moment, in degrees
    x1 : array
        x location of the second (Rx) loop
    y1 : array
        y location of the second (Rx) loop
    z1 : array
        z location of the second (Rx) loop
    incl1 : float
        inclination of the second loop's dipole moment, in degrees
    decl1 : float
        declination of the second loop's dipole moment, in degrees
    area : float, optional
        effective area of the Tx loop (default 1.)
    area0 : float, optional
        effective area of the second loop (default 1.)
    """
    # Pretty sure the code below assumes a dipole approximation
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
z = np.array(z, dtype=float)
x1 = np.array(x1, dtype=float)
y1 = np.array(y1, dtype=float)
z1 = np.array(z1, dtype=float)
incl = np.array(incl, dtype=float)
decl = np.array(decl, dtype=float)
incl1 = np.array(incl1, dtype=float)
decl1 = np.array(decl1, dtype=float)
di=np.pi*incl/180.0
dd=np.pi*decl/180.0
cx=np.cos(di)*np.cos(dd)
cy=np.cos(di)*np.sin(dd)
cz=np.sin(di)
ai=np.pi*incl1/180.0
ad=np.pi*decl1/180.0
ax=np.cos(ai)*np.cos(ad)
ay=np.cos(ai)*np.sin(ad)
az=np.sin(ai)
# begin the calculation
a=x-x1
b=y-y1
h=z-z1
rt=np.sqrt(a**2.+b**2.+h**2.)**5.
txy=3.*a*b/rt
txz=3.*a*h/rt
tyz=3.*b*h/rt
txx=(2.*a**2.-b**2.-h**2.)/rt
tyy=(2.*b**2.-a**2.-h**2.)/rt
tzz=-(txx+tyy)
scale = mu_0*np.pi*area*area0/4
# scale = 1.
bx= (txx*cx+txy*cy+txz*cz)
by= (txy*cx+tyy*cy+tyz*cz)
bz= (txz*cx+tyz*cy+tzz*cz)
return scale*(bx*ax+by*ay+bz*az)
def Cfun(L,R,xc,yc,zc,incl,decl,S,ht,f,xyz):
"""
Compute coupling coefficients
.. math::
- \frac{M_{12} M_{23}}{M_{13}L_2}
Parameters
----------
"""
L = np.array(L, dtype=float)
R = np.array(R, dtype=float)
xc = np.array(xc, dtype=float)
yc = np.array(yc, dtype=float)
zc = np.array(zc, dtype=float)
incl = np.array(incl, dtype=float)
decl = np.array(decl, dtype=float)
S = np.array(S, dtype=float)
f = np.array(f, dtype=float)
    # This is a bug (the x and y columns are swapped); it needs to be fixed later
x = xyz[:,1]
y = xyz[:,0]
z = xyz[:,2]
# simulate anomalies
yt=y-S/2.
yr=y+S/2.
dm=-S/2.
dp= S/2.
    # Computes mutual inductances
# Mijfun(x,y,z,incl,decl,x1,y1,z1,incl1,decl1)
M13=Mijfun(0.,dm,0.,90.,0., 0., dp, 0., 90.,0.)
M12=Mijfun(x,yt,z,90.,0.,xc,yc,zc,incl,decl,area=1.,area0=3.)
M23=Mijfun(xc,yc,zc,incl,decl,x,yr,z,90.,0.,area=3.,area0=1.)
C = -M12*M23/(M13*L)
return C, M12, M23, M13*np.ones_like(C)
if __name__ == '__main__':
out = Mijfun(0., 0., 0., 0., 0., 10., 0, 0., 0., 0.)
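    # Analytic check: for two coaxial x-directed unit dipoles a distance r apart
    # (r = 10 m here), txx = 2/r**3 and scale = mu_0*pi/4, so
    # M = mu_0*pi/4 * 2/r**3 = mu_0*pi/(2*r**3) = mu_0*pi/(2*10**3).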
anal = mu_0*np.pi / (2*10**3)
err = abs(out-anal)
print(err)
showIt = False
import matplotlib.pyplot as plt
f = np.logspace(-3, 3, 61)
alpha, Q = Qfun(1., 0.1, f)
if showIt:
plt.semilogx(alpha, Q.real)
plt.semilogx(alpha, Q.imag)
plt.show()
L = 1.
R = 2000.
xc = 0.
yc = 0.
zc = 2.
incl = 0.
decl = 90.
S = 4.
ht = 0.
f = 10000.
xmin = -10.
xmax = 10.
dx = 0.25
xp = np.linspace(xmin, xmax, 101)
yp = xp.copy()
zp = np.r_[-ht]
[Y, X] = np.meshgrid(yp, xp)
xyz = np.c_[X.flatten(), Y.flatten(), np.ones_like(X.flatten())*ht]
C, M12, M23, M13 = Cfun(L,R,xc,yc,zc,incl,decl,S,ht,f,xyz)
[Xp, Yp] = np.meshgrid(xp, yp)
if showIt:
plt.contourf(X, Y, C.reshape(X.shape), 100)
plt.show()
# xyz = np.c_[xp, np.zeros_like(yp), np.zeros_like(yp)]
# C, M12, M23, M13 = Cfun(L,R,xc,yc,zc,incl,decl,S,ht,f,xyz)
# plt.plot(xp, C, 'k')
# plt.plot(xp, M12, 'b')
# plt.plot(xp, M23, 'g')
# plt.plot(xp, M13, 'r')
# plt.show()
|
py | 1a42268efbe4f54456f94492825852f2306a2e71 | import inspect
class ProblemSizeCounter:
def __init__ (self, J, F, L, M, P):
self._initNumberOfVariables(J, F, L, M, P)
self._initNumberOfConstraints(J, F, L, M, P)
def _initNumberOfVariables(self, J, F, L, M, P):
self.numberOfVariablesX = P * L * F
self.numberOfVariablesY = P * F * J
self.totalNumberOfVariables = self.numberOfVariablesX + self.numberOfVariablesY
def _initNumberOfConstraints(self, J, F, L, M, P):
self.numberOfDemandConstraints = J * P
self.numberOfMachineCapacityConstraints = L * F
self.numberOfVariablesCompatibilityConstraints = P * F
self.numberOfResourcesConstraints = M * F
self.totalNumberOfConstraints = self.numberOfDemandConstraints \
+ self.numberOfMachineCapacityConstraints \
+ self.numberOfVariablesCompatibilityConstraints \
+ self.numberOfResourcesConstraints
def __str__ (self):
attributesToPrint = [
"numberOfVariablesX",
"numberOfVariablesY",
"totalNumberOfVariables",
"numberOfDemandConstraints",
"numberOfMachineCapacityConstraints",
"numberOfVariablesCompatibilityConstraints",
"numberOfResourcesConstraints",
"totalNumberOfConstraints"
]
string = "ProblemSizeCounter[\n"
for attribute in attributesToPrint:
value = getattr(self, attribute)
string += f"\t{attribute} = {value}\n"
string += "]"
return string
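# A usage sketch (the dimensions below are made up for illustration, and the
# meanings of J, F, L, M, P are only inferred from the constraint names):
#
#     counter = ProblemSizeCounter(J=10, F=3, L=4, M=2, P=5)
#     print(counter.totalNumberOfVariables)   # 5*4*3 + 5*3*10 = 210
#     print(counter)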
|
py | 1a42275b0d6b34078cf5071b6d75c9fb092b9e47 | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane.plugin.DefaultQubit` device.
"""
import cmath
# pylint: disable=protected-access,cell-var-from-loop
import math
import pytest
import pennylane as qml
from pennylane import numpy as np, DeviceError
from pennylane.operation import Operation
U = np.array(
[
[0.83645892 - 0.40533293j, -0.20215326 + 0.30850569j],
[-0.23889780 - 0.28101519j, -0.88031770 - 0.29832709j],
]
)
U2 = np.array(
[
[
-0.07843244 - 3.57825948e-01j,
0.71447295 - 5.38069384e-02j,
0.20949966 + 6.59100734e-05j,
-0.50297381 + 2.35731613e-01j,
],
[
-0.26626692 + 4.53837083e-01j,
0.27771991 - 2.40717436e-01j,
0.41228017 - 1.30198687e-01j,
0.01384490 - 6.33200028e-01j,
],
[
-0.69254712 - 2.56963068e-02j,
-0.15484858 + 6.57298384e-02j,
-0.53082141 + 7.18073414e-02j,
-0.41060450 - 1.89462315e-01j,
],
[
-0.09686189 - 3.15085273e-01j,
-0.53241387 - 1.99491763e-01j,
0.56928622 + 3.97704398e-01j,
-0.28671074 - 6.01574497e-02j,
],
]
)
U_toffoli = np.diag([1 for i in range(8)])
U_toffoli[6:8, 6:8] = np.array([[0, 1], [1, 0]])
U_swap = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
U_cswap = np.array([[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1]])
H = np.array(
[[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]]
)
THETA = np.linspace(0.11, 1, 3)
PHI = np.linspace(0.32, 1, 3)
VARPHI = np.linspace(0.02, 1, 3)
def prep_par(par, op):
"Convert par into a list of parameters that op expects."
if op.par_domain == "A":
return [np.diag([x, 1]) for x in par]
return par
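# For example (illustrative only): prep_par([0.5], op) returns
# [np.diag([0.5, 1])] when op.par_domain == "A", and [0.5] unchanged otherwise.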
def include_inverses_with_test_data(test_data):
return test_data + [(item[0] + ".inv", item[1], item[2]) for item in test_data]
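# Illustrative example (hypothetical entry): given ("PauliX", state_in, state_out),
# this helper appends ("PauliX.inv", state_in, state_out), so every named gate is
# also exercised through its inverse on the same data.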
class TestApply:
"""Tests that operations and inverses of certain operations are applied correctly or that the proper
errors are raised.
"""
test_data_no_parameters = [
(qml.PauliX, [1, 0], np.array([0, 1])),
(qml.PauliX, [1/math.sqrt(2), 1/math.sqrt(2)], [1/math.sqrt(2), 1/math.sqrt(2)]),
(qml.PauliY, [1, 0], [0, 1j]),
(qml.PauliY, [1/math.sqrt(2), 1/math.sqrt(2)], [-1j/math.sqrt(2), 1j/math.sqrt(2)]),
(qml.PauliZ, [1, 0], [1, 0]),
(qml.PauliZ, [1/math.sqrt(2), 1/math.sqrt(2)], [1/math.sqrt(2), -1/math.sqrt(2)]),
(qml.S, [1, 0], [1, 0]),
(qml.S, [1/math.sqrt(2), 1/math.sqrt(2)], [1/math.sqrt(2), 1j/math.sqrt(2)]),
(qml.T, [1, 0], [1, 0]),
(qml.T, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / math.sqrt(2), np.exp(1j * np.pi / 4) / math.sqrt(2)]),
(qml.Hadamard, [1, 0], [1/math.sqrt(2), 1/math.sqrt(2)]),
(qml.Hadamard, [1/math.sqrt(2), -1/math.sqrt(2)], [0, 1]),
]
test_data_no_parameters_inverses = [
(qml.PauliX, [1, 0], np.array([0, 1])),
(qml.PauliX, [1/math.sqrt(2), 1/math.sqrt(2)], [1/math.sqrt(2), 1/math.sqrt(2)]),
(qml.PauliY, [1, 0], [0, 1j]),
(qml.PauliY, [1/math.sqrt(2), 1/math.sqrt(2)], [-1j/math.sqrt(2), 1j/math.sqrt(2)]),
(qml.PauliZ, [1, 0], [1, 0]),
(qml.PauliZ, [1/math.sqrt(2), 1/math.sqrt(2)], [1/math.sqrt(2), -1/math.sqrt(2)]),
(qml.S, [1, 0], [1, 0]),
(qml.S, [1/math.sqrt(2), 1/math.sqrt(2)], [1/math.sqrt(2), -1j/math.sqrt(2)]),
(qml.T, [1, 0], [1, 0]),
(qml.T, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / math.sqrt(2), np.exp(-1j * np.pi / 4) / math.sqrt(2)]),
(qml.Hadamard, [1, 0], [1/math.sqrt(2), 1/math.sqrt(2)]),
(qml.Hadamard, [1/math.sqrt(2), -1/math.sqrt(2)], [0, 1]),
]
@pytest.mark.parametrize("operation,input,expected_output", test_data_no_parameters)
def test_apply_operation_single_wire_no_parameters(self, qubit_device_1_wire, tol, operation, input, expected_output):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
qubit_device_1_wire._state = np.array(input)
qubit_device_1_wire.apply([operation(wires=[0])])
assert np.allclose(qubit_device_1_wire._state, np.array(expected_output), atol=tol, rtol=0)
@pytest.mark.parametrize("operation,input,expected_output", test_data_no_parameters_inverses)
def test_apply_operation_single_wire_no_parameters_inverse(self, qubit_device_1_wire, tol, operation, input, expected_output):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
qubit_device_1_wire._state = np.array(input)
qubit_device_1_wire.apply([operation(wires=[0]).inv()])
assert np.allclose(qubit_device_1_wire._state, np.array(expected_output), atol=tol, rtol=0)
test_data_two_wires_no_parameters = [
(qml.CNOT, [1, 0, 0, 0], [1, 0, 0, 0]),
(qml.CNOT, [0, 0, 1, 0], [0, 0, 0, 1]),
(qml.CNOT, [1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)], [1 / math.sqrt(2), 0, 1 / math.sqrt(2), 0]),
(qml.SWAP, [1, 0, 0, 0], [1, 0, 0, 0]),
(qml.SWAP, [0, 0, 1, 0], [0, 1, 0, 0]),
(qml.SWAP, [1 / math.sqrt(2), 0, -1 / math.sqrt(2), 0], [1 / math.sqrt(2), -1 / math.sqrt(2), 0, 0]),
(qml.CZ, [1, 0, 0, 0], [1, 0, 0, 0]),
(qml.CZ, [0, 0, 0, 1], [0, 0, 0, -1]),
(qml.CZ, [1 / math.sqrt(2), 0, 0, -1 / math.sqrt(2)], [1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)]),
]
@pytest.mark.parametrize("operation,input,expected_output", test_data_two_wires_no_parameters)
def test_apply_operation_two_wires_no_parameters(self, qubit_device_2_wires, tol, operation, input, expected_output):
"""Tests that applying an operation yields the expected output state for two wire
operations that have no parameters."""
qubit_device_2_wires._state = np.array(input).reshape((2, 2))
qubit_device_2_wires.apply([operation(wires=[0, 1])])
assert np.allclose(qubit_device_2_wires._state.flatten(), np.array(expected_output), atol=tol, rtol=0)
@pytest.mark.parametrize("operation,input,expected_output", test_data_two_wires_no_parameters)
def test_apply_operation_two_wires_no_parameters_inverse(self, qubit_device_2_wires, tol, operation, input, expected_output):
"""Tests that applying an operation yields the expected output state for two wire
operations that have no parameters."""
qubit_device_2_wires._state = np.array(input).reshape((2, 2))
qubit_device_2_wires.apply([operation(wires=[0, 1]).inv()])
assert np.allclose(qubit_device_2_wires._state.flatten(), np.array(expected_output), atol=tol, rtol=0)
test_data_three_wires_no_parameters = [
(qml.CSWAP, [1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0]),
(qml.CSWAP, [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0]),
(qml.CSWAP, [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0, 0]),
]
@pytest.mark.parametrize("operation,input,expected_output", test_data_three_wires_no_parameters)
def test_apply_operation_three_wires_no_parameters(self, qubit_device_3_wires, tol, operation, input, expected_output):
"""Tests that applying an operation yields the expected output state for three wire
operations that have no parameters."""
qubit_device_3_wires._state = np.array(input).reshape((2, 2, 2))
qubit_device_3_wires.apply([operation(wires=[0, 1, 2])])
assert np.allclose(qubit_device_3_wires._state.flatten(), np.array(expected_output), atol=tol, rtol=0)
@pytest.mark.parametrize("operation,input,expected_output", test_data_three_wires_no_parameters)
def test_apply_operation_three_wires_no_parameters_inverse(self, qubit_device_3_wires, tol, operation, input, expected_output):
"""Tests that applying the inverse of an operation yields the expected output state for three wire
operations that have no parameters."""
qubit_device_3_wires._state = np.array(input).reshape((2, 2, 2))
qubit_device_3_wires.apply([operation(wires=[0, 1, 2]).inv()])
assert np.allclose(qubit_device_3_wires._state.flatten(), np.array(expected_output), atol=tol, rtol=0)
@pytest.mark.parametrize("operation,expected_output,par", [
(qml.BasisState, [0, 0, 1, 0], [1, 0]),
(qml.BasisState, [0, 0, 1, 0], [1, 0]),
(qml.BasisState, [0, 0, 0, 1], [1, 1]),
(qml.QubitStateVector, [0, 0, 1, 0], [0, 0, 1, 0]),
(qml.QubitStateVector, [0, 0, 1, 0], [0, 0, 1, 0]),
(qml.QubitStateVector, [0, 0, 0, 1], [0, 0, 0, 1]),
(qml.QubitStateVector, [1/math.sqrt(3), 0, 1/math.sqrt(3), 1/math.sqrt(3)], [1/math.sqrt(3), 0, 1/math.sqrt(3), 1/math.sqrt(3)]),
(qml.QubitStateVector, [1/math.sqrt(3), 0, -1/math.sqrt(3), 1/math.sqrt(3)], [1/math.sqrt(3), 0, -1/math.sqrt(3), 1/math.sqrt(3)]),
])
def test_apply_operation_state_preparation(self, qubit_device_2_wires, tol, operation, expected_output, par):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
par = np.array(par)
qubit_device_2_wires.reset()
qubit_device_2_wires.apply([operation(par, wires=[0, 1])])
assert np.allclose(qubit_device_2_wires._state.flatten(), np.array(expected_output), atol=tol, rtol=0)
test_data_single_wire_with_parameters = [
(qml.PhaseShift, [1, 0], [1, 0], [math.pi / 2]),
(qml.PhaseShift, [0, 1], [0, 1j], [math.pi / 2]),
(qml.PhaseShift, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / math.sqrt(2), 1 / 2 + 1j / 2], [math.pi / 4]),
(qml.RX, [1, 0], [1 / math.sqrt(2), -1j * 1 / math.sqrt(2)], [math.pi / 2]),
(qml.RX, [1, 0], [0, -1j], [math.pi]),
(qml.RX, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 - 1j / 2, 1 / 2 - 1j / 2], [math.pi / 2]),
(qml.RY, [1, 0], [1 / math.sqrt(2), 1 / math.sqrt(2)], [math.pi / 2]),
(qml.RY, [1, 0], [0, 1], [math.pi]),
(qml.RY, [1 / math.sqrt(2), 1 / math.sqrt(2)], [0, 1], [math.pi / 2]),
(qml.RZ, [1, 0], [1 / math.sqrt(2) - 1j / math.sqrt(2), 0], [math.pi / 2]),
(qml.RZ, [0, 1], [0, 1j], [math.pi]),
(qml.RZ, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 - 1j / 2, 1 / 2 + 1j / 2], [math.pi / 2]),
(qml.MultiRZ, [1, 0], [1 / math.sqrt(2) - 1j / math.sqrt(2), 0], [math.pi / 2]),
(qml.MultiRZ, [0, 1], [0, 1j], [math.pi]),
(qml.MultiRZ, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 - 1j / 2, 1 / 2 + 1j / 2], [math.pi / 2]),
(qml.Rot, [1, 0], [1 / math.sqrt(2) - 1j / math.sqrt(2), 0], [math.pi / 2, 0, 0]),
(qml.Rot, [1, 0], [1 / math.sqrt(2), 1 / math.sqrt(2)], [0, math.pi / 2, 0]),
(qml.Rot, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 - 1j / 2, 1 / 2 + 1j / 2], [0, 0, math.pi / 2]),
(qml.Rot, [1, 0], [-1j / math.sqrt(2), -1 / math.sqrt(2)], [math.pi / 2, -math.pi / 2, math.pi / 2]),
(qml.Rot, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 + 1j / 2, -1 / 2 + 1j / 2],
[-math.pi / 2, math.pi, math.pi]),
(qml.QubitUnitary, [1, 0], [1j / math.sqrt(2), 1j / math.sqrt(2)],
[np.array([[1j / math.sqrt(2), 1j / math.sqrt(2)], [1j / math.sqrt(2), -1j / math.sqrt(2)]])]),
(qml.QubitUnitary, [0, 1], [1j / math.sqrt(2), -1j / math.sqrt(2)],
[np.array([[1j / math.sqrt(2), 1j / math.sqrt(2)], [1j / math.sqrt(2), -1j / math.sqrt(2)]])]),
(qml.QubitUnitary, [1 / math.sqrt(2), -1 / math.sqrt(2)], [0, 1j],
[np.array([[1j / math.sqrt(2), 1j / math.sqrt(2)], [1j / math.sqrt(2), -1j / math.sqrt(2)]])]),
(qml.DiagonalQubitUnitary, [1, 0], [-1, 0], [np.array([-1, 1])]),
(qml.DiagonalQubitUnitary, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / math.sqrt(2), 1j / math.sqrt(2)], [np.array([1, 1j])]),
(qml.DiagonalQubitUnitary, [1 / 2, math.sqrt(3) / 4], [cmath.exp(1j * 0.4) / 2, cmath.exp(1j * -0.4) * math.sqrt(3) / 4], [np.array([cmath.exp(1j * 0.4), cmath.exp(1j * -0.4)])]),
]
test_data_single_wire_with_parameters_inverses = [
(qml.PhaseShift, [1, 0], [1, 0], [math.pi / 2]),
(qml.PhaseShift, [0, 1], [0, -1j], [math.pi / 2]),
(qml.PhaseShift, [1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / math.sqrt(2), 1 / 2 - 1j / 2], [math.pi / 4]),
(qml.RX, [1, 0], [1 / math.sqrt(2), 1j * 1 / math.sqrt(2)], [math.pi / 2]),
(qml.RX, [1, 0], [0, 1j], [math.pi]),
(qml.RX, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 + 1j / 2, 1 / 2 + 1j / 2], [math.pi / 2]),
(qml.RY, [1, 0], [1 / math.sqrt(2), -1 / math.sqrt(2)], [math.pi / 2]),
(qml.RY, [1, 0], [0, -1], [math.pi]),
(qml.RY, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1, 0], [math.pi / 2]),
(qml.RZ, [1, 0], [1 / math.sqrt(2) + 1j / math.sqrt(2), 0], [math.pi / 2]),
(qml.RZ, [0, 1], [0, -1j], [math.pi]),
(qml.RZ, [1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / 2 + 1/2*1j, 1 / 2 - 1/2*1j], [math.pi / 2]),
(qml.MultiRZ, [1, 0], [1 / math.sqrt(2) + 1j / math.sqrt(2), 0], [math.pi / 2]),
(qml.MultiRZ, [0, 1], [0, -1j], [math.pi]),
(qml.MultiRZ, [1 / math.sqrt(2), 1 / math.sqrt(2)],
[1 / 2 + 1/2*1j, 1 / 2 - 1/2*1j], [math.pi / 2]),
(qml.DiagonalQubitUnitary, [1, 0], [-1, 0], [np.array([-1, 1])]),
(qml.DiagonalQubitUnitary, [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / math.sqrt(2), -1j / math.sqrt(2)], [np.array([1, 1j])]),
(qml.DiagonalQubitUnitary, [1 / 2, math.sqrt(3) / 4], [cmath.exp(-1j * 0.4) / 2, cmath.exp(1j * 0.4) * math.sqrt(3) / 4], [np.array([cmath.exp(1j * 0.4), cmath.exp(1j * -0.4)])]),
]
@pytest.mark.parametrize("operation,input,expected_output,par", test_data_single_wire_with_parameters)
def test_apply_operation_single_wire_with_parameters(self, qubit_device_1_wire, tol, operation, input, expected_output, par):
"""Tests that applying an operation yields the expected output state for single wire
operations that have parameters."""
qubit_device_1_wire._state = np.array(input)
qubit_device_1_wire.apply([operation(*par, wires=[0])])
assert np.allclose(qubit_device_1_wire._state, np.array(expected_output), atol=tol, rtol=0)
@pytest.mark.parametrize("operation,input,expected_output,par", test_data_single_wire_with_parameters_inverses)
def test_apply_operation_single_wire_with_parameters_inverse(self, qubit_device_1_wire, tol, operation, input, expected_output, par):
"""Tests that applying the inverse of an operation yields the expected output state for single wire
operations that have parameters."""
qubit_device_1_wire._state = np.array(input)
qubit_device_1_wire.apply([operation(*par, wires=[0]).inv()])
assert np.allclose(qubit_device_1_wire._state, np.array(expected_output), atol=tol, rtol=0)
test_data_two_wires_with_parameters = [
(qml.CRX, [0, 1, 0, 0], [0, 1, 0, 0], [math.pi / 2]),
(qml.CRX, [0, 0, 0, 1], [0, 0, -1j, 0], [math.pi]),
(qml.CRX, [0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0], [0, 1 / math.sqrt(2), 1 / 2, -1j / 2], [math.pi / 2]),
(qml.CRY, [0, 0, 0, 1], [0, 0, -1 / math.sqrt(2), 1 / math.sqrt(2)], [math.pi / 2]),
(qml.CRY, [0, 0, 0, 1], [0, 0, -1, 0], [math.pi]),
(qml.CRY, [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [math.pi / 2]),
(qml.CRZ, [0, 0, 0, 1], [0, 0, 0, 1 / math.sqrt(2) + 1j / math.sqrt(2)], [math.pi / 2]),
(qml.CRZ, [0, 0, 0, 1], [0, 0, 0, 1j], [math.pi]),
(qml.CRZ, [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [math.pi / 2]),
(qml.MultiRZ, [0, 0, 0, 1], [0, 0, 0, 1 / math.sqrt(2) - 1j / math.sqrt(2)], [math.pi / 2]),
(qml.MultiRZ, [0, 0, 1, 0], [0, 0, 1j, 0], [math.pi]),
(qml.MultiRZ, [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [1 / 2 - 1j / 2, 1 / 2 + 1j / 2, 0, 0], [math.pi / 2]),
(qml.CRot, [0, 0, 0, 1], [0, 0, 0, 1 / math.sqrt(2) + 1j / math.sqrt(2)], [math.pi / 2, 0, 0]),
(qml.CRot, [0, 0, 0, 1], [0, 0, -1 / math.sqrt(2), 1 / math.sqrt(2)], [0, math.pi / 2, 0]),
(qml.CRot, [0, 0, 1 / math.sqrt(2), 1 / math.sqrt(2)], [0, 0, 1 / 2 - 1j / 2, 1 / 2 + 1j / 2],
[0, 0, math.pi / 2]),
(qml.CRot, [0, 0, 0, 1], [0, 0, 1 / math.sqrt(2), 1j / math.sqrt(2)], [math.pi / 2, -math.pi / 2, math.pi / 2]),
(qml.CRot, [0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0], [0, 1 / math.sqrt(2), 0, -1 / 2 + 1j / 2],
[-math.pi / 2, math.pi, math.pi]),
(qml.QubitUnitary, [1, 0, 0, 0], [1, 0, 0, 0], [np.array(
[[1, 0, 0, 0], [0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0], [0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, 1]])]),
(qml.QubitUnitary, [0, 1, 0, 0], [0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0], [np.array(
[[1, 0, 0, 0], [0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0], [0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, 1]])]),
(qml.QubitUnitary, [1 / 2, 1 / 2, -1 / 2, 1 / 2], [1 / 2, 0, 1 / math.sqrt(2), 1 / 2], [np.array(
[[1, 0, 0, 0], [0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0], [0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, 1]])]),
(qml.DiagonalQubitUnitary, [1, 0, 0, 0], [-1, 0, 0, 0], [np.array([-1, 1, 1, -1])]),
(qml.DiagonalQubitUnitary, [1/math.sqrt(2), 0, 0, 1/math.sqrt(2)], [1/math.sqrt(2), 0, 0, -1/math.sqrt(2)], [np.array([1, 1, 1, -1])]),
(qml.DiagonalQubitUnitary, [0, 0, 1, 0], [0, 0, 1j, 0], [np.array([-1, 1j, 1j, -1])]),
]
test_data_two_wires_with_parameters_inverses = [
(qml.CRX, [0, 1, 0, 0], [0, 1, 0, 0], [math.pi / 2]),
(qml.CRX, [0, 0, 0, 1], [0, 0, 1j, 0], [math.pi]),
(qml.CRX, [0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), 1 / 2, 1j / 2], [math.pi / 2]),
(qml.MultiRZ, [0, 0, 0, 1], [0, 0, 0, 1 / math.sqrt(2) + 1j / math.sqrt(2)], [math.pi / 2]),
(qml.MultiRZ, [0, 0, 1, 0], [0, 0, -1j, 0], [math.pi]),
(qml.MultiRZ, [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [1 / 2 + 1j / 2, 1 / 2 - 1j / 2, 0, 0], [math.pi / 2]),
(qml.DiagonalQubitUnitary, [1, 0, 0, 0], [-1, 0, 0, 0], [np.array([-1, 1, 1, -1])]),
(qml.DiagonalQubitUnitary, [1/math.sqrt(2), 0, 0, 1/math.sqrt(2)], [1/math.sqrt(2), 0, 0, -1/math.sqrt(2)], [np.array([1, 1, 1, -1])]),
(qml.DiagonalQubitUnitary, [0, 0, 1, 0], [0, 0, -1j, 0], [np.array([-1, 1j, 1j, -1])]),
]
@pytest.mark.parametrize("operation,input,expected_output,par", test_data_two_wires_with_parameters)
def test_apply_operation_two_wires_with_parameters(self, qubit_device_2_wires, tol, operation, input, expected_output, par):
"""Tests that applying an operation yields the expected output state for two wire
operations that have parameters."""
qubit_device_2_wires._state = np.array(input).reshape((2, 2))
qubit_device_2_wires.apply([operation(*par, wires=[0, 1])])
assert np.allclose(qubit_device_2_wires._state.flatten(), np.array(expected_output), atol=tol, rtol=0)
@pytest.mark.parametrize("operation,input,expected_output,par", test_data_two_wires_with_parameters_inverses)
def test_apply_operation_two_wires_with_parameters_inverse(self, qubit_device_2_wires, tol, operation, input, expected_output, par):
"""Tests that applying the inverse of an operation yields the expected output state for two wire
operations that have parameters."""
qubit_device_2_wires._state = np.array(input).reshape((2, 2))
qubit_device_2_wires.apply([operation(*par, wires=[0, 1]).inv()])
assert np.allclose(qubit_device_2_wires._state.flatten(), np.array(expected_output), atol=tol, rtol=0)
def test_apply_errors_qubit_state_vector(self, qubit_device_2_wires):
"""Test that apply fails for incorrect state preparation, and > 2 qubit gates"""
with pytest.raises(
ValueError,
match="Sum of amplitudes-squared does not equal one."
):
qubit_device_2_wires.apply([qml.QubitStateVector(np.array([1, -1]), wires=[0])])
with pytest.raises(
ValueError,
match=r"State vector must be of length 2\*\*wires."
):
p = np.array([1, 0, 1, 1, 0]) / np.sqrt(3)
qubit_device_2_wires.apply([qml.QubitStateVector(p, wires=[0, 1])])
with pytest.raises(
DeviceError,
match="Operation QubitStateVector cannot be used after other Operations have already been applied "
"on a default.qubit device."
):
qubit_device_2_wires.reset()
qubit_device_2_wires.apply([
qml.RZ(0.5, wires=[0]),
qml.QubitStateVector(np.array([0, 1, 0, 0]), wires=[0, 1])
])
def test_apply_errors_basis_state(self, qubit_device_2_wires):
with pytest.raises(
ValueError,
match="BasisState parameter must consist of 0 or 1 integers."
):
qubit_device_2_wires.apply([qml.BasisState(np.array([-0.2, 4.2]), wires=[0, 1])])
with pytest.raises(
ValueError,
match="BasisState parameter and wires must be of equal length."
):
qubit_device_2_wires.apply([qml.BasisState(np.array([0, 1]), wires=[0])])
with pytest.raises(
DeviceError,
match="Operation BasisState cannot be used after other Operations have already been applied "
"on a default.qubit device."
):
qubit_device_2_wires.reset()
qubit_device_2_wires.apply([
qml.RZ(0.5, wires=[0]),
qml.BasisState(np.array([1, 1]), wires=[0, 1])
])
class TestExpval:
"""Tests that expectation values are properly calculated or that the proper errors are raised."""
@pytest.mark.parametrize("operation,input,expected_output", [
(qml.PauliX, [1/math.sqrt(2), 1/math.sqrt(2)], 1),
(qml.PauliX, [1/math.sqrt(2), -1/math.sqrt(2)], -1),
(qml.PauliX, [1, 0], 0),
(qml.PauliY, [1/math.sqrt(2), 1j/math.sqrt(2)], 1),
(qml.PauliY, [1/math.sqrt(2), -1j/math.sqrt(2)], -1),
(qml.PauliY, [1, 0], 0),
(qml.PauliZ, [1, 0], 1),
(qml.PauliZ, [0, 1], -1),
(qml.PauliZ, [1/math.sqrt(2), 1/math.sqrt(2)], 0),
(qml.Hadamard, [1, 0], 1/math.sqrt(2)),
(qml.Hadamard, [0, 1], -1/math.sqrt(2)),
(qml.Hadamard, [1/math.sqrt(2), 1/math.sqrt(2)], 1/math.sqrt(2)),
(qml.Identity, [1, 0], 1),
(qml.Identity, [0, 1], 1),
(qml.Identity, [1/math.sqrt(2), -1/math.sqrt(2)], 1),
])
def test_expval_single_wire_no_parameters(self, qubit_device_1_wire, tol, operation, input, expected_output):
"""Tests that expectation values are properly calculated for single-wire observables without parameters."""
obs = operation(wires=[0])
qubit_device_1_wire.reset()
qubit_device_1_wire.apply(
[qml.QubitStateVector(np.array(input), wires=[0])],
obs.diagonalizing_gates()
)
res = qubit_device_1_wire.expval(obs)
assert np.isclose(res, expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("operation,input,expected_output,par", [
(qml.Hermitian, [1, 0], 1, [[1, 1j], [-1j, 1]]),
(qml.Hermitian, [0, 1], 1, [[1, 1j], [-1j, 1]]),
(qml.Hermitian, [1/math.sqrt(2), -1/math.sqrt(2)], 1, [[1, 1j], [-1j, 1]]),
])
def test_expval_single_wire_with_parameters(self, qubit_device_1_wire, tol, operation, input, expected_output, par):
"""Tests that expectation values are properly calculated for single-wire observables with parameters."""
obs = operation(np.array(par), wires=[0])
qubit_device_1_wire.reset()
qubit_device_1_wire.apply(
[qml.QubitStateVector(np.array(input), wires=[0])],
obs.diagonalizing_gates()
)
res = qubit_device_1_wire.expval(obs)
assert np.isclose(res, expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("operation,input,expected_output,par", [
(qml.Hermitian, [1/math.sqrt(3), 0, 1/math.sqrt(3), 1/math.sqrt(3)], 5/3, [[1, 1j, 0, 1], [-1j, 1, 0, 0], [0, 0, 1, -1j], [1, 0, 1j, 1]]),
(qml.Hermitian, [0, 0, 0, 1], 0, [[0, 1j, 0, 0], [-1j, 0, 0, 0], [0, 0, 0, -1j], [0, 0, 1j, 0]]),
(qml.Hermitian, [1/math.sqrt(2), 0, -1/math.sqrt(2), 0], 1, [[1, 1j, 0, 0], [-1j, 1, 0, 0], [0, 0, 1, -1j], [0, 0, 1j, 1]]),
(qml.Hermitian, [1/math.sqrt(3), -1/math.sqrt(3), 1/math.sqrt(6), 1/math.sqrt(6)], 1, [[1, 1j, 0, .5j], [-1j, 1, 0, 0], [0, 0, 1, -1j], [-.5j, 0, 1j, 1]]),
(qml.Hermitian, [1/math.sqrt(2), 0, 0, 1/math.sqrt(2)], 1, [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]),
(qml.Hermitian, [0, 1/math.sqrt(2), -1/math.sqrt(2), 0], -1, [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]),
])
def test_expval_two_wires_with_parameters(self, qubit_device_2_wires, tol, operation, input, expected_output, par):
"""Tests that expectation values are properly calculated for two-wire observables with parameters."""
obs = operation(np.array(par), wires=[0, 1])
qubit_device_2_wires.reset()
qubit_device_2_wires.apply(
[qml.QubitStateVector(np.array(input), wires=[0, 1])],
obs.diagonalizing_gates()
)
res = qubit_device_2_wires.expval(obs)
assert np.isclose(res, expected_output, atol=tol, rtol=0)
def test_expval_estimate(self):
"""Test that the expectation value is not analytically calculated"""
dev = qml.device("default.qubit", wires=1, shots=3, analytic=False)
@qml.qnode(dev)
def circuit():
return qml.expval(qml.PauliX(0))
expval = circuit()
        # With 3 samples we are guaranteed to see a difference between
        # an estimated expectation value and an analytically calculated one
assert expval != 0.0
class TestVar:
"""Tests that variances are properly calculated."""
@pytest.mark.parametrize("operation,input,expected_output", [
(qml.PauliX, [1/math.sqrt(2), 1/math.sqrt(2)], 0),
(qml.PauliX, [1/math.sqrt(2), -1/math.sqrt(2)], 0),
(qml.PauliX, [1, 0], 1),
(qml.PauliY, [1/math.sqrt(2), 1j/math.sqrt(2)], 0),
(qml.PauliY, [1/math.sqrt(2), -1j/math.sqrt(2)], 0),
(qml.PauliY, [1, 0], 1),
(qml.PauliZ, [1, 0], 0),
(qml.PauliZ, [0, 1], 0),
(qml.PauliZ, [1/math.sqrt(2), 1/math.sqrt(2)], 1),
(qml.Hadamard, [1, 0], 1/2),
(qml.Hadamard, [0, 1], 1/2),
(qml.Hadamard, [1/math.sqrt(2), 1/math.sqrt(2)], 1/2),
(qml.Identity, [1, 0], 0),
(qml.Identity, [0, 1], 0),
(qml.Identity, [1/math.sqrt(2), -1/math.sqrt(2)], 0),
])
def test_var_single_wire_no_parameters(self, qubit_device_1_wire, tol, operation, input, expected_output):
"""Tests that variances are properly calculated for single-wire observables without parameters."""
obs = operation(wires=[0])
qubit_device_1_wire.reset()
qubit_device_1_wire.apply(
[qml.QubitStateVector(np.array(input), wires=[0])],
obs.diagonalizing_gates()
)
res = qubit_device_1_wire.var(obs)
assert np.isclose(res, expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("operation,input,expected_output,par", [
(qml.Hermitian, [1, 0], 1, [[1, 1j], [-1j, 1]]),
(qml.Hermitian, [0, 1], 1, [[1, 1j], [-1j, 1]]),
(qml.Hermitian, [1/math.sqrt(2), -1/math.sqrt(2)], 1, [[1, 1j], [-1j, 1]]),
])
def test_var_single_wire_with_parameters(self, qubit_device_1_wire, tol, operation, input, expected_output, par):
"""Tests that variances are properly calculated for single-wire observables with parameters."""
obs = operation(np.array(par), wires=[0])
qubit_device_1_wire.reset()
qubit_device_1_wire.apply(
[qml.QubitStateVector(np.array(input), wires=[0])],
obs.diagonalizing_gates()
)
res = qubit_device_1_wire.var(obs)
assert np.isclose(res, expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("operation,input,expected_output,par", [
(qml.Hermitian, [1/math.sqrt(3), 0, 1/math.sqrt(3), 1/math.sqrt(3)], 11/9, [[1, 1j, 0, 1], [-1j, 1, 0, 0], [0, 0, 1, -1j], [1, 0, 1j, 1]]),
(qml.Hermitian, [0, 0, 0, 1], 1, [[0, 1j, 0, 0], [-1j, 0, 0, 0], [0, 0, 0, -1j], [0, 0, 1j, 0]]),
(qml.Hermitian, [1/math.sqrt(2), 0, -1/math.sqrt(2), 0], 1, [[1, 1j, 0, 0], [-1j, 1, 0, 0], [0, 0, 1, -1j], [0, 0, 1j, 1]]),
(qml.Hermitian, [1/math.sqrt(2), 0, 0, 1/math.sqrt(2)], 0, [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]),
(qml.Hermitian, [0, 1/math.sqrt(2), -1/math.sqrt(2), 0], 0, [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]),
])
def test_var_two_wires_with_parameters(self, qubit_device_2_wires, tol, operation, input, expected_output, par):
"""Tests that variances are properly calculated for two-wire observables with parameters."""
obs = operation(np.array(par), wires=[0, 1])
qubit_device_2_wires.reset()
qubit_device_2_wires.apply(
[qml.QubitStateVector(np.array(input), wires=[0, 1])],
obs.diagonalizing_gates()
)
res = qubit_device_2_wires.var(obs)
assert np.isclose(res, expected_output, atol=tol, rtol=0)
def test_var_estimate(self):
"""Test that the variance is not analytically calculated"""
dev = qml.device("default.qubit", wires=1, shots=3, analytic=False)
@qml.qnode(dev)
def circuit():
return qml.var(qml.PauliX(0))
var = circuit()
# With 3 samples we are guaranteed to see a difference between
# an estimated variance and an analytically calculated one
assert var != 1.0
class TestSample:
"""Tests that samples are properly calculated."""
def test_sample_dimensions(self, qubit_device_2_wires):
"""Tests if the samples returned by the sample function have
the correct dimensions
"""
# Explicitly resetting is necessary as the internal
# state is set to None in __init__ and only properly
# initialized during reset
qubit_device_2_wires.reset()
qubit_device_2_wires.apply(
[qml.RX(1.5708, wires=[0]), qml.RX(1.5708, wires=[1])]
)
qubit_device_2_wires.shots = 10
qubit_device_2_wires._wires_measured = {0}
qubit_device_2_wires._samples = qubit_device_2_wires.generate_samples()
s1 = qubit_device_2_wires.sample(qml.PauliZ(wires=[0]))
assert np.array_equal(s1.shape, (10,))
qubit_device_2_wires.reset()
qubit_device_2_wires.shots = 12
qubit_device_2_wires._wires_measured = {1}
qubit_device_2_wires._samples = qubit_device_2_wires.generate_samples()
s2 = qubit_device_2_wires.sample(qml.PauliZ(wires=[1]))
assert np.array_equal(s2.shape, (12,))
qubit_device_2_wires.reset()
qubit_device_2_wires.shots = 17
qubit_device_2_wires._wires_measured = {0, 1}
qubit_device_2_wires._samples = qubit_device_2_wires.generate_samples()
s3 = qubit_device_2_wires.sample(qml.PauliX(0) @ qml.PauliZ(1))
assert np.array_equal(s3.shape, (17,))
def test_sample_values(self, qubit_device_2_wires, tol):
"""Tests if the samples returned by sample have
the correct values
"""
# Explicitly resetting is necessary as the internal
# state is set to None in __init__ and only properly
# initialized during reset
qubit_device_2_wires.reset()
qubit_device_2_wires.apply([qml.RX(1.5708, wires=[0])])
qubit_device_2_wires._wires_measured = {0}
qubit_device_2_wires._samples = qubit_device_2_wires.generate_samples()
s1 = qubit_device_2_wires.sample(qml.PauliZ(0))
# s1 should only contain 1 and -1, which is guaranteed if
# they square to 1
assert np.allclose(s1**2, 1, atol=tol, rtol=0)
class TestDefaultQubitIntegration:
"""Integration tests for default.qubit. This test ensures it integrates
properly with the PennyLane interface, in particular QNode."""
def test_load_default_qubit_device(self):
"""Test that the default plugin loads correctly"""
dev = qml.device("default.qubit", wires=2)
assert dev.num_wires == 2
assert dev.shots == 1000
assert dev.analytic
assert dev.short_name == "default.qubit"
def test_args(self):
"""Test that the plugin requires correct arguments"""
with pytest.raises(
TypeError, match="missing 1 required positional argument: 'wires'"
):
qml.device("default.qubit")
def test_qubit_circuit(self, qubit_device_1_wire, tol):
"""Test that the default qubit plugin provides correct result for a simple circuit"""
p = 0.543
@qml.qnode(qubit_device_1_wire)
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliY(0))
expected = -np.sin(p)
assert np.isclose(circuit(p), expected, atol=tol, rtol=0)
def test_qubit_identity(self, qubit_device_1_wire, tol):
"""Test that the default qubit plugin provides correct result for the Identity expectation"""
p = 0.543
@qml.qnode(qubit_device_1_wire)
def circuit(x):
"""Test quantum function"""
qml.RX(x, wires=0)
return qml.expval(qml.Identity(0))
assert np.isclose(circuit(p), 1, atol=tol, rtol=0)
def test_nonzero_shots(self, tol):
"""Test that the default qubit plugin provides correct result for high shot number"""
shots = 10 ** 5
dev = qml.device("default.qubit", wires=1)
p = 0.543
@qml.qnode(dev)
def circuit(x):
"""Test quantum function"""
qml.RX(x, wires=0)
return qml.expval(qml.PauliY(0))
runs = []
for _ in range(100):
runs.append(circuit(p))
assert np.isclose(np.mean(runs), -np.sin(p), atol=tol, rtol=0)
@pytest.mark.parametrize("name,expected_output", [
("PauliX", 1),
("PauliY", 1),
("S", -1),
])
def test_inverse_circuit(self, qubit_device_1_wire, tol, name, expected_output):
"""Tests the inverse of supported gates that act on a single wire and are not parameterized"""
op = getattr(qml.ops, name)
@qml.qnode(qubit_device_1_wire)
def circuit():
qml.BasisState(np.array([1]), wires=[0])
op(wires=0).inv()
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("name,expected_output", [
("PauliX", 1),
("PauliY", 1),
("S", -1),
])
def test_inverse_circuit_calling_inv_multiple_times(self, qubit_device_1_wire, tol, name, expected_output):
"""Tests that multiple calls to the inverse of an operation works"""
op = getattr(qml.ops, name)
@qml.qnode(qubit_device_1_wire)
def circuit():
qml.BasisState(np.array([1]), wires=[0])
op(wires=0).inv().inv().inv()
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("name,expected_output,phi", [("RX", 1,
multiplier * 0.5432) for multiplier in range(8)
])
def test_inverse_circuit_with_parameters(self, qubit_device_1_wire, tol, name, expected_output, phi):
"""Tests the inverse of supported gates that act on a single wire and are parameterized"""
@qml.qnode(qubit_device_1_wire)
def circuit():
qml.RX(phi, wires=0)
qml.RX(phi, wires=0).inv()
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("name,expected_output,phi", [("RX", 1,
multiplier * 0.5432) for multiplier in range(8)
])
def test_inverse_circuit_with_parameters_expectation(self, qubit_device_1_wire, tol, name, expected_output, phi):
@qml.qnode(qubit_device_1_wire)
def circuit():
qml.RX(phi, wires=0)
qml.RX(phi, wires=0).inv()
return qml.expval(qml.PauliZ(0).inv())
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
    # This test is run against the state |0> with one Z expval
@pytest.mark.parametrize("name,expected_output", [
("PauliX", -1),
("PauliY", -1),
("PauliZ", 1),
("Hadamard", 0),
])
def test_supported_gate_single_wire_no_parameters(self, qubit_device_1_wire, tol, name, expected_output):
"""Tests supported gates that act on a single wire that are not parameterized"""
op = getattr(qml.ops, name)
assert qubit_device_1_wire.supports_operation(name)
@qml.qnode(qubit_device_1_wire)
def circuit():
op(wires=0)
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
    # This test is run against the state 1/2|00> + sqrt(3)/2|11> with two Z expvals
@pytest.mark.parametrize("name,expected_output", [
("CNOT", [-1/2, 1]),
("SWAP", [-1/2, -1/2]),
("CZ", [-1/2, -1/2]),
])
def test_supported_gate_two_wires_no_parameters(self, qubit_device_2_wires, tol, name, expected_output):
"""Tests supported gates that act on two wires that are not parameterized"""
op = getattr(qml.ops, name)
assert qubit_device_2_wires.supports_operation(name)
@qml.qnode(qubit_device_2_wires)
def circuit():
qml.QubitStateVector(np.array([1/2, 0, 0, math.sqrt(3)/2]), wires=[0, 1])
op(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("name,expected_output", [
("CSWAP", [-1, -1, 1]),
])
def test_supported_gate_three_wires_no_parameters(self, qubit_device_3_wires, tol, name, expected_output):
"""Tests supported gates that act on three wires that are not parameterized"""
op = getattr(qml.ops, name)
assert qubit_device_3_wires.supports_operation(name)
@qml.qnode(qubit_device_3_wires)
def circuit():
qml.BasisState(np.array([1, 0, 1]), wires=[0, 1, 2])
op(wires=[0, 1, 2])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliZ(2))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
    # This test is run with two Z expvals
@pytest.mark.parametrize("name,par,expected_output", [
("BasisState", [0, 0], [1, 1]),
("BasisState", [1, 0], [-1, 1]),
("BasisState", [0, 1], [1, -1]),
("QubitStateVector", [1, 0, 0, 0], [1, 1]),
("QubitStateVector", [0, 0, 1, 0], [-1, 1]),
("QubitStateVector", [0, 1, 0, 0], [1, -1]),
])
def test_supported_state_preparation(self, qubit_device_2_wires, tol, name, par, expected_output):
"""Tests supported state preparations"""
op = getattr(qml.ops, name)
assert qubit_device_2_wires.supports_operation(name)
@qml.qnode(qubit_device_2_wires)
def circuit():
op(np.array(par), wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
    # This test is run with two Z expvals
@pytest.mark.parametrize("name,par,wires,expected_output", [
("BasisState", [1, 1], [0, 1], [-1, -1]),
("BasisState", [1], [0], [-1, 1]),
("BasisState", [1], [1], [1, -1])
])
def test_basis_state_2_qubit_subset(self, qubit_device_2_wires, tol, name, par, wires, expected_output):
"""Tests qubit basis state preparation on subsets of qubits"""
op = getattr(qml.ops, name)
@qml.qnode(qubit_device_2_wires)
def circuit():
op(np.array(par), wires=wires)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
# This test is run with two expvals
@pytest.mark.parametrize("name,par,wires,expected_output", [
("QubitStateVector", [0, 1], [1], [1, -1]),
("QubitStateVector", [0, 1], [0], [-1, 1]),
("QubitStateVector", [1./np.sqrt(2), 1./np.sqrt(2)], [1], [1, 0]),
("QubitStateVector", [1j/2., np.sqrt(3)/2.], [1], [1, -0.5]),
("QubitStateVector", [(2-1j)/3., 2j/3.], [0], [1/9., 1])
])
def test_state_vector_2_qubit_subset(self, qubit_device_2_wires, tol, name, par, wires, expected_output):
"""Tests qubit state vector preparation on subsets of 2 qubits"""
op = getattr(qml.ops, name)
par = np.array(par)
@qml.qnode(qubit_device_2_wires)
def circuit():
op(par, wires=wires)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
# This test is run with three expvals
@pytest.mark.parametrize("name,par,wires,expected_output", [
("QubitStateVector", [1j/np.sqrt(10), (1-2j)/np.sqrt(10), 0, 0, 0, 2/np.sqrt(10), 0, 0],
[0, 1, 2], [1/5., 1., -4/5.]),
("QubitStateVector", [1/np.sqrt(2), 0, 0, 1/np.sqrt(2)], [0, 2], [0., 1., 0.]),
("QubitStateVector", [1 / np.sqrt(2), 0, 0, 1 / np.sqrt(2)], [0, 1], [0., 0., 1.]),
("QubitStateVector", [0, 1, 0, 0, 0, 0, 0, 0], [2, 1, 0], [-1., 1., 1.]),
("QubitStateVector", [0, 1j, 0, 0, 0, 0, 0, 0], [0, 2, 1], [1., -1., 1.]),
("QubitStateVector", [0, 1/np.sqrt(2), 0, 1/np.sqrt(2)], [1, 0], [-1., 0., 1.]),
("QubitStateVector", [0, 1 / np.sqrt(2), 0, 1 / np.sqrt(2)], [0, 1], [0., -1., 1.])
])
def test_state_vector_3_qubit_subset(self, qubit_device_3_wires, tol, name, par, wires, expected_output):
"""Tests qubit state vector preparation on subsets of 3 qubits"""
op = getattr(qml.ops, name)
par = np.array(par)
@qml.qnode(qubit_device_3_wires)
def circuit():
op(par, wires=wires)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliZ(2))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
    # This test is run on the state |0> with one Z expval
@pytest.mark.parametrize("name,par,expected_output", [
("PhaseShift", [math.pi/2], 1),
("PhaseShift", [-math.pi/4], 1),
("RX", [math.pi/2], 0),
("RX", [-math.pi/4], 1/math.sqrt(2)),
("RY", [math.pi/2], 0),
("RY", [-math.pi/4], 1/math.sqrt(2)),
("RZ", [math.pi/2], 1),
("RZ", [-math.pi/4], 1),
("MultiRZ", [math.pi/2], 1),
("MultiRZ", [-math.pi/4], 1),
("Rot", [math.pi/2, 0, 0], 1),
("Rot", [0, math.pi/2, 0], 0),
("Rot", [0, 0, math.pi/2], 1),
("Rot", [math.pi/2, -math.pi/4, -math.pi/4], 1/math.sqrt(2)),
("Rot", [-math.pi/4, math.pi/2, math.pi/4], 0),
("Rot", [-math.pi/4, math.pi/4, math.pi/2], 1/math.sqrt(2)),
("QubitUnitary", [np.array([[1j/math.sqrt(2), 1j/math.sqrt(2)], [1j/math.sqrt(2), -1j/math.sqrt(2)]])], 0),
("QubitUnitary", [np.array([[-1j/math.sqrt(2), 1j/math.sqrt(2)], [1j/math.sqrt(2), 1j/math.sqrt(2)]])], 0),
])
def test_supported_gate_single_wire_with_parameters(self, qubit_device_1_wire, tol, name, par, expected_output):
"""Tests supported gates that act on a single wire that are parameterized"""
op = getattr(qml.ops, name)
assert qubit_device_1_wire.supports_operation(name)
@qml.qnode(qubit_device_1_wire)
def circuit():
op(*par, wires=0)
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
    # This test is run against the state 1/2|00>+sqrt(3)/2|11> with two Z expvals
@pytest.mark.parametrize("name,par,expected_output", [
("CRX", [0], [-1/2, -1/2]),
("CRX", [-math.pi], [-1/2, 1]),
("CRX", [math.pi/2], [-1/2, 1/4]),
("CRY", [0], [-1/2, -1/2]),
("CRY", [-math.pi], [-1/2, 1]),
("CRY", [math.pi/2], [-1/2, 1/4]),
("CRZ", [0], [-1/2, -1/2]),
("CRZ", [-math.pi], [-1/2, -1/2]),
("CRZ", [math.pi/2], [-1/2, -1/2]),
("MultiRZ", [0], [-1/2, -1/2]),
("MultiRZ", [-math.pi], [-1/2, -1/2]),
("MultiRZ", [math.pi/2], [-1/2, -1/2]),
("CRot", [math.pi/2, 0, 0], [-1/2, -1/2]),
("CRot", [0, math.pi/2, 0], [-1/2, 1/4]),
("CRot", [0, 0, math.pi/2], [-1/2, -1/2]),
("CRot", [math.pi/2, 0, -math.pi], [-1/2, -1/2]),
("CRot", [0, math.pi/2, -math.pi], [-1/2, 1/4]),
("CRot", [-math.pi, 0, math.pi/2], [-1/2, -1/2]),
("QubitUnitary", [np.array([[1, 0, 0, 0], [0, 1/math.sqrt(2), 1/math.sqrt(2), 0], [0, 1/math.sqrt(2), -1/math.sqrt(2), 0], [0, 0, 0, 1]])], [-1/2, -1/2]),
("QubitUnitary", [np.array([[-1, 0, 0, 0], [0, 1/math.sqrt(2), 1/math.sqrt(2), 0], [0, 1/math.sqrt(2), -1/math.sqrt(2), 0], [0, 0, 0, -1]])], [-1/2, -1/2]),
])
def test_supported_gate_two_wires_with_parameters(self, qubit_device_2_wires, tol, name, par, expected_output):
"""Tests supported gates that act on two wires wires that are parameterized"""
op = getattr(qml.ops, name)
assert qubit_device_2_wires.supports_operation(name)
@qml.qnode(qubit_device_2_wires)
def circuit():
qml.QubitStateVector(np.array([1/2, 0, 0, math.sqrt(3)/2]), wires=[0, 1])
op(*par, wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("name,state,expected_output", [
("PauliX", [1/math.sqrt(2), 1/math.sqrt(2)], 1),
("PauliX", [1/math.sqrt(2), -1/math.sqrt(2)], -1),
("PauliX", [1, 0], 0),
("PauliY", [1/math.sqrt(2), 1j/math.sqrt(2)], 1),
("PauliY", [1/math.sqrt(2), -1j/math.sqrt(2)], -1),
("PauliY", [1, 0], 0),
("PauliZ", [1, 0], 1),
("PauliZ", [0, 1], -1),
("PauliZ", [1/math.sqrt(2), 1/math.sqrt(2)], 0),
("Hadamard", [1, 0], 1/math.sqrt(2)),
("Hadamard", [0, 1], -1/math.sqrt(2)),
("Hadamard", [1/math.sqrt(2), 1/math.sqrt(2)], 1/math.sqrt(2)),
])
def test_supported_observable_single_wire_no_parameters(self, qubit_device_1_wire, tol, name, state, expected_output):
"""Tests supported observables on single wires without parameters."""
obs = getattr(qml.ops, name)
assert qubit_device_1_wire.supports_observable(name)
@qml.qnode(qubit_device_1_wire)
def circuit():
qml.QubitStateVector(np.array(state), wires=[0])
return qml.expval(obs(wires=[0]))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("name,state,expected_output,par", [
("Identity", [1, 0], 1, []),
("Identity", [0, 1], 1, []),
("Identity", [1/math.sqrt(2), -1/math.sqrt(2)], 1, []),
("Hermitian", [1, 0], 1, [np.array([[1, 1j], [-1j, 1]])]),
("Hermitian", [0, 1], 1, [np.array([[1, 1j], [-1j, 1]])]),
("Hermitian", [1/math.sqrt(2), -1/math.sqrt(2)], 1, [np.array([[1, 1j], [-1j, 1]])]),
])
def test_supported_observable_single_wire_with_parameters(self, qubit_device_1_wire, tol, name, state, expected_output, par):
"""Tests supported observables on single wires with parameters."""
obs = getattr(qml.ops, name)
assert qubit_device_1_wire.supports_observable(name)
@qml.qnode(qubit_device_1_wire)
def circuit():
qml.QubitStateVector(np.array(state), wires=[0])
return qml.expval(obs(*par, wires=[0]))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("name,state,expected_output,par", [
("Hermitian", [1/math.sqrt(3), 0, 1/math.sqrt(3), 1/math.sqrt(3)], 5/3, [np.array([[1, 1j, 0, 1], [-1j, 1, 0, 0], [0, 0, 1, -1j], [1, 0, 1j, 1]])]),
("Hermitian", [0, 0, 0, 1], 0, [np.array([[0, 1j, 0, 0], [-1j, 0, 0, 0], [0, 0, 0, -1j], [0, 0, 1j, 0]])]),
("Hermitian", [1/math.sqrt(2), 0, -1/math.sqrt(2), 0], 1, [np.array([[1, 1j, 0, 0], [-1j, 1, 0, 0], [0, 0, 1, -1j], [0, 0, 1j, 1]])]),
("Hermitian", [1/math.sqrt(3), -1/math.sqrt(3), 1/math.sqrt(6), 1/math.sqrt(6)], 1, [np.array([[1, 1j, 0, .5j], [-1j, 1, 0, 0], [0, 0, 1, -1j], [-.5j, 0, 1j, 1]])]),
("Hermitian", [1/math.sqrt(2), 0, 0, 1/math.sqrt(2)], 1, [np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])]),
("Hermitian", [0, 1/math.sqrt(2), -1/math.sqrt(2), 0], -1, [np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])]),
])
def test_supported_observable_two_wires_with_parameters(self, qubit_device_2_wires, tol, name, state, expected_output, par):
"""Tests supported observables on two wires with parameters."""
obs = getattr(qml.ops, name)
assert qubit_device_2_wires.supports_observable(name)
@qml.qnode(qubit_device_2_wires)
def circuit():
qml.QubitStateVector(np.array(state), wires=[0, 1])
return qml.expval(obs(*par, wires=[0, 1]))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
def test_multi_samples_return_correlated_results(self):
"""Tests if the samples returned by the sample function have
the correct dimensions
"""
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit():
qml.Hadamard(0)
qml.CNOT(wires=[0, 1])
return qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliZ(1))
outcomes = circuit()
assert np.array_equal(outcomes[0], outcomes[1])
@pytest.mark.parametrize("num_wires", [3, 4, 5, 6, 7, 8])
def test_multi_samples_return_correlated_results_more_wires_than_size_of_observable(self, num_wires):
"""Tests if the samples returned by the sample function have
the correct dimensions
"""
dev = qml.device('default.qubit', wires=num_wires)
@qml.qnode(dev)
def circuit():
qml.Hadamard(0)
qml.CNOT(wires=[0, 1])
return qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliZ(1))
outcomes = circuit()
assert np.array_equal(outcomes[0], outcomes[1])
@pytest.mark.parametrize("theta,phi,varphi", list(zip(THETA, PHI, VARPHI)))
class TestTensorExpval:
"""Test tensor expectation values"""
def test_paulix_pauliy(self, theta, phi, varphi, tol):
"""Test that a tensor product involving PauliX and PauliY works correctly"""
dev = qml.device("default.qubit", wires=3)
dev.reset()
obs = qml.PauliX(0) @ qml.PauliY(2)
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
expected = np.sin(theta) * np.sin(phi) * np.sin(varphi)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_pauliz_identity(self, theta, phi, varphi, tol):
"""Test that a tensor product involving PauliZ and Identity works correctly"""
dev = qml.device("default.qubit", wires=3)
dev.reset()
obs = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
expected = np.cos(varphi)*np.cos(phi)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_pauliz_hadamard(self, theta, phi, varphi, tol):
"""Test that a tensor product involving PauliZ and PauliY and hadamard works correctly"""
dev = qml.device("default.qubit", wires=3)
obs = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliY(2)
dev.reset()
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
expected = -(np.cos(varphi) * np.sin(phi) + np.sin(varphi) * np.cos(theta)) / np.sqrt(2)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_hermitian(self, theta, phi, varphi, tol):
"""Test that a tensor product involving qml.Hermitian works correctly"""
dev = qml.device("default.qubit", wires=3)
dev.reset()
A = np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
obs = qml.PauliZ(0) @ qml.Hermitian(A, wires=[1, 2])
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
expected = 0.5 * (
-6 * np.cos(theta) * (np.cos(varphi) + 1)
- 2 * np.sin(varphi) * (np.cos(theta) + np.sin(phi) - 2 * np.cos(phi))
+ 3 * np.cos(varphi) * np.sin(phi)
+ np.sin(phi)
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_hermitian_hermitian(self, theta, phi, varphi, tol):
"""Test that a tensor product involving two Hermitian matrices works correctly"""
dev = qml.device("default.qubit", wires=3)
A1 = np.array([[1, 2],
[2, 4]])
A2 = np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
obs = qml.Hermitian(A1, wires=[0]) @ qml.Hermitian(A2, wires=[1, 2])
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
expected = 0.25 * (
-30
+ 4 * np.cos(phi) * np.sin(theta)
+ 3 * np.cos(varphi) * (-10 + 4 * np.cos(phi) * np.sin(theta) - 3 * np.sin(phi))
- 3 * np.sin(phi)
- 2 * (5 + np.cos(phi) * (6 + 4 * np.sin(theta)) + (-3 + 8 * np.sin(theta)) * np.sin(phi))
* np.sin(varphi)
+ np.cos(theta)
* (
18
+ 5 * np.sin(phi)
+ 3 * np.cos(varphi) * (6 + 5 * np.sin(phi))
+ 2 * (3 + 10 * np.cos(phi) - 5 * np.sin(phi)) * np.sin(varphi)
)
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_hermitian_identity_expectation(self, theta, phi, varphi, tol):
"""Test that a tensor product involving an Hermitian matrix and the identity works correctly"""
dev = qml.device("default.qubit", wires=2)
A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
obs = qml.Hermitian(A, wires=[0]) @ qml.Identity(wires=[1])
dev.apply(
[
qml.RY(theta, wires=[0]),
qml.RY(phi, wires=[1]),
qml.CNOT(wires=[0, 1])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
a = A[0, 0]
re_b = A[0, 1].real
d = A[1, 1]
expected = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_hermitian_two_wires_identity_expectation(self, theta, phi, varphi, tol):
"""Test that a tensor product involving an Hermitian matrix for two wires and the identity works correctly"""
dev = qml.device("default.qubit", wires=3, analytic=True)
A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
Identity = np.array([[1, 0],[0, 1]])
H = np.kron(np.kron(Identity,Identity), A)
obs = qml.Hermitian(H, wires=[2, 1, 0])
dev.apply(
[
qml.RY(theta, wires=[0]),
qml.RY(phi, wires=[1]),
qml.CNOT(wires=[0, 1])
],
obs.diagonalizing_gates()
)
res = dev.expval(obs)
a = A[0, 0]
re_b = A[0, 1].real
d = A[1, 1]
expected = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("theta, phi, varphi", list(zip(THETA, PHI, VARPHI)))
class TestTensorVar:
"""Tests for variance of tensor observables"""
def test_paulix_pauliy(self, theta, phi, varphi, tol):
"""Test that a tensor product involving PauliX and PauliY works correctly"""
dev = qml.device("default.qubit", wires=3)
obs = qml.PauliX(0) @ qml.PauliY(2)
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.var(obs)
expected = (
8 * np.sin(theta) ** 2 * np.cos(2 * varphi) * np.sin(phi) ** 2
- np.cos(2 * (theta - phi))
- np.cos(2 * (theta + phi))
+ 2 * np.cos(2 * theta)
+ 2 * np.cos(2 * phi)
+ 14
) / 16
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_pauliz_hadamard(self, theta, phi, varphi, tol):
"""Test that a tensor product involving PauliZ and PauliY and hadamard works correctly"""
dev = qml.device("default.qubit", wires=3)
obs = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliY(2)
dev.reset()
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.var(obs)
expected = (
3
+ np.cos(2 * phi) * np.cos(varphi) ** 2
- np.cos(2 * theta) * np.sin(varphi) ** 2
- 2 * np.cos(theta) * np.sin(phi) * np.sin(2 * varphi)
) / 4
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_hermitian(self, theta, phi, varphi, tol):
"""Test that a tensor product involving qml.Hermitian works correctly"""
dev = qml.device("default.qubit", wires=3)
A = np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
obs = qml.PauliZ(0) @ qml.Hermitian(A, wires=[1, 2])
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
res = dev.var(obs)
expected = (
1057
- np.cos(2 * phi)
+ 12 * (27 + np.cos(2 * phi)) * np.cos(varphi)
- 2 * np.cos(2 * varphi) * np.sin(phi) * (16 * np.cos(phi) + 21 * np.sin(phi))
+ 16 * np.sin(2 * phi)
- 8 * (-17 + np.cos(2 * phi) + 2 * np.sin(2 * phi)) * np.sin(varphi)
- 8 * np.cos(2 * theta) * (3 + 3 * np.cos(varphi) + np.sin(varphi)) ** 2
- 24 * np.cos(phi) * (np.cos(phi) + 2 * np.sin(phi)) * np.sin(2 * varphi)
- 8
* np.cos(theta)
* (
4
* np.cos(phi)
* (
4
+ 8 * np.cos(varphi)
+ np.cos(2 * varphi)
- (1 + 6 * np.cos(varphi)) * np.sin(varphi)
)
+ np.sin(phi)
* (
15
+ 8 * np.cos(varphi)
- 11 * np.cos(2 * varphi)
+ 42 * np.sin(varphi)
+ 3 * np.sin(2 * varphi)
)
)
) / 16
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("theta, phi, varphi", list(zip(THETA, PHI, VARPHI)))
class TestTensorSample:
"""Test tensor expectation values"""
def test_paulix_pauliy(self, theta, phi, varphi, monkeypatch, tol):
"""Test that a tensor product involving PauliX and PauliY works correctly"""
dev = qml.device("default.qubit", wires=3, shots=10000)
obs = qml.PauliX(0) @ qml.PauliY(2)
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
dev._wires_measured = {0, 1, 2}
dev._samples = dev.generate_samples()
dev.sample(obs)
s1 = obs.eigvals
p = dev.probability(wires=obs.wires)
# s1 should only contain 1 and -1
assert np.allclose(s1 ** 2, 1, atol=tol, rtol=0)
mean = s1 @ p
expected = np.sin(theta) * np.sin(phi) * np.sin(varphi)
assert np.allclose(mean, expected, atol=tol, rtol=0)
var = (s1 ** 2) @ p - (s1 @ p).real ** 2
expected = (
8 * np.sin(theta) ** 2 * np.cos(2 * varphi) * np.sin(phi) ** 2
- np.cos(2 * (theta - phi))
- np.cos(2 * (theta + phi))
+ 2 * np.cos(2 * theta)
+ 2 * np.cos(2 * phi)
+ 14
) / 16
assert np.allclose(var, expected, atol=tol, rtol=0)
def test_pauliz_hadamard(self, theta, phi, varphi, monkeypatch, tol):
"""Test that a tensor product involving PauliZ and PauliY and hadamard works correctly"""
dev = qml.device("default.qubit", wires=3)
obs = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliY(2)
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
dev._wires_measured = {0, 1, 2}
dev._samples = dev.generate_samples()
dev.sample(obs)
s1 = obs.eigvals
p = dev.marginal_prob(dev.probability(), wires=obs.wires)
# s1 should only contain 1 and -1
assert np.allclose(s1 ** 2, 1, atol=tol, rtol=0)
mean = s1 @ p
expected = -(np.cos(varphi) * np.sin(phi) + np.sin(varphi) * np.cos(theta)) / np.sqrt(2)
assert np.allclose(mean, expected, atol=tol, rtol=0)
var = (s1 ** 2) @ p - (s1 @ p).real ** 2
expected = (
3
+ np.cos(2 * phi) * np.cos(varphi) ** 2
- np.cos(2 * theta) * np.sin(varphi) ** 2
- 2 * np.cos(theta) * np.sin(phi) * np.sin(2 * varphi)
) / 4
assert np.allclose(var, expected, atol=tol, rtol=0)
def test_hermitian(self, theta, phi, varphi, monkeypatch, tol):
"""Test that a tensor product involving qml.Hermitian works correctly"""
dev = qml.device("default.qubit", wires=3)
A = np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
obs = qml.PauliZ(0) @ qml.Hermitian(A, wires=[1, 2])
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2])
],
obs.diagonalizing_gates()
)
dev._wires_measured = {0, 1, 2}
dev._samples = dev.generate_samples()
dev.sample(obs)
s1 = obs.eigvals
p = dev.marginal_prob(dev.probability(), wires=obs.wires)
# s1 should only contain the eigenvalues of
# the hermitian matrix tensor product Z
Z = np.diag([1, -1])
eigvals = np.linalg.eigvalsh(np.kron(Z, A))
assert set(np.round(s1, 8)).issubset(set(np.round(eigvals, 8)))
mean = s1 @ p
expected = 0.5 * (
-6 * np.cos(theta) * (np.cos(varphi) + 1)
- 2 * np.sin(varphi) * (np.cos(theta) + np.sin(phi) - 2 * np.cos(phi))
+ 3 * np.cos(varphi) * np.sin(phi)
+ np.sin(phi)
)
assert np.allclose(mean, expected, atol=tol, rtol=0)
var = (s1 ** 2) @ p - (s1 @ p).real ** 2
expected = (
1057
- np.cos(2 * phi)
+ 12 * (27 + np.cos(2 * phi)) * np.cos(varphi)
- 2 * np.cos(2 * varphi) * np.sin(phi) * (16 * np.cos(phi) + 21 * np.sin(phi))
+ 16 * np.sin(2 * phi)
- 8 * (-17 + np.cos(2 * phi) + 2 * np.sin(2 * phi)) * np.sin(varphi)
- 8 * np.cos(2 * theta) * (3 + 3 * np.cos(varphi) + np.sin(varphi)) ** 2
- 24 * np.cos(phi) * (np.cos(phi) + 2 * np.sin(phi)) * np.sin(2 * varphi)
- 8
* np.cos(theta)
* (
4
* np.cos(phi)
* (
4
+ 8 * np.cos(varphi)
+ np.cos(2 * varphi)
- (1 + 6 * np.cos(varphi)) * np.sin(varphi)
)
+ np.sin(phi)
* (
15
+ 8 * np.cos(varphi)
- 11 * np.cos(2 * varphi)
+ 42 * np.sin(varphi)
+ 3 * np.sin(2 * varphi)
)
)
) / 16
assert np.allclose(var, expected, atol=tol, rtol=0)
class TestProbabilityIntegration:
"""Test probability method for when analytic is True/False"""
def mock_analytic_counter(self, wires=None):
self.analytic_counter += 1
return np.array([1, 0, 0, 0], dtype=float)
@pytest.mark.parametrize("x", [[0.2, 0.5], [0.4, 0.9], [0.8, 0.3]])
def test_probability(self, x, tol):
"""Test that the probability function works when analytic=False"""
dev = qml.device("default.qubit", wires=2, analytic=False)
dev_analytic = qml.device("default.qubit", wires=2, analytic=True)
def circuit(x):
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=0)
qml.CNOT(wires=[0, 1])
return qml.probs(wires=[0, 1])
prob = qml.QNode(circuit, dev)
prob_analytic = qml.QNode(circuit, dev_analytic)
assert np.isclose(prob(x).sum(), 1, atol=tol, rtol=0)
assert np.allclose(prob_analytic(x), prob(x), atol=0.1, rtol=0)
assert not np.array_equal(prob_analytic(x), prob(x))
@pytest.mark.parametrize("analytic", [True, False])
def test_call_generate_samples(self, analytic, monkeypatch):
"""Test analytic_probability call when generating samples"""
        self.analytic_counter = 0
dev = qml.device("default.qubit", wires=2, analytic=analytic)
monkeypatch.setattr(dev, "analytic_probability", self.mock_analytic_counter)
# generate samples through `generate_samples` (using 'analytic_probability')
dev.generate_samples()
# should call `analytic_probability` once through `generate_samples`
assert self.analytic_counter == 1
def test_stateless_analytic_return(self):
"""Test that analytic_probability returns None if device is stateless"""
dev = qml.device("default.qubit", wires=2)
dev._state = None
assert dev.analytic_probability() is None
|
py | 1a4227f6b4b0ef7370b0f09613ef9d4b8916b435 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class JeuxVideoIE(InfoExtractor):
_VALID_URL = r'https?://.*?\.jeuxvideo\.com/.*/(.*?)\.htm'
_TESTS = [{
'url': 'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
'md5': '046e491afb32a8aaac1f44dd4ddd54ee',
'info_dict': {
'id': '114765',
'ext': 'mp4',
'title': 'Tearaway : GC 2013 : Tearaway nous présente ses papiers d\'identité',
'description': 'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.',
},
}, {
'url': 'http://www.jeuxvideo.com/videos/chroniques/434220/l-histoire-du-jeu-video-la-saturn.htm',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = mobj.group(1)
webpage = self._download_webpage(url, title)
title = self._html_search_meta('name', webpage) or self._og_search_title(webpage)
config_url = self._html_search_regex(
r'data-src(?:set-video)?="(/contenu/medias/video.php.*?)"',
webpage, 'config URL')
config_url = 'http://www.jeuxvideo.com' + config_url
video_id = self._search_regex(
r'id=(\d+)',
config_url, 'video ID')
config = self._download_json(
config_url, title, 'Downloading JSON config')
formats = [{
'url': source['file'],
'format_id': source['label'],
'resolution': source['label'],
} for source in reversed(config['sources'])]
return {
'id': video_id,
'title': title,
'formats': formats,
'description': self._og_search_description(webpage),
'thumbnail': config.get('image'),
}
|
py | 1a42283434dfe3a4f6513437ef1ad0854b26bd04 | class ConfigStruct(object):
def __init__(self):
# Location of the star catalog
self.CAL_DIR = '../data/'
self.CAL_NAME = 'gaia_dr2_mag_11.5.npy'
# Location of the MPC data
self.OURS_DIR = '../data/'
self.OURS_NAME = 'mpc_data.txt'
# Location of finder data
self.FINDER_DIR = '../data/'
self.FINDER_NAME = '2019-01-02.txt'
# Location of the query results
self.QUERY_DIR = '../data/'
self.QUERY_NAME = 'query_results.txt'
# Where the RAW imaging coordinates are saved
self.SAVE_DIR = '../data/'
self.SAVE_NAME = 'saved_coordinates.txt'
# Where the imaging coordinates IN TELESCOPE FORMAT are saved
self.FINAL_DIR = '../data/'
self.FINAL_NAME = 'saved_coord_telescope.txt'
# Ask the user for limiting magnitude and FOV size
self.LIM_MAG = 11.5
self.X_SPAN = 0.7317
self.Y_SPAN = 0.7317
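
# Example usage of the module-level `config` instance created below (a sketch;
# the importing module name is an assumption):
#
#   from config import config
#   catalog_path = config.CAL_DIR + config.CAL_NAME
#   # -> '../data/gaia_dr2_mag_11.5.npy'
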
config = ConfigStruct() |
py | 1a4228928b1d3bd00005cc2cd1f78dc5705f8ec8 | """Tests for binary operators on subtypes of built-in types."""
import unittest
from test import test_support
def gcd(a, b):
"""Greatest common divisor using Euclid's algorithm."""
while a:
a, b = b%a, a
return b
def isint(x):
"""Test whether an object is an instance of int or long."""
return isinstance(x, int) or isinstance(x, long)
def isnum(x):
"""Test whether an object is an instance of a built-in numeric type."""
for T in int, long, float, complex:
if isinstance(x, T):
return 1
return 0
def isRat(x):
"""Test wheter an object is an instance of the Rat class."""
return isinstance(x, Rat)
class Rat(object):
"""Rational number implemented as a normalized pair of longs."""
__slots__ = ['_Rat__num', '_Rat__den']
def __init__(self, num=0L, den=1L):
"""Constructor: Rat([num[, den]]).
The arguments must be ints or longs, and default to (0, 1)."""
if not isint(num):
raise TypeError, "Rat numerator must be int or long (%r)" % num
if not isint(den):
raise TypeError, "Rat denominator must be int or long (%r)" % den
# But the zero is always on
if den == 0:
raise ZeroDivisionError, "zero denominator"
g = gcd(den, num)
self.__num = long(num//g)
self.__den = long(den//g)
def _get_num(self):
"""Accessor function for read-only 'num' attribute of Rat."""
return self.__num
num = property(_get_num, None)
def _get_den(self):
"""Accessor function for read-only 'den' attribute of Rat."""
return self.__den
den = property(_get_den, None)
def __repr__(self):
"""Convert a Rat to an string resembling a Rat constructor call."""
return "Rat(%d, %d)" % (self.__num, self.__den)
def __str__(self):
"""Convert a Rat to a string resembling a decimal numeric value."""
return str(float(self))
def __float__(self):
"""Convert a Rat to a float."""
return self.__num*1.0/self.__den
def __int__(self):
"""Convert a Rat to an int; self.den must be 1."""
if self.__den == 1:
try:
return int(self.__num)
except OverflowError:
raise OverflowError, ("%s too large to convert to int" %
repr(self))
raise ValueError, "can't convert %s to int" % repr(self)
def __long__(self):
"""Convert a Rat to an long; self.den must be 1."""
if self.__den == 1:
return long(self.__num)
raise ValueError, "can't convert %s to long" % repr(self)
def __add__(self, other):
"""Add two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den + other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) + other
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den - other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) - other
return NotImplemented
def __rsub__(self, other):
"""Subtract two Rats, or a Rat and a number (reversed args)."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(other.__num*self.__den - self.__num*other.__den,
self.__den*other.__den)
if isnum(other):
return other - float(self)
return NotImplemented
def __mul__(self, other):
"""Multiply two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__num, self.__den*other.__den)
if isint(other):
return Rat(self.__num*other, self.__den)
if isnum(other):
return float(self)*other
return NotImplemented
__rmul__ = __mul__
def __truediv__(self, other):
"""Divide two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__den, self.__den*other.__num)
if isint(other):
return Rat(self.__num, self.__den*other)
if isnum(other):
return float(self) / other
return NotImplemented
__div__ = __truediv__
def __rtruediv__(self, other):
"""Divide two Rats, or a Rat and a number (reversed args)."""
if isRat(other):
return Rat(other.__num*self.__den, other.__den*self.__num)
if isint(other):
return Rat(other*self.__den, self.__num)
if isnum(other):
return other / float(self)
return NotImplemented
__rdiv__ = __rtruediv__
def __floordiv__(self, other):
"""Divide two Rats, returning the floored result."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self/other
return x.__num // x.__den
def __rfloordiv__(self, other):
"""Divide two Rats, returning the floored result (reversed args)."""
x = other/self
return x.__num // x.__den
def __divmod__(self, other):
"""Divide two Rats, returning quotient and remainder."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self//other
return (x, self - other * x)
def __rdivmod__(self, other):
"Divide two Rats, returning quotient and remainder (reversed args)."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
return divmod(other, self)
def __mod__(self, other):
"""Take one Rat modulo another."""
return divmod(self, other)[1]
def __rmod__(self, other):
"""Take one Rat modulo another (reversed args)."""
return divmod(other, self)[1]
def __eq__(self, other):
"""Compare two Rats for equality."""
if isint(other):
return self.__den == 1 and self.__num == other
if isRat(other):
return self.__num == other.__num and self.__den == other.__den
if isnum(other):
return float(self) == other
return NotImplemented
def __ne__(self, other):
"""Compare two Rats for inequality."""
return not self == other
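
# A quick illustration of the class (Python 2 semantics; values are
# normalized on construction):
#
#   >>> Rat(10, 15)
#   Rat(2, 3)
#   >>> Rat(2, 3) + 1
#   Rat(5, 3)
#   >>> float(Rat(1, 2))
#   0.5
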
class RatTestCase(unittest.TestCase):
"""Unit tests for Rat class and its support utilities."""
def test_gcd(self):
self.assertEqual(gcd(10, 12), 2)
self.assertEqual(gcd(10, 15), 5)
self.assertEqual(gcd(10, 11), 1)
self.assertEqual(gcd(100, 15), 5)
self.assertEqual(gcd(-10, 2), -2)
self.assertEqual(gcd(10, -2), 2)
self.assertEqual(gcd(-10, -2), -2)
for i in range(1, 20):
for j in range(1, 20):
self.assert_(gcd(i, j) > 0)
self.assert_(gcd(-i, j) < 0)
self.assert_(gcd(i, -j) > 0)
self.assert_(gcd(-i, -j) < 0)
def test_constructor(self):
a = Rat(10, 15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10L, 15L)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10, -15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, 15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, -15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(7)
self.assertEqual(a.num, 7)
self.assertEqual(a.den, 1)
try:
a = Rat(1, 0)
except ZeroDivisionError:
pass
else:
self.fail("Rat(1, 0) didn't raise ZeroDivisionError")
for bad in "0", 0.0, 0j, (), [], {}, None, Rat, unittest:
try:
a = Rat(bad)
except TypeError:
pass
else:
self.fail("Rat(%r) didn't raise TypeError" % bad)
try:
a = Rat(1, bad)
except TypeError:
pass
else:
self.fail("Rat(1, %r) didn't raise TypeError" % bad)
def test_add(self):
self.assertEqual(Rat(2, 3) + Rat(1, 3), 1)
self.assertEqual(Rat(2, 3) + 1, Rat(5, 3))
self.assertEqual(1 + Rat(2, 3), Rat(5, 3))
self.assertEqual(1.0 + Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) + 1.0, 1.5)
def test_sub(self):
self.assertEqual(Rat(7, 2) - Rat(7, 5), Rat(21, 10))
self.assertEqual(Rat(7, 5) - 1, Rat(2, 5))
self.assertEqual(1 - Rat(3, 5), Rat(2, 5))
self.assertEqual(Rat(3, 2) - 1.0, 0.5)
self.assertEqual(1.0 - Rat(1, 2), 0.5)
def test_mul(self):
self.assertEqual(Rat(2, 3) * Rat(5, 7), Rat(10, 21))
self.assertEqual(Rat(10, 3) * 3, 10)
self.assertEqual(3 * Rat(10, 3), 10)
self.assertEqual(Rat(10, 5) * 0.5, 1.0)
self.assertEqual(0.5 * Rat(10, 5), 1.0)
def test_div(self):
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
def test_floordiv(self):
self.assertEqual(Rat(10) // Rat(4), 2)
self.assertEqual(Rat(10, 3) // Rat(4, 3), 2)
self.assertEqual(Rat(10) // 4, 2)
self.assertEqual(10 // Rat(4), 2)
def test_eq(self):
self.assertEqual(Rat(10), Rat(20, 2))
self.assertEqual(Rat(10), 10)
self.assertEqual(10, Rat(10))
self.assertEqual(Rat(10), 10.0)
self.assertEqual(10.0, Rat(10))
def test_future_div(self):
exec future_test
# XXX Ran out of steam; TO DO: divmod, div, future division
future_test = """
from __future__ import division
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
self.assertEqual(eval('1/2'), 0.5)
"""
def test_main():
test_support.run_unittest(RatTestCase)
if __name__ == "__main__":
test_main()
|
py | 1a42299448b1f98bfda02272729c27e2cd9c9ba5 | import unittest
import camlib
class ExcellonNumberParseTestInch(unittest.TestCase):
# Inch base format: 00.0000
# LEADING ZEROS
# With leading zeros, when you type in a coordinate,
# the leading zeros must always be included. Trailing zeros
# are unneeded and may be left off. The CNC-7 will automatically add them.
# TRAILING ZEROS
# You must show all zeros to the right of the number and can omit
# all zeros to the left of the number. The CNC-7 will count the number
# of digits you typed and automatically fill in the missing zeros.
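    # A minimal sketch of the arithmetic these rules imply for the 00.0000
    # inch format (an illustration only; the tests below exercise camlib's
    # actual parser):
    #
    #   leading zeros:  "12345" -> pad right -> "123450" -> 123450 / 10**4 = 12.345
    #   trailing zeros: "12345" -> no padding            ->  12345 / 10**4 = 1.2345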
def test_inch_leading_6digit(self):
excellon = camlib.Excellon()
self.assertEqual(excellon.zeros, "L")
self.assertEqual(excellon.parse_number("123456"), 12.3456)
def test_inch_leading_5digit(self):
excellon = camlib.Excellon()
self.assertEqual(excellon.parse_number("12345"), 12.345)
def test_inch_leading_15digit(self):
excellon = camlib.Excellon()
self.assertEqual(excellon.parse_number("012345"), 1.2345)
def test_inch_leading_51digit(self):
excellon = camlib.Excellon()
self.assertEqual(excellon.parse_number("123450"), 12.345)
def test_inch_trailing_6digit(self):
excellon = camlib.Excellon()
excellon.zeros = "T"
self.assertEqual(excellon.parse_number("123456"), 12.3456)
def test_inch_trailing_5digit(self):
excellon = camlib.Excellon()
excellon.zeros = "T"
self.assertEqual(excellon.parse_number("12345"), 1.2345)
def test_inch_trailing_15digit(self):
excellon = camlib.Excellon()
excellon.zeros = "T"
self.assertEqual(excellon.parse_number("012345"), 1.2345)
def test_inch_trailing_51digit(self):
excellon = camlib.Excellon()
excellon.zeros = "T"
self.assertEqual(excellon.parse_number("123450"), 12.345)
class ExcellonNumberParseTestMetric(unittest.TestCase):
# Metric base format: 000.000
# LEADING ZEROS
# With leading zeros, when you type in a coordinate,
# the leading zeros must always be included. Trailing zeros
# are unneeded and may be left off. The CNC-7 will automatically add them.
# TRAILING ZEROS
# You must show all zeros to the right of the number and can omit
# all zeros to the left of the number. The CNC-7 will count the number
# of digits you typed and automatically fill in the missing zeros.
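    # For the 000.000 metric format the same rules apply with three decimal
    # places, e.g. leading zeros: "12345" -> "123450" -> 123450 / 10**3 = 123.45.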
    def test_metric_leading_6digit(self):
excellon = camlib.Excellon()
excellon.units = "mm"
self.assertEqual(excellon.parse_number("123456"), 123.456)
    def test_metric_leading_5digit(self):
excellon = camlib.Excellon()
excellon.units = "mm"
self.assertEqual(excellon.parse_number("12345"), 123.45)
    def test_metric_leading_15digit(self):
excellon = camlib.Excellon()
excellon.units = "mm"
self.assertEqual(excellon.parse_number("012345"), 12.345)
    def test_metric_leading_51digit(self):
excellon = camlib.Excellon()
excellon.units = "mm"
self.assertEqual(excellon.parse_number("123450"), 123.45)
    def test_metric_trailing_6digit(self):
excellon = camlib.Excellon()
excellon.units = "mm"
excellon.zeros = "T"
self.assertEqual(excellon.parse_number("123456"), 123.456)
    def test_metric_trailing_5digit(self):
excellon = camlib.Excellon()
excellon.units = "mm"
excellon.zeros = "T"
self.assertEqual(excellon.parse_number("12345"), 12.345)
    def test_metric_trailing_15digit(self):
excellon = camlib.Excellon()
excellon.units = "mm"
excellon.zeros = "T"
self.assertEqual(excellon.parse_number("012345"), 12.345)
    def test_metric_trailing_51digit(self):
excellon = camlib.Excellon()
excellon.units = "mm"
excellon.zeros = "T"
self.assertEqual(excellon.parse_number("123450"), 123.45)
class ExcellonFormatM72Test(unittest.TestCase):
def setUp(self):
self.excellon = camlib.Excellon()
code = """
M48
M72
T1C.02362F197S550
T2C.03543F197S550
M95
T1
X9000Y11750
X30250Y10500
"""
code = code.split('\n')
self.excellon.parse_lines(code)
def test_format(self):
self.assertEqual(self.excellon.units.lower(), "in")
self.assertEqual(self.excellon.zeros, "L")
def test_coords(self):
# For X9000 add the missing 00 on the right. Then divide by 10000.
self.assertEqual(self.excellon.drills[0]["point"].coords[0], (90.0, 11.75))
self.assertEqual(self.excellon.drills[1]["point"].coords[0], (30.25, 10.5))
class ExcellonFormatM71Test(unittest.TestCase):
def setUp(self):
self.excellon = camlib.Excellon()
code = """
M48
M71
T1C.02362F197S550
T2C.03543F197S550
M95
T1
X9000Y11750
X30250Y10500
"""
code = code.split('\n')
self.excellon.parse_lines(code)
def test_format(self):
self.assertEqual(self.excellon.units.lower(), "mm")
self.assertEqual(self.excellon.zeros, "L")
def test_coords(self):
        # For X9000 add the missing 00 on the right. Then divide by 1000 (metric 000.000 format).
self.assertEqual(self.excellon.drills[0]["point"].coords[0], (900.0, 117.5))
self.assertEqual(self.excellon.drills[1]["point"].coords[0], (302.5, 105.0))
class ExcellonFormatINCHLZTest(unittest.TestCase):
def setUp(self):
self.excellon = camlib.Excellon()
code = """
M48
INCH,LZ
T1C.02362F197S550
T2C.03543F197S550
M95
T1
X9000Y11750
X30250Y10500
"""
code = code.split('\n')
self.excellon.parse_lines(code)
def test_format(self):
self.assertEqual(self.excellon.units.lower(), "in")
self.assertEqual(self.excellon.zeros, "L")
def test_coords(self):
# For X9000 add the missing 00 on the right. Then divide by 10000.
self.assertEqual(self.excellon.drills[0]["point"].coords[0], (90.0, 11.75))
self.assertEqual(self.excellon.drills[1]["point"].coords[0], (30.25, 10.5))
class ExcellonFormatINCHTest(unittest.TestCase):
def setUp(self):
self.excellon = camlib.Excellon()
code = """
M48
        INCH
T1C.02362F197S550
T2C.03543F197S550
M95
T1
X9000Y11750
X30250Y10500
"""
code = code.split('\n')
self.excellon.parse_lines(code)
def test_format(self):
self.assertEqual(self.excellon.units.lower(), "in")
self.assertEqual(self.excellon.zeros, "L")
def test_coords(self):
# For X9000 add the missing 00 on the right. Then divide by 10000.
self.assertEqual(self.excellon.drills[0]["point"].coords[0], (90.0, 11.75))
self.assertEqual(self.excellon.drills[1]["point"].coords[0], (30.25, 10.5))
class ExcellonFormatINCHTZTest(unittest.TestCase):
def setUp(self):
self.excellon = camlib.Excellon()
code = """
M48
INCH,TZ
T1C.02362F197S550
T2C.03543F197S550
M95
T1
X9000Y11750
X30250Y10500
"""
code = code.split('\n')
self.excellon.parse_lines(code)
def test_format(self):
self.assertEqual(self.excellon.units.lower(), "in")
self.assertEqual(self.excellon.zeros, "T")
def test_coords(self):
        # With trailing zeros, X9000 is taken as-is: 9000 / 10000 = 0.9.
self.assertEqual(self.excellon.drills[0]["point"].coords[0], (0.9, 1.175))
self.assertEqual(self.excellon.drills[1]["point"].coords[0], (3.025, 1.05))
class ExcellonFormatMETRICLZTest(unittest.TestCase):
def setUp(self):
self.excellon = camlib.Excellon()
code = """
M48
METRIC,LZ
T1C.02362F197S550
T2C.03543F197S550
M95
T1
X9000Y11750
X30250Y10500
"""
code = code.split('\n')
self.excellon.parse_lines(code)
def test_format(self):
self.assertEqual(self.excellon.units.lower(), "mm")
self.assertEqual(self.excellon.zeros, "L")
def test_coords(self):
        # For X9000 add the missing 00 on the right. Then divide by 1000 (metric 000.000 format).
self.assertEqual(self.excellon.drills[0]["point"].coords[0], (900.0, 117.5))
self.assertEqual(self.excellon.drills[1]["point"].coords[0], (302.5, 105.0))
class ExcellonFormatMETRICTest(unittest.TestCase):
def setUp(self):
self.excellon = camlib.Excellon()
code = """
M48
        METRIC
T1C.02362F197S550
T2C.03543F197S550
M95
T1
X9000Y11750
X30250Y10500
"""
code = code.split('\n')
self.excellon.parse_lines(code)
def test_format(self):
self.assertEqual(self.excellon.units.lower(), "mm")
self.assertEqual(self.excellon.zeros, "L")
def test_coords(self):
        # For X9000 add the missing 00 on the right. Then divide by 1000 (metric 000.000 format).
self.assertEqual(self.excellon.drills[0]["point"].coords[0], (900.0, 117.5))
self.assertEqual(self.excellon.drills[1]["point"].coords[0], (302.5, 105.0))
class ExcellonFormatMETRICTZTest(unittest.TestCase):
def setUp(self):
self.excellon = camlib.Excellon()
code = """
M48
METRIC,TZ
T1C.02362F197S550
T2C.03543F197S550
M95
T1
X9000Y11750
X30250Y10500
"""
code = code.split('\n')
self.excellon.parse_lines(code)
def test_format(self):
self.assertEqual(self.excellon.units.lower(), "mm")
self.assertEqual(self.excellon.zeros, "T")
def test_coords(self):
        # With trailing zeros, X9000 is taken as-is: 9000 / 1000 = 9.0.
self.assertEqual(self.excellon.drills[0]["point"].coords[0], (9.0, 11.75))
self.assertEqual(self.excellon.drills[1]["point"].coords[0], (30.25, 10.5))
if __name__ == '__main__':
unittest.main() |
py | 1a422a7ce516a1ca4d861f47c957882a5f88a0ba | import secrets
def _get_header(token):
return f'''
rule encoding_geary_{token}:'''
def _get_benchmark(benchmark_out):
return f'''
benchmark:
"{benchmark_out}"'''
def _get_main(fasta_in, classes_in, length_in, geary_out):
return f'''
input:
fasta_in="{fasta_in}",
classes_in="{classes_in}",
length_in="{length_in}"
output:
csv_out={geary_out}
threads:
1000
params:
snakefile="nodes/encodings/geary/Snakefile",
configfile="nodes/encodings/geary/config.yaml"
run:
with WorkflowExecuter(dict(input), dict(output), params.configfile, cores=CORES) as e:
shell(f"""{{e.snakemake}} -s {{params.snakefile}} --configfile {{params.configfile}}""")
'''
def rule(fasta_in, classes_in, length_in, geary_out, benchmark_dir=None):
"""
Computes the Geary correlation encoding.
Category: encodings \n
Node: geary
:param fasta_in: The path to the fasta file.
:param classes_in: The path to the classes file.
:param length_in: The path to the file, containing the allowed parameter space.
:param geary_out: A list of output file paths to store the encoded datasets.
:param benchmark_dir: The path to the directory to store the benchmark results. If None,
benchmark will be not executed (default).
:return: A string object representing a Snakemake rule.
"""
token = secrets.token_hex(4)
rule = _get_header(token)
if benchmark_dir is not None:
benchmark_out = f"{benchmark_dir}encoding_geary_{token}.txt"
rule += _get_benchmark(benchmark_out)
rule += _get_main(fasta_in, classes_in, length_in, geary_out)
return rule
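

# Example usage (a sketch; every path below is hypothetical):
#
#   rule_str = rule(
#       fasta_in="data/seqs.fasta",
#       classes_in="data/classes.txt",
#       length_in="data/length_interpolated.yaml",
#       geary_out=["data/csv/geary/geary_nlag_2.csv"],
#       benchmark_dir="bench/")
#   # rule_str is a Snakemake rule string, ready to be appended to a
#   # generated master Snakefile.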
|
py | 1a422a9e8ce37116415efdf966abab28b472f6e3 | from copy import deepcopy
import setpath
import vtbase
import functions
import heapq
### Classic stream iterator
registered=True
class StreamIntersect(vtbase.VT):
def BestIndex(self, constraints, orderbys):
return (None, 0, None, True, 1000)
def VTiter(self, *parsedArgs,**envars):
largs, dictargs = self.full_parse(parsedArgs)
if len(largs) < 1:
raise functions.OperatorError(__name__.rsplit('.')[-1],"Not defined union tables ")
streams = str(largs[0]).split(",")
if len(streams) < 2:
raise functions.OperatorError(__name__.rsplit('.')[-1],"Union tables must be more than one ")
cursors = []
execs = []
for stream in streams:
cursors.append(envars['db'].cursor())
execs.append(cursors[-1].execute("select * from " + str(stream) + ";"))
comparedcursor = str(cursors[0].getdescriptionsafe())
# for cursor in cursors:
# if str(cursor.getdescriptionsafe()) != comparedcursor:
# raise functions.OperatorError(__name__.rsplit('.')[-1],"Union tables with different schemas ")
if 'cols' in dictargs:
try:
cols = int(dictargs['cols'])
except ValueError:
try:
cols = [y[0] for y in cursors[0].getdescriptionsafe()].index(dictargs['cols'])
except ValueError:
raise functions.OperatorError(__name__.rsplit('.')[-1],"Column name does not exists ")
else:
cols=0
if cols >= len(cursors[0].getdescriptionsafe()):
raise functions.OperatorError(__name__.rsplit('.')[-1],"Column position does not exists ")
        # Tag each row as (key, (stream_id, row)). A helper function binds the
        # stream index at call time; a bare generator expression inside the
        # loop would close over the final value of x, and unrolling per index
        # (as was done before) silently capped the number of supported streams.
        def tag(ex, idx):
            return ((v[cols], (idx,) + v) for v in ex)

        for x in range(len(streams)):
            execs[x] = tag(execs[x], x)
try:
yield list(cursors[0].getdescriptionsafe())
except StopIteration:
try:
raise
finally:
try:
for cur in cursors:
cur.close()
except:
pass
currentgroup = None
lists = [[]] * len(streams)
for k, v in heapq.merge(*execs):
if currentgroup is None or currentgroup != k:
for t in set(lists[0]).intersection(*lists[1:]):
yield t
lists = [[]] * len(streams)
lists[v[0]] = lists[v[0]] + [tuple(v[1:])]
currentgroup = k
for t in set(lists[0]).intersection(*lists[1:]):
yield t
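
# Sketch of the grouped-merge intersection implemented in VTiter above
# (a standalone illustration, independent of the VT machinery): each stream
# is turned into (key, (stream_id, row)) pairs, heapq.merge interleaves them
# in key order, and a row is emitted only when every stream contributed it
# for that key.
#
#   import heapq
#   a = [(1, (0, 'x')), (2, (0, 'y'))]
#   b = [(2, (1, 'y')), (3, (1, 'z'))]
#   list(heapq.merge(a, b))
#   # -> [(1, (0, 'x')), (2, (0, 'y')), (2, (1, 'y')), (3, (1, 'z'))]
#   # key 2 appears in both streams, so row ('y',) is the one yielded.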
def Source():
return vtbase.VTGenerator(StreamIntersect)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
|
py | 1a422aec306d1a27cbac5bbcef449a004c7e017f | """ queries ads using https://github.com/adsabs/adsabs-dev-api
"""
import ads.exceptions
from ads.export import ExportQuery
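
# Example usage of bibcode_to_bibtex (defined below). A sketch: the bibcode is
# illustrative, and the `ads` package must be configured with an API token.
#
#   bibtex = bibcode_to_bibtex("2019AJ....157...98G")
#   print(bibtex)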
def bibcode_to_bibtex(bibcode):
"""Queries ads for the bibtex"""
try:
bibtex = ExportQuery(bibcodes=bibcode, format="bibtex").execute()
return bibtex
except ads.exceptions.APIResponseError as e:
raise e # TODO (ryan) provide rate limit info |
py | 1a422b6f94b77be7bb08c99ba0bdd183081c4a90 | """
Module: 'builtins' on micropython-v1.16-pyboard
"""
# MCU: {'ver': 'v1.16', 'port': 'pyboard', 'arch': 'armv7emsp', 'sysname': 'pyboard', 'release': '1.16.0', 'name': 'micropython', 'mpy': 7685, 'version': '1.16.0', 'machine': 'PYBv1.1 with STM32F405RG', 'build': '', 'nodename': 'pyboard', 'platform': 'pyboard', 'family': 'micropython'}
# Stubber: 1.5.4
from typing import Any
class ArithmeticError(Exception):
""""""
class AssertionError(Exception):
""""""
class AttributeError(Exception):
""""""
class EOFError(Exception):
""""""
Ellipsis: Any ## <class ''> = Ellipsis
class GeneratorExit:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class ImportError(Exception):
""""""
class IndentationError(Exception):
""""""
class IndexError(Exception):
""""""
class KeyError(Exception):
""""""
class KeyboardInterrupt(Exception):
""""""
class LookupError(Exception):
""""""
class MemoryError(Exception):
""""""
class NameError(Exception):
""""""
class NotImplementedError(Exception):
""""""
class OSError(Exception):
""""""
class OverflowError(Exception):
""""""
class RuntimeError(Exception):
""""""
class StopIteration(Exception):
""""""
class SyntaxError(Exception):
""""""
class SystemExit(Exception):
""""""
class TypeError(Exception):
""""""
class ValueError(Exception):
""""""
class ZeroDivisionError(Exception):
""""""
def abs(*args, **kwargs) -> Any:
...
def all(*args, **kwargs) -> Any:
...
def any(*args, **kwargs) -> Any:
...
class bool:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class bytearray:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def append(self, *args, **kwargs) -> Any:
...
def extend(self, *args, **kwargs) -> Any:
...
def decode(self, *args, **kwargs) -> Any:
...
class bytes:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def count(self, *args, **kwargs) -> Any:
...
def endswith(self, *args, **kwargs) -> Any:
...
def find(self, *args, **kwargs) -> Any:
...
def format(self, *args, **kwargs) -> Any:
...
def index(self, *args, **kwargs) -> Any:
...
def isalpha(self, *args, **kwargs) -> Any:
...
def isdigit(self, *args, **kwargs) -> Any:
...
def islower(self, *args, **kwargs) -> Any:
...
def isspace(self, *args, **kwargs) -> Any:
...
def isupper(self, *args, **kwargs) -> Any:
...
def join(self, *args, **kwargs) -> Any:
...
def lower(self, *args, **kwargs) -> Any:
...
def lstrip(self, *args, **kwargs) -> Any:
...
def replace(self, *args, **kwargs) -> Any:
...
def rfind(self, *args, **kwargs) -> Any:
...
def rindex(self, *args, **kwargs) -> Any:
...
def rsplit(self, *args, **kwargs) -> Any:
...
def rstrip(self, *args, **kwargs) -> Any:
...
def split(self, *args, **kwargs) -> Any:
...
def startswith(self, *args, **kwargs) -> Any:
...
def strip(self, *args, **kwargs) -> Any:
...
def upper(self, *args, **kwargs) -> Any:
...
def center(self, *args, **kwargs) -> Any:
...
def decode(self, *args, **kwargs) -> Any:
...
def partition(self, *args, **kwargs) -> Any:
...
def rpartition(self, *args, **kwargs) -> Any:
...
def splitlines(self, *args, **kwargs) -> Any:
...
def callable(*args, **kwargs) -> Any:
...
def chr(*args, **kwargs) -> Any:
...
class dict:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def clear(self, *args, **kwargs) -> Any:
...
def copy(self, *args, **kwargs) -> Any:
...
def get(self, *args, **kwargs) -> Any:
...
def items(self, *args, **kwargs) -> Any:
...
def keys(self, *args, **kwargs) -> Any:
...
def pop(self, *args, **kwargs) -> Any:
...
def popitem(self, *args, **kwargs) -> Any:
...
def setdefault(self, *args, **kwargs) -> Any:
...
def update(self, *args, **kwargs) -> Any:
...
def values(self, *args, **kwargs) -> Any:
...
@classmethod
def fromkeys(cls, *args, **kwargs) -> Any:
...
def dir(*args, **kwargs) -> Any:
...
def divmod(*args, **kwargs) -> Any:
...
def eval(*args, **kwargs) -> Any:
...
def exec(*args, **kwargs) -> Any:
...
def getattr(*args, **kwargs) -> Any:
...
def globals(*args, **kwargs) -> Any:
...
def hasattr(*args, **kwargs) -> Any:
...
def hash(*args, **kwargs) -> Any:
...
def id(*args, **kwargs) -> Any:
...
class int:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
@classmethod
def from_bytes(cls, *args, **kwargs) -> Any:
...
def to_bytes(self, *args, **kwargs) -> Any:
...
def isinstance(*args, **kwargs) -> Any:
...
def issubclass(*args, **kwargs) -> Any:
...
def iter(*args, **kwargs) -> Any:
...
def len(*args, **kwargs) -> Any:
...
class list:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def append(self, *args, **kwargs) -> Any:
...
def clear(self, *args, **kwargs) -> Any:
...
def copy(self, *args, **kwargs) -> Any:
...
def count(self, *args, **kwargs) -> Any:
...
def extend(self, *args, **kwargs) -> Any:
...
def index(self, *args, **kwargs) -> Any:
...
def insert(self, *args, **kwargs) -> Any:
...
def pop(self, *args, **kwargs) -> Any:
...
def remove(self, *args, **kwargs) -> Any:
...
def reverse(self, *args, **kwargs) -> Any:
...
def sort(self, *args, **kwargs) -> Any:
...
def locals(*args, **kwargs) -> Any:
...
class map:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def next(*args, **kwargs) -> Any:
...
class object:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def open(*args, **kwargs) -> Any:
...
def ord(*args, **kwargs) -> Any:
...
def pow(*args, **kwargs) -> Any:
...
def print(*args, **kwargs) -> Any:
...
class range:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def repr(*args, **kwargs) -> Any:
...
def round(*args, **kwargs) -> Any:
...
class set:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def clear(self, *args, **kwargs) -> Any:
...
def copy(self, *args, **kwargs) -> Any:
...
def pop(self, *args, **kwargs) -> Any:
...
def remove(self, *args, **kwargs) -> Any:
...
def update(self, *args, **kwargs) -> Any:
...
def add(self, *args, **kwargs) -> Any:
...
def difference(self, *args, **kwargs) -> Any:
...
def difference_update(self, *args, **kwargs) -> Any:
...
def discard(self, *args, **kwargs) -> Any:
...
def intersection(self, *args, **kwargs) -> Any:
...
def intersection_update(self, *args, **kwargs) -> Any:
...
def isdisjoint(self, *args, **kwargs) -> Any:
...
def issubset(self, *args, **kwargs) -> Any:
...
def issuperset(self, *args, **kwargs) -> Any:
...
def symmetric_difference(self, *args, **kwargs) -> Any:
...
def symmetric_difference_update(self, *args, **kwargs) -> Any:
...
def union(self, *args, **kwargs) -> Any:
...
def setattr(*args, **kwargs) -> Any:
...
def sorted(*args, **kwargs) -> Any:
...
class str:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def count(self, *args, **kwargs) -> Any:
...
def endswith(self, *args, **kwargs) -> Any:
...
def find(self, *args, **kwargs) -> Any:
...
def format(self, *args, **kwargs) -> Any:
...
def index(self, *args, **kwargs) -> Any:
...
def isalpha(self, *args, **kwargs) -> Any:
...
def isdigit(self, *args, **kwargs) -> Any:
...
def islower(self, *args, **kwargs) -> Any:
...
def isspace(self, *args, **kwargs) -> Any:
...
def isupper(self, *args, **kwargs) -> Any:
...
def join(self, *args, **kwargs) -> Any:
...
def lower(self, *args, **kwargs) -> Any:
...
def lstrip(self, *args, **kwargs) -> Any:
...
def replace(self, *args, **kwargs) -> Any:
...
def rfind(self, *args, **kwargs) -> Any:
...
def rindex(self, *args, **kwargs) -> Any:
...
def rsplit(self, *args, **kwargs) -> Any:
...
def rstrip(self, *args, **kwargs) -> Any:
...
def split(self, *args, **kwargs) -> Any:
...
def startswith(self, *args, **kwargs) -> Any:
...
def strip(self, *args, **kwargs) -> Any:
...
def upper(self, *args, **kwargs) -> Any:
...
def center(self, *args, **kwargs) -> Any:
...
def encode(self, *args, **kwargs) -> Any:
...
def partition(self, *args, **kwargs) -> Any:
...
def rpartition(self, *args, **kwargs) -> Any:
...
def splitlines(self, *args, **kwargs) -> Any:
...
def sum(*args, **kwargs) -> Any:
...
class super:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class tuple:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def count(self, *args, **kwargs) -> Any:
...
def index(self, *args, **kwargs) -> Any:
...
class type:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class zip:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
NotImplemented: Any ## <class ''> = NotImplemented
class StopAsyncIteration:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class UnicodeError(Exception):
""""""
class ViperTypeError(Exception):
""""""
def bin(*args, **kwargs) -> Any:
...
def compile(*args, **kwargs) -> Any:
...
class complex:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def delattr(*args, **kwargs) -> Any:
...
class enumerate:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def execfile(*args, **kwargs) -> Any:
...
class filter:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class float:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class frozenset:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def copy(self, *args, **kwargs) -> Any:
...
def difference(self, *args, **kwargs) -> Any:
...
def intersection(self, *args, **kwargs) -> Any:
...
def isdisjoint(self, *args, **kwargs) -> Any:
...
def issubset(self, *args, **kwargs) -> Any:
...
def issuperset(self, *args, **kwargs) -> Any:
...
def symmetric_difference(self, *args, **kwargs) -> Any:
...
def union(self, *args, **kwargs) -> Any:
...
def help(*args, **kwargs) -> Any:
...
def hex(*args, **kwargs) -> Any:
...
def input(*args, **kwargs) -> Any:
...
def max(*args, **kwargs) -> Any:
...
class memoryview:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def min(*args, **kwargs) -> Any:
...
def oct(*args, **kwargs) -> Any:
...
class property:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def deleter(self, *args, **kwargs) -> Any:
...
def getter(self, *args, **kwargs) -> Any:
...
def setter(self, *args, **kwargs) -> Any:
...
class reversed:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class slice:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
|
py | 1a422c01f1754a7a02335bdfd90a785973c65eed | from sympy import (
Rational, Symbol, N, I, Abs, sqrt, exp, Float, sin,
cos, symbols)
from sympy.matrices import eye, Matrix
from sympy.matrices.matrices import MatrixEigen
from sympy.matrices.common import _MinimalMatrix, _CastableMatrix
from sympy.core.singleton import S
from sympy.testing.pytest import raises, XFAIL
from sympy.matrices.matrices import NonSquareMatrixError, MatrixError
from sympy.simplify.simplify import simplify
from sympy.matrices.immutable import ImmutableMatrix
from sympy.testing.pytest import slow
class EigenOnlyMatrix(_MinimalMatrix, _CastableMatrix, MatrixEigen):
pass
def test_eigen():
R = Rational
M = Matrix.eye(3)
assert M.eigenvals(multiple=False) == {S.One: 3}
assert M.eigenvals(multiple=True) == [1, 1, 1]
assert M.eigenvects() == (
[(1, 3, [Matrix([1, 0, 0]),
Matrix([0, 1, 0]),
Matrix([0, 0, 1])])])
assert M.left_eigenvects() == (
[(1, 3, [Matrix([[1, 0, 0]]),
Matrix([[0, 1, 0]]),
Matrix([[0, 0, 1]])])])
M = Matrix([[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
assert M.eigenvals() == {2*S.One: 1, -S.One: 1, S.Zero: 1}
assert M.eigenvects() == (
[
(-1, 1, [Matrix([-1, 1, 0])]),
( 0, 1, [Matrix([0, -1, 1])]),
( 2, 1, [Matrix([R(2, 3), R(1, 3), 1])])
])
assert M.left_eigenvects() == (
[
(-1, 1, [Matrix([[-2, 1, 1]])]),
(0, 1, [Matrix([[-1, -1, 1]])]),
(2, 1, [Matrix([[1, 1, 1]])])
])
a = Symbol('a')
M = Matrix([[a, 0],
[0, 1]])
assert M.eigenvals() == {a: 1, S.One: 1}
M = Matrix([[1, -1],
[1, 3]])
assert M.eigenvects() == ([(2, 2, [Matrix(2, 1, [-1, 1])])])
assert M.left_eigenvects() == ([(2, 2, [Matrix([[1, 1]])])])
M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a = R(15, 2)
b = 3*33**R(1, 2)
c = R(13, 2)
d = (R(33, 8) + 3*b/8)
e = (R(33, 8) - 3*b/8)
def NS(e, n):
return str(N(e, n))
r = [
(a - b/2, 1, [Matrix([(12 + 24/(c - b/2))/((c - b/2)*e) + 3/(c - b/2),
(6 + 12/(c - b/2))/e, 1])]),
( 0, 1, [Matrix([1, -2, 1])]),
(a + b/2, 1, [Matrix([(12 + 24/(c + b/2))/((c + b/2)*d) + 3/(c + b/2),
(6 + 12/(c + b/2))/d, 1])]),
]
r1 = [(NS(r[i][0], 2), NS(r[i][1], 2),
[NS(j, 2) for j in r[i][2][0]]) for i in range(len(r))]
r = M.eigenvects()
r2 = [(NS(r[i][0], 2), NS(r[i][1], 2),
[NS(j, 2) for j in r[i][2][0]]) for i in range(len(r))]
assert sorted(r1) == sorted(r2)
eps = Symbol('eps', real=True)
M = Matrix([[abs(eps), I*eps ],
[-I*eps, abs(eps) ]])
assert M.eigenvects() == (
[
( 0, 1, [Matrix([[-I*eps/abs(eps)], [1]])]),
( 2*abs(eps), 1, [ Matrix([[I*eps/abs(eps)], [1]]) ] ),
])
assert M.left_eigenvects() == (
[
(0, 1, [Matrix([[I*eps/Abs(eps), 1]])]),
(2*Abs(eps), 1, [Matrix([[-I*eps/Abs(eps), 1]])])
])
M = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
M._eigenvects = M.eigenvects(simplify=False)
assert max(i.q for i in M._eigenvects[0][2][0]) > 1
M._eigenvects = M.eigenvects(simplify=True)
assert max(i.q for i in M._eigenvects[0][2][0]) == 1
M = Matrix([[Rational(1, 4), 1], [1, 1]])
assert M.eigenvects(simplify=True) == [
(Rational(5, 8) - sqrt(73)/8, 1, [Matrix([[-sqrt(73)/8 - Rational(3, 8)], [1]])]),
(Rational(5, 8) + sqrt(73)/8, 1, [Matrix([[Rational(-3, 8) + sqrt(73)/8], [1]])])]
assert M.eigenvects(simplify=False) == [
(Rational(5, 8) - sqrt(73)/8, 1, [Matrix([[-1/(-Rational(3, 8) + sqrt(73)/8)], [1]])]),
(Rational(5, 8) + sqrt(73)/8, 1, [Matrix([[8/(3 + sqrt(73))], [1]])])]
# issue 10719
assert Matrix([]).eigenvals() == {}
assert Matrix([]).eigenvects() == []
# issue 15119
raises(NonSquareMatrixError, lambda : Matrix([[1, 2], [0, 4], [0, 0]]).eigenvals())
raises(NonSquareMatrixError, lambda : Matrix([[1, 0], [3, 4], [5, 6]]).eigenvals())
raises(NonSquareMatrixError, lambda : Matrix([[1, 2, 3], [0, 5, 6]]).eigenvals())
raises(NonSquareMatrixError, lambda : Matrix([[1, 0, 0], [4, 5, 0]]).eigenvals())
raises(NonSquareMatrixError, lambda : Matrix([[1, 2, 3], [0, 5, 6]]).eigenvals(error_when_incomplete = False))
raises(NonSquareMatrixError, lambda : Matrix([[1, 0, 0], [4, 5, 0]]).eigenvals(error_when_incomplete = False))
# issue 15125
from sympy.core.function import count_ops
q = Symbol("q", positive = True)
m = Matrix([[-2, exp(-q), 1], [exp(q), -2, 1], [1, 1, -2]])
assert count_ops(m.eigenvals(simplify=False)) > count_ops(m.eigenvals(simplify=True))
assert count_ops(m.eigenvals(simplify=lambda x: x)) > count_ops(m.eigenvals(simplify=True))
assert isinstance(m.eigenvals(simplify=True, multiple=False), dict)
assert isinstance(m.eigenvals(simplify=True, multiple=True), list)
assert isinstance(m.eigenvals(simplify=lambda x: x, multiple=False), dict)
assert isinstance(m.eigenvals(simplify=lambda x: x, multiple=True), list)
def test_float_eigenvals():
m = Matrix([[1, .6, .6], [.6, .9, .9], [.9, .6, .6]])
evals = [
Rational(5, 4) - sqrt(385)/20,
sqrt(385)/20 + Rational(5, 4),
S.Zero]
n_evals = m.eigenvals(rational=True, multiple=True)
n_evals = sorted(n_evals)
s_evals = [x.evalf() for x in evals]
s_evals = sorted(s_evals)
for x, y in zip(n_evals, s_evals):
assert abs(x-y) < 10**-9
@XFAIL
def test_eigen_vects():
m = Matrix(2, 2, [1, 0, 0, I])
raises(NotImplementedError, lambda: m.is_diagonalizable(True))
# !!! bug because of eigenvects() or roots(x**2 + (-1 - I)*x + I, x)
# see issue 5292
assert not m.is_diagonalizable(True)
raises(MatrixError, lambda: m.diagonalize(True))
(P, D) = m.diagonalize(True)
def test_issue_8240():
# Eigenvalues of large triangular matrices
x, y = symbols('x y')
n = 200
diagonal_variables = [Symbol('x%s' % i) for i in range(n)]
M = [[0 for i in range(n)] for j in range(n)]
for i in range(n):
M[i][i] = diagonal_variables[i]
M = Matrix(M)
eigenvals = M.eigenvals()
assert len(eigenvals) == n
for i in range(n):
assert eigenvals[diagonal_variables[i]] == 1
eigenvals = M.eigenvals(multiple=True)
assert set(eigenvals) == set(diagonal_variables)
# with multiplicity
M = Matrix([[x, 0, 0], [1, y, 0], [2, 3, x]])
eigenvals = M.eigenvals()
assert eigenvals == {x: 2, y: 1}
eigenvals = M.eigenvals(multiple=True)
assert len(eigenvals) == 3
assert eigenvals.count(x) == 2
assert eigenvals.count(y) == 1
# EigenOnlyMatrix tests
def test_eigenvals():
M = EigenOnlyMatrix([[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
assert M.eigenvals() == {2*S.One: 1, -S.One: 1, S.Zero: 1}
# if we cannot factor the char poly, we raise an error
m = Matrix([
[3, 0, 0, 0, -3],
[0, -3, -3, 0, 3],
[0, 3, 0, 3, 0],
[0, 0, 3, 0, 3],
[3, 0, 0, 3, 0]])
raises(MatrixError, lambda: m.eigenvals())
def test_eigenvects():
M = EigenOnlyMatrix([[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
vecs = M.eigenvects()
for val, mult, vec_list in vecs:
assert len(vec_list) == 1
assert M*vec_list[0] == val*vec_list[0]
def test_left_eigenvects():
M = EigenOnlyMatrix([[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
vecs = M.left_eigenvects()
for val, mult, vec_list in vecs:
assert len(vec_list) == 1
assert vec_list[0]*M == val*vec_list[0]
@slow
def test_bidiagonalize():
M = Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
assert M.bidiagonalize() == M
assert M.bidiagonalize(upper=False) == M
assert M.bidiagonalize() == M
assert M.bidiagonal_decomposition() == (M, M, M)
assert M.bidiagonal_decomposition(upper=False) == (M, M, M)
assert M.bidiagonalize() == M
import random
#Real Tests
for real_test in range(2):
test_values = []
row = 2
col = 2
for _ in range(row * col):
value = random.randint(-1000000000, 1000000000)
test_values = test_values + [value]
# L -> Lower Bidiagonalization
# M -> Mutable Matrix
# N -> Immutable Matrix
# 0 -> Bidiagonalized form
# 1,2,3 -> Bidiagonal_decomposition matrices
# 4 -> Product of 1 2 3
M = Matrix(row, col, test_values)
N = ImmutableMatrix(M)
N1, N2, N3 = N.bidiagonal_decomposition()
M1, M2, M3 = M.bidiagonal_decomposition()
M0 = M.bidiagonalize()
N0 = N.bidiagonalize()
N4 = N1 * N2 * N3
M4 = M1 * M2 * M3
N2.simplify()
N4.simplify()
N0.simplify()
M0.simplify()
M2.simplify()
M4.simplify()
LM0 = M.bidiagonalize(upper=False)
LM1, LM2, LM3 = M.bidiagonal_decomposition(upper=False)
LN0 = N.bidiagonalize(upper=False)
LN1, LN2, LN3 = N.bidiagonal_decomposition(upper=False)
LN4 = LN1 * LN2 * LN3
LM4 = LM1 * LM2 * LM3
LN2.simplify()
LN4.simplify()
LN0.simplify()
LM0.simplify()
LM2.simplify()
LM4.simplify()
assert M == M4
assert M2 == M0
assert N == N4
assert N2 == N0
assert M == LM4
assert LM2 == LM0
assert N == LN4
assert LN2 == LN0
#Complex Tests
for complex_test in range(2):
test_values = []
size = 2
for _ in range(size * size):
real = random.randint(-1000000000, 1000000000)
comp = random.randint(-1000000000, 1000000000)
value = real + comp * I
test_values = test_values + [value]
M = Matrix(size, size, test_values)
N = ImmutableMatrix(M)
# L -> Lower Bidiagonalization
# M -> Mutable Matrix
# N -> Immutable Matrix
# 0 -> Bidiagonalized form
# 1,2,3 -> Bidiagonal_decomposition matrices
# 4 -> Product of 1 2 3
N1, N2, N3 = N.bidiagonal_decomposition()
M1, M2, M3 = M.bidiagonal_decomposition()
M0 = M.bidiagonalize()
N0 = N.bidiagonalize()
N4 = N1 * N2 * N3
M4 = M1 * M2 * M3
N2.simplify()
N4.simplify()
N0.simplify()
M0.simplify()
M2.simplify()
M4.simplify()
LM0 = M.bidiagonalize(upper=False)
LM1, LM2, LM3 = M.bidiagonal_decomposition(upper=False)
LN0 = N.bidiagonalize(upper=False)
LN1, LN2, LN3 = N.bidiagonal_decomposition(upper=False)
LN4 = LN1 * LN2 * LN3
LM4 = LM1 * LM2 * LM3
LN2.simplify()
LN4.simplify()
LN0.simplify()
LM0.simplify()
LM2.simplify()
LM4.simplify()
assert M == M4
assert M2 == M0
assert N == N4
assert N2 == N0
assert M == LM4
assert LM2 == LM0
assert N == LN4
assert LN2 == LN0
M = Matrix(18, 8, range(1, 145))
M = M.applyfunc(lambda i: Float(i))
assert M.bidiagonal_decomposition()[1] == M.bidiagonalize()
assert M.bidiagonal_decomposition(upper=False)[1] == M.bidiagonalize(upper=False)
a, b, c = M.bidiagonal_decomposition()
diff = a * b * c - M
assert abs(max(diff)) < 10**-12
def test_diagonalize():
m = EigenOnlyMatrix(2, 2, [0, -1, 1, 0])
raises(MatrixError, lambda: m.diagonalize(reals_only=True))
P, D = m.diagonalize()
assert D.is_diagonal()
assert D == Matrix([
[-I, 0],
[ 0, I]])
    # make sure we get floats out if floats are passed in
m = EigenOnlyMatrix(2, 2, [0, .5, .5, 0])
P, D = m.diagonalize()
assert all(isinstance(e, Float) for e in D.values())
assert all(isinstance(e, Float) for e in P.values())
_, D2 = m.diagonalize(reals_only=True)
assert D == D2
def test_is_diagonalizable():
a, b, c = symbols('a b c')
m = EigenOnlyMatrix(2, 2, [a, c, c, b])
assert m.is_symmetric()
assert m.is_diagonalizable()
assert not EigenOnlyMatrix(2, 2, [1, 1, 0, 1]).is_diagonalizable()
m = EigenOnlyMatrix(2, 2, [0, -1, 1, 0])
assert m.is_diagonalizable()
assert not m.is_diagonalizable(reals_only=True)
def test_jordan_form():
m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])
raises(NonSquareMatrixError, lambda: m.jordan_form())
# the next two tests test the cases where the old
# algorithm failed due to the fact that the block structure can
# *NOT* be determined from algebraic and geometric multiplicity alone
    # This can be seen most easily by computing the J.c.f. of a matrix that
    # is already in J.c.f.
m = EigenOnlyMatrix(4, 4, [2, 1, 0, 0,
0, 2, 1, 0,
0, 0, 2, 0,
0, 0, 0, 2
])
P, J = m.jordan_form()
assert m == J
m = EigenOnlyMatrix(4, 4, [2, 1, 0, 0,
0, 2, 0, 0,
0, 0, 2, 1,
0, 0, 0, 2
])
P, J = m.jordan_form()
assert m == J
A = Matrix([[ 2, 4, 1, 0],
[-4, 2, 0, 1],
[ 0, 0, 2, 4],
[ 0, 0, -4, 2]])
P, J = A.jordan_form()
assert simplify(P*J*P.inv()) == A
assert EigenOnlyMatrix(1, 1, [1]).jordan_form() == (
Matrix([1]), Matrix([1]))
assert EigenOnlyMatrix(1, 1, [1]).jordan_form(
calc_transform=False) == Matrix([1])
# make sure if we cannot factor the characteristic polynomial, we raise an error
m = Matrix([[3, 0, 0, 0, -3], [0, -3, -3, 0, 3], [0, 3, 0, 3, 0], [0, 0, 3, 0, 3], [3, 0, 0, 3, 0]])
raises(MatrixError, lambda: m.jordan_form())
# make sure that if the input has floats, the output does too
m = Matrix([
[ 0.6875, 0.125 + 0.1875*sqrt(3)],
[0.125 + 0.1875*sqrt(3), 0.3125]])
P, J = m.jordan_form()
assert all(isinstance(x, Float) or x == 0 for x in P)
assert all(isinstance(x, Float) or x == 0 for x in J)
def test_singular_values():
x = Symbol('x', real=True)
A = EigenOnlyMatrix([[0, 1*I], [2, 0]])
# if singular values can be sorted, they should be in decreasing order
assert A.singular_values() == [2, 1]
A = eye(3)
A[1, 1] = x
A[2, 2] = 5
vals = A.singular_values()
# since Abs(x) cannot be sorted, test set equality
assert set(vals) == {5, 1, Abs(x)}
A = EigenOnlyMatrix([[sin(x), cos(x)], [-cos(x), sin(x)]])
vals = [sv.trigsimp() for sv in A.singular_values()]
assert vals == [S.One, S.One]
A = EigenOnlyMatrix([
[2, 4],
[1, 3],
[0, 0],
[0, 0]
])
assert A.singular_values() == \
[sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221))]
assert A.T.singular_values() == \
[sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221)), 0, 0]
def test___eq__():
assert (EigenOnlyMatrix(
[[0, 1, 1],
[1, 0, 0],
[1, 1, 1]]) == {}) is False
def test_definite():
# Examples from Gilbert Strang, "Introduction to Linear Algebra"
# Positive definite matrices
m = Matrix([[2, -1, 0], [-1, 2, -1], [0, -1, 2]])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
m = Matrix([[5, 4], [4, 5]])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
# Positive semidefinite matrices
m = Matrix([[2, -1, -1], [-1, 2, -1], [-1, -1, 2]])
assert m.is_positive_definite == False
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
m = Matrix([[1, 2], [2, 4]])
assert m.is_positive_definite == False
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
# Examples from Mathematica documentation
# Non-hermitian positive definite matrices
m = Matrix([[2, 3], [4, 8]])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
m = Matrix([[1, 2*I], [-I, 4]])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
# Symbolic matrices examples
a = Symbol('a', positive=True)
b = Symbol('b', negative=True)
m = Matrix([[a, 0, 0], [0, a, 0], [0, 0, a]])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
m = Matrix([[b, 0, 0], [0, b, 0], [0, 0, b]])
assert m.is_positive_definite == False
assert m.is_positive_semidefinite == False
assert m.is_negative_definite == True
assert m.is_negative_semidefinite == True
assert m.is_indefinite == False
m = Matrix([[a, 0], [0, b]])
assert m.is_positive_definite == False
assert m.is_positive_semidefinite == False
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == True
m = Matrix([
[0.0228202735623867, 0.00518748979085398,
-0.0743036351048907, -0.00709135324903921],
[0.00518748979085398, 0.0349045359786350,
0.0830317991056637, 0.00233147902806909],
[-0.0743036351048907, 0.0830317991056637,
1.15859676366277, 0.340359081555988],
[-0.00709135324903921, 0.00233147902806909,
0.340359081555988, 0.928147644848199]
])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_indefinite == False
|
py | 1a422c02ba8a84f07f96df6fd0f2edf8ef20698d | #!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
from __future__ import print_function, absolute_import
import os
import glob
import sys
import platform
import subprocess
import difflib
import filecmp
import shutil
from itertools import chain
from optparse import OptionParser
def make_relpath (path, start=os.curdir):
"Wrapper around os.path.relpath which always uses '/' as the separator."
p = os.path.relpath (path, start)
return p if sys.platform != "win32" else p.replace ('\\', '/')
#
# Get standard testsuite test arguments: srcdir exepath
#
srcdir = "."
tmpdir = "."
OSL_BUILD_DIR = os.environ.get("OSL_BUILD_DIR", "..")
OSL_SOURCE_DIR = os.environ.get("OSL_SOURCE_DIR", "../../..")
OSL_TESTSUITE_DIR = os.path.join(OSL_SOURCE_DIR, "testsuite")
OpenImageIO_ROOT = os.environ.get("OpenImageIO_ROOT", None)
OSL_TESTSUITE_ROOT = make_relpath(os.getenv('OSL_TESTSUITE_ROOT',
'../../../testsuite'))
os.environ['OSLHOME'] = os.path.join(OSL_SOURCE_DIR, "src")
OSL_REGRESSION_TEST = os.environ.get("OSL_REGRESSION_TEST", None)
# Options for the command line
parser = OptionParser()
parser.add_option("-p", "--path", help="add to executable path",
action="store", type="string", dest="path", default="")
parser.add_option("--devenv-config", help="use a MS Visual Studio configuration",
action="store", type="string", dest="devenv_config", default="")
parser.add_option("--solution-path", help="MS Visual Studio solution path",
action="store", type="string", dest="solution_path", default="")
(options, args) = parser.parse_args()
if args and len(args) > 0 :
srcdir = args[0]
srcdir = os.path.abspath (srcdir) + "/"
os.chdir (srcdir)
if args and len(args) > 1 :
OSL_BUILD_DIR = args[1]
OSL_BUILD_DIR = os.path.normpath (OSL_BUILD_DIR)
tmpdir = "."
tmpdir = os.path.abspath (tmpdir)
if platform.system() == 'Windows' :
redirect = " >> out.txt 2>&1 "
else :
redirect = " >> out.txt 2>>out.txt "
refdir = "ref/"
mytest = os.path.split(os.path.abspath(os.getcwd()))[-1]
if str(mytest).endswith('.opt') or str(mytest).endswith('.optix') :
mytest = mytest.split('.')[0]
test_source_dir = os.getenv('OSL_TESTSUITE_SRC',
os.path.join(OSL_TESTSUITE_ROOT, mytest))
#test_source_dir = os.path.join(OSL_TESTSUITE_DIR,
# os.path.basename(os.path.abspath(srcdir)))
command = ""
outputs = [ "out.txt" ] # default
failureok = 0
failthresh = 0.004
hardfail = 0.01
failpercent = 0.02
cleanup_on_success = False
if int(os.getenv('TESTSUITE_CLEANUP_ON_SUCCESS', '0')) :
    cleanup_on_success = True
oslcargs = "-Wall"
image_extensions = [ ".tif", ".tx", ".exr", ".jpg", ".png", ".rla",
".dpx", ".iff", ".psd" ]
compile_osl_files = True
splitsymbol = ';'
#print ("srcdir = " + srcdir)
#print ("tmpdir = " + tmpdir)
#print ("path = " + path)
#print ("refdir = " + refdir)
print ("test source dir = ", test_source_dir)
if platform.system() == 'Windows' :
if not os.path.exists("./ref") :
test_source_ref_dir = os.path.join (test_source_dir, "ref")
if os.path.exists(test_source_ref_dir) :
shutil.copytree (test_source_ref_dir, "./ref")
if os.path.exists (os.path.join (test_source_dir, "src")) and not os.path.exists("./src") :
shutil.copytree (os.path.join (test_source_dir, "src"), "./src")
if not os.path.exists(os.path.abspath("data")) :
shutil.copytree (test_source_dir, os.path.abspath("data"))
else :
if not os.path.exists("./ref") :
test_source_ref_dir = os.path.join (test_source_dir, "ref")
if os.path.exists(test_source_ref_dir) :
os.symlink (test_source_ref_dir, "./ref")
if os.path.exists (os.path.join (test_source_dir, "src")) and not os.path.exists("./src") :
os.symlink (os.path.join (test_source_dir, "src"), "./src")
if not os.path.exists("./data") :
os.symlink (test_source_dir, "./data")
pythonbin = 'python'
if os.getenv("PYTHON_VERSION") :
pythonbin += os.getenv("PYTHON_VERSION")
#print ("pythonbin = ", pythonbin)
###########################################################################
# Handy functions...
# Compare two text files. Returns 0 if they are equal otherwise returns
# a non-zero value and writes the differences to "diff_file".
# Based on the command-line interface to difflib example from the Python
# documentation
def text_diff (fromfile, tofile, diff_file=None):
import time
try:
fromdate = time.ctime (os.stat (fromfile).st_mtime)
todate = time.ctime (os.stat (tofile).st_mtime)
fromlines = open (fromfile, 'r').readlines()
tolines = open (tofile, 'r').readlines()
except:
print ("Unexpected error:", sys.exc_info()[0])
return -1
diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile,
fromdate, todate)
# Diff is a generator, but since we need a way to tell if it is
# empty we just store all the text in advance
diff_lines = [l for l in diff]
if not diff_lines:
return 0
if diff_file:
try:
open (diff_file, 'w').writelines (diff_lines)
print ("Diff " + fromfile + " vs " + tofile + " was:\n-------")
# print (diff)
print ("".join(diff_lines))
except:
print ("Unexpected error:", sys.exc_info()[0])
return 1
def run_app (app, silent=False, concat=True) :
command = app
if not silent :
command += redirect
if concat:
command += " ;\n"
return command
def osl_app (app):
apath = os.path.join(OSL_BUILD_DIR, "bin")
if (platform.system () == 'Windows'):
# when we use Visual Studio, built applications are stored
# in the app/$(OutDir)/ directory, e.g., Release or Debug.
apath = os.path.join(apath, options.devenv_config)
return os.path.join(apath, app) + " "
def oiio_app (app):
if OpenImageIO_ROOT :
return os.path.join (OpenImageIO_ROOT, "bin", app) + " "
else :
return app + " "
# Construct a command that will compile the shader file, appending output to
# the file "out.txt".
def oslc (args) :
return (osl_app("oslc") + oslcargs + " " + args + redirect + " ;\n")
# Construct a command that will run oslinfo, appending output to
# the file "out.txt".
def oslinfo (args) :
return (osl_app("oslinfo") + args + redirect + " ;\n")
# Construct a command that runs oiiotool, appending console output
# to the file "out.txt".
def oiiotool (args, silent=False) :
oiiotool_cmd = (oiio_app("oiiotool") + args)
if not silent :
oiiotool_cmd += redirect
oiiotool_cmd += " ;\n"
    return oiiotool_cmd
# Construct a command that runs maketx, appending console output
# to the file "out.txt".
def maketx (args) :
return (oiio_app("maketx") + args + redirect + " ;\n")
# Construct a command that will compare two images, appending output to
# the file "out.txt". We allow a small number of pixels to have up to
# 1 LSB (8 bit) error, it's very hard to make different platforms and
# compilers always match to every last floating point bit.
def oiiodiff (fileA, fileB, extraargs="", silent=True, concat=True) :
command = (oiio_app("idiff") + "-a"
+ " -fail " + str(failthresh)
+ " -failpercent " + str(failpercent)
+ " -hardfail " + str(hardfail)
+ " -warn " + str(2*failthresh)
+ " -warnpercent " + str(failpercent)
+ " " + extraargs + " " + make_relpath(fileA,tmpdir)
+ " " + make_relpath(fileB,tmpdir))
if not silent :
command += redirect
if concat:
command += " ;\n"
return command
# Construct a command that run testshade with the specified arguments,
# appending output to the file "out.txt".
def testshade (args) :
if os.environ.__contains__('OSL_TESTSHADE_NAME') :
testshadename = os.environ['OSL_TESTSHADE_NAME'] + " "
else :
testshadename = osl_app("testshade")
return (testshadename + args + redirect + " ;\n")
# Construct a command that run testrender with the specified arguments,
# appending output to the file "out.txt".
def testrender (args) :
os.environ["optix_log_level"] = "0"
return (osl_app("testrender") + " " + args + redirect + " ;\n")
# Construct a command that run testoptix with the specified arguments,
# appending output to the file "out.txt".
def testoptix (args) :
# Disable OptiX logging to prevent messages from the library from
# appearing in the program output.
os.environ["optix_log_level"] = "0"
return (osl_app("testoptix") + " " + args + redirect + " ;\n")
# Run 'command'. For each file in 'outputs', compare it to the copy
# in 'ref/'. If all outputs match their reference copies, return 0
# to pass. If any outputs do not match their references return 1 to
# fail.
def runtest (command, outputs, failureok=0, failthresh=0, failpercent=0, regression=None) :
# print ("working dir = " + tmpdir)
os.chdir (srcdir)
open ("out.txt", "w").close() # truncate out.txt
if options.path != "" :
sys.path = [options.path] + sys.path
test_environ = None
if (platform.system () == 'Windows') and (options.solution_path != "") and \
(os.path.isdir (options.solution_path)):
test_environ = os.environ
libOIIO_path = options.solution_path + "\\libOpenImageIO\\"
if options.devenv_config != "":
libOIIO_path = libOIIO_path + '\\' + options.devenv_config
test_environ["PATH"] = libOIIO_path + ';' + test_environ["PATH"]
if regression == "BATCHED" :
if test_environ == None :
test_environ = os.environ
test_environ["TESTSHADE_BATCHED"] = "1"
print ("command = ", command)
for sub_command in command.split(splitsymbol):
sub_command = sub_command.lstrip().rstrip()
#print ("running = ", sub_command)
cmdret = subprocess.call (sub_command, shell=True, env=test_environ)
if cmdret != 0 and failureok == 0 :
print ("#### Error: this command failed: ", sub_command)
print ("FAIL")
print ("Output was:\n--------")
print (open ("out.txt", 'r').read())
print ("--------")
return (1)
err = 0
if regression == "BASELINE" :
if not os.path.exists("./baseline") :
os.mkdir("./baseline")
for out in outputs :
shutil.move(out, "./baseline/"+out)
else :
for out in outputs :
extension = os.path.splitext(out)[1]
ok = 0
# We will first compare out to ref/out, and if that fails, we
# will compare it to everything else with the same extension in
# the ref directory. That allows us to have multiple matching
# variants for different platforms, etc.
if regression != None:
testfiles = ["baseline/"+out]
else :
testfiles = ["ref/"+out] + glob.glob (os.path.join ("ref", "*"+extension))
for testfile in (testfiles) :
# print ("comparing " + out + " to " + testfile)
if extension == ".tif" or extension == ".exr" :
# images -- use idiff
cmpcommand = oiiodiff (out, testfile, concat=False, silent=True)
# print ("cmpcommand = ", cmpcommand)
cmpresult = os.system (cmpcommand)
elif extension == ".txt" :
cmpresult = text_diff (out, testfile, out + ".diff")
else :
# anything else
cmpresult = 0 if filecmp.cmp (out, testfile) else 1
if cmpresult == 0 :
ok = 1
break # we're done
if ok :
# if extension == ".tif" or extension == ".exr" or extension == ".jpg" or extension == ".png":
# # If we got a match for an image, save the idiff results
# os.system (oiiodiff (out, testfile, silent=False))
print ("PASS: ", out, " matches ", testfile)
else :
err = 1
print ("NO MATCH for ", out)
print ("FAIL ", out)
if extension == ".txt" :
# If we failed to get a match for a text file, print the
# file and the diff, for easy debugging.
print ("-----" + out + "----->")
print (open(out,'r').read() + "<----------")
print ("Diff was:\n-------")
print (open (out+".diff", 'r').read())
if extension == ".tif" or extension == ".exr" or extension == ".jpg" or extension == ".png":
# If we failed to get a match for an image, send the idiff
# results to the console
testfile = None
if regression != None:
testfile = os.path.join ("baseline/", out)
else :
testfile = os.path.join (refdir, out)
os.system (oiiodiff (out, testfile, silent=False))
return (err)
##########################################################################
#
# Read the individual run.py file for this test, which will define
# command and outputs.
#
with open(os.path.join(test_source_dir,"run.py")) as f:
code = compile(f.read(), "run.py", 'exec')
exec (code)
# if os.path.exists("run.py") :
# execfile ("run.py")
# Allow a little more slop for slight pixel differences when in DEBUG mode.
if "DEBUG" in os.environ and os.environ["DEBUG"] :
failthresh *= 2.0
hardfail *= 2.0
failpercent *= 2.0
# Force out.txt to be in the outputs
##if "out.txt" not in outputs :
## outputs.append ("out.txt")
# Force any local shaders to compile automatically, prepending the
# compilation onto whatever else the individual run.py file requested.
for filetype in [ "*.osl", "*.h", "*.oslgroup", "*.xml" ] :
for testfile in glob.glob (os.path.join (test_source_dir, filetype)) :
shutil.copyfile (testfile, os.path.basename(testfile))
if compile_osl_files :
compiles = ""
oslfiles = glob.glob ("*.osl")
oslfiles.sort() ## sort the shaders to compile so that they always compile in the same order
for testfile in oslfiles :
compiles += oslc (testfile)
command = compiles + command
# If either out.exr or out.tif is in the reference directory but somehow
# is not in the outputs list, put it there anyway!
if (os.path.exists("ref/out.exr") and ("out.exr" not in outputs)) :
outputs.append ("out.exr")
if (os.path.exists("ref/out.tif") and ("out.tif" not in outputs)) :
outputs.append ("out.tif")
# Run the test and check the outputs
if OSL_REGRESSION_TEST != None :
# need to produce baseline images
ret = runtest (command, outputs, failureok=failureok,
failthresh=failthresh, failpercent=failpercent, regression="BASELINE")
if ret == 0 :
# run again comparing against baseline, not ref
ret = runtest (command, outputs, failureok=failureok,
failthresh=failthresh, failpercent=failpercent, regression=OSL_REGRESSION_TEST)
else :
ret = runtest (command, outputs, failureok=failureok,
failthresh=failthresh, failpercent=failpercent)
if ret == 0 and cleanup_on_success :
for ext in image_extensions + [ ".txt", ".diff" ] :
files = glob.iglob (srcdir + '/*' + ext)
baselineFiles = glob.iglob (srcdir + '/baseline/*' + ext)
for f in chain(files,baselineFiles) :
os.remove(f)
#print('REMOVED ', f)
sys.exit (ret)
|
py | 1a422dc216ceec4c29ab35a46966eb1ae20ee68c | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mw_menus.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from qtpy import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(596, 717)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_7 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_7.setObjectName("gridLayout_7")
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout.setObjectName("gridLayout")
self.tabWidget = QtWidgets.QTabWidget(self.groupBox_2)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.gridLayout_4 = QtWidgets.QGridLayout(self.tab)
self.gridLayout_4.setObjectName("gridLayout_4")
self.groupBox_3 = QtWidgets.QGroupBox(self.tab)
self.groupBox_3.setObjectName("groupBox_3")
self.gridLayout_5 = QtWidgets.QGridLayout(self.groupBox_3)
self.gridLayout_5.setObjectName("gridLayout_5")
self.label_2 = QtWidgets.QLabel(self.groupBox_3)
self.label_2.setObjectName("label_2")
self.gridLayout_5.addWidget(self.label_2, 0, 0, 1, 1)
self.lineEdit = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEdit.setObjectName("lineEdit")
self.gridLayout_5.addWidget(self.lineEdit, 0, 1, 1, 1)
self.frame = QtWidgets.QFrame(self.groupBox_3)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.formLayout = QtWidgets.QFormLayout(self.frame)
self.formLayout.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName("formLayout")
self.label_3 = QtWidgets.QLabel(self.frame)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.lineEdit_2 = QtWidgets.QLineEdit(self.frame)
self.lineEdit_2.setObjectName("lineEdit_2")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEdit_2)
self.listWidget_2 = QtWidgets.QListWidget(self.frame)
self.listWidget_2.setObjectName("listWidget_2")
item = QtWidgets.QListWidgetItem()
self.listWidget_2.addItem(item)
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.listWidget_2)
self.gridLayout_5.addWidget(self.frame, 2, 0, 1, 2)
self.listWidget = QtWidgets.QListWidget(self.groupBox_3)
self.listWidget.setObjectName("listWidget")
item = QtWidgets.QListWidgetItem()
self.listWidget.addItem(item)
self.gridLayout_5.addWidget(self.listWidget, 1, 1, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_3, 1, 0, 1, 1)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.gridLayout_6 = QtWidgets.QGridLayout(self.tab_2)
self.gridLayout_6.setObjectName("gridLayout_6")
self.label_4 = QtWidgets.QLabel(self.tab_2)
self.label_4.setObjectName("label_4")
self.gridLayout_6.addWidget(self.label_4, 0, 0, 1, 1)
self.tabWidget.addTab(self.tab_2, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.tabWidget.addTab(self.tab_3, "")
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName("tab_4")
self.tabWidget.addTab(self.tab_4, "")
self.tab_5 = QtWidgets.QWidget()
self.tab_5.setObjectName("tab_5")
self.tabWidget.addTab(self.tab_5, "")
self.tab_6 = QtWidgets.QWidget()
self.tab_6.setObjectName("tab_6")
self.tabWidget.addTab(self.tab_6, "")
self.tab_7 = QtWidgets.QWidget()
self.tab_7.setObjectName("tab_7")
self.tabWidget.addTab(self.tab_7, "")
self.tab_8 = QtWidgets.QWidget()
self.tab_8.setObjectName("tab_8")
self.tabWidget.addTab(self.tab_8, "")
self.tab_9 = QtWidgets.QWidget()
self.tab_9.setObjectName("tab_9")
self.tabWidget.addTab(self.tab_9, "")
self.tab_10 = QtWidgets.QWidget()
self.tab_10.setObjectName("tab_10")
self.tabWidget.addTab(self.tab_10, "")
self.tab_11 = QtWidgets.QWidget()
self.tab_11.setObjectName("tab_11")
self.tabWidget.addTab(self.tab_11, "")
self.tab_12 = QtWidgets.QWidget()
self.tab_12.setObjectName("tab_12")
self.gridLayout_3 = QtWidgets.QGridLayout(self.tab_12)
self.gridLayout_3.setObjectName("gridLayout_3")
self.tabWidget.addTab(self.tab_12, "")
self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
self.gridLayout_7.addWidget(self.groupBox_2, 0, 0, 1, 1)
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setObjectName("groupBox")
self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label = QtWidgets.QLabel(self.groupBox)
self.label.setLineWidth(0)
self.label.setTextFormat(QtCore.Qt.RichText)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setWordWrap(True)
self.label.setOpenExternalLinks(True)
self.label.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
self.gridLayout_7.addWidget(self.groupBox, 1, 0, 1, 1)
self.label_71 = QtWidgets.QLabel(self.centralwidget)
self.label_71.setAlignment(QtCore.Qt.AlignCenter)
self.label_71.setObjectName("label_71")
self.gridLayout_7.addWidget(self.label_71, 2, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 596, 25))
self.menubar.setObjectName("menubar")
self.menuMenu = QtWidgets.QMenu(self.menubar)
self.menuMenu.setObjectName("menuMenu")
self.menuMenuSub = QtWidgets.QMenu(self.menuMenu)
self.menuMenuSub.setObjectName("menuMenuSub")
self.menuMenuDelayed = QtWidgets.QMenu(self.menubar)
self.menuMenuDelayed.setObjectName("menuMenuDelayed")
self.menuMenuSubDelayed = QtWidgets.QMenu(self.menuMenuDelayed)
self.menuMenuSubDelayed.setObjectName("menuMenuSubDelayed")
self.menuMenuCheckale = QtWidgets.QMenu(self.menubar)
self.menuMenuCheckale.setObjectName("menuMenuCheckale")
self.menuAbout = QtWidgets.QMenu(self.menubar)
self.menuAbout.setObjectName("menuAbout")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.toolBarDelayed = QtWidgets.QToolBar(MainWindow)
self.toolBarDelayed.setObjectName("toolBarDelayed")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBarDelayed)
self.toolBarCheckable = QtWidgets.QToolBar(MainWindow)
self.toolBarCheckable.setObjectName("toolBarCheckable")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBarCheckable)
MainWindow.insertToolBarBreak(self.toolBarCheckable)
self.actionActionA = QtWidgets.QAction(MainWindow)
self.actionActionA.setObjectName("actionActionA")
self.actionActionSubA = QtWidgets.QAction(MainWindow)
self.actionActionSubA.setObjectName("actionActionSubA")
self.actionActionSubB = QtWidgets.QAction(MainWindow)
self.actionActionSubB.setObjectName("actionActionSubB")
self.actionActionDelayedA = QtWidgets.QAction(MainWindow)
self.actionActionDelayedA.setObjectName("actionActionDelayedA")
self.actionActionDelayedSubA = QtWidgets.QAction(MainWindow)
self.actionActionDelayedSubA.setObjectName("actionActionDelayedSubA")
self.actionActionCheckableA = QtWidgets.QAction(MainWindow)
self.actionActionCheckableA.setCheckable(True)
self.actionActionCheckableA.setObjectName("actionActionCheckableA")
self.actionActionCheckableSubAChecked = QtWidgets.QAction(MainWindow)
self.actionActionCheckableSubAChecked.setCheckable(True)
self.actionActionCheckableSubAChecked.setChecked(True)
self.actionActionCheckableSubAChecked.setObjectName("actionActionCheckableSubAChecked")
self.actionActionCheckableSubAUnchecked = QtWidgets.QAction(MainWindow)
self.actionActionCheckableSubAUnchecked.setCheckable(True)
self.actionActionCheckableSubAUnchecked.setObjectName("actionActionCheckableSubAUnchecked")
self.menuMenuSub.addAction(self.actionActionSubA)
self.menuMenuSub.addAction(self.actionActionSubB)
self.menuMenu.addAction(self.actionActionA)
self.menuMenu.addAction(self.menuMenuSub.menuAction())
self.menuMenuSubDelayed.addAction(self.actionActionDelayedSubA)
self.menuMenuDelayed.addAction(self.actionActionDelayedA)
self.menuMenuDelayed.addAction(self.menuMenuSubDelayed.menuAction())
self.menuMenuCheckale.addAction(self.actionActionCheckableA)
self.menubar.addAction(self.menuMenu.menuAction())
self.menubar.addAction(self.menuMenuDelayed.menuAction())
self.menubar.addAction(self.menuMenuCheckale.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.menubar.addAction(self.menuAbout.menuAction())
self.toolBar.addAction(self.actionActionA)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionActionSubA)
self.toolBar.addAction(self.actionActionSubB)
self.toolBarDelayed.addAction(self.actionActionDelayedA)
self.toolBarDelayed.addSeparator()
self.toolBarDelayed.addAction(self.actionActionDelayedSubA)
self.toolBarCheckable.addAction(self.actionActionCheckableA)
self.toolBarCheckable.addSeparator()
self.toolBarCheckable.addAction(self.actionActionCheckableSubAChecked)
self.toolBarCheckable.addAction(self.actionActionCheckableSubAUnchecked)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.lineEdit, self.tabWidget)
MainWindow.setTabOrder(self.tabWidget, self.lineEdit_2)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.groupBox_2.setTitle(_translate("MainWindow", "Issue #115 - Tabs scroller buttons"))
self.groupBox_3.setTitle(_translate("MainWindow", "Issue #123 - Missing borders"))
self.label_2.setText(_translate("MainWindow", "TextLabel"))
self.lineEdit.setText(_translate("MainWindow", "Inside tab, outside frame"))
self.label_3.setText(_translate("MainWindow", "TextLabel"))
self.lineEdit_2.setText(_translate("MainWindow", "Inside tab and frame"))
__sortingEnabled = self.listWidget_2.isSortingEnabled()
self.listWidget_2.setSortingEnabled(False)
item = self.listWidget_2.item(0)
item.setText(_translate("MainWindow", "ListWidget"))
self.listWidget_2.setSortingEnabled(__sortingEnabled)
__sortingEnabled = self.listWidget.isSortingEnabled()
self.listWidget.setSortingEnabled(False)
item = self.listWidget.item(0)
item.setText(_translate("MainWindow", "ListWidget"))
self.listWidget.setSortingEnabled(__sortingEnabled)
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1"))
self.label_4.setText(_translate("MainWindow", "TextLabel"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "Page"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("MainWindow", "Page"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate("MainWindow", "Page"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_6), _translate("MainWindow", "Page"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_7), _translate("MainWindow", "Page"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_8), _translate("MainWindow", "Page"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_9), _translate("MainWindow", "Page"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_10), _translate("MainWindow", "Page"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_11), _translate("MainWindow", "Page"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_12), _translate("MainWindow", "Page"))
self.groupBox.setTitle(_translate("MainWindow", "Issue #112 - Hyperlinks color"))
self.label.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><a href=\"https://github.com/ColinDuquesnoy/QDarkStyleSheet/issues/112\"><span style=\" font-size:10pt; text-decoration: underline; color:#0000ff;\">Hyperlink Example</span></a></p><p align=\"center\"><span style=\" font-size:10pt; color:#7d7d7d;\">CSS for the documents (RichText) is not the same as the application. We cannot change the internal content CSS, e.g., hyperlinks. We suggest you use the middle tons (0-255, use 125), so this works for both white and dark theme (this color). The original color is the blue link on top.</span></p><p align=\"center\"><br/></p></body></html>"))
self.label_71.setText(_translate("MainWindow", "Inside Central Widget"))
self.menuMenu.setTitle(_translate("MainWindow", "Menu"))
self.menuMenuSub.setTitle(_translate("MainWindow", "Menu Sub"))
self.menuMenuDelayed.setTitle(_translate("MainWindow", "Menu Delayed"))
self.menuMenuSubDelayed.setTitle(_translate("MainWindow", "Menu Sub Delayed"))
self.menuMenuCheckale.setTitle(_translate("MainWindow", "Menu Checkable"))
self.menuAbout.setTitle(_translate("MainWindow", "About QDarkStyle"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.toolBar.setWindowTitle(_translate("MainWindow", "Tool bar actions"))
self.toolBarDelayed.setWindowTitle(_translate("MainWindow", "Tool bar actions delayed"))
self.toolBarCheckable.setWindowTitle(_translate("MainWindow", "Tool bar action checkable"))
self.actionActionA.setText(_translate("MainWindow", "Action A"))
self.actionActionSubA.setText(_translate("MainWindow", "Action A Sub"))
self.actionActionSubA.setToolTip(_translate("MainWindow", "Action A Sub"))
self.actionActionSubB.setText(_translate("MainWindow", "Action B Sub"))
self.actionActionDelayedA.setText(_translate("MainWindow", "Action Delayed A"))
self.actionActionDelayedA.setToolTip(_translate("MainWindow", "Action Delayed A"))
self.actionActionDelayedSubA.setText(_translate("MainWindow", "Action Delayed Sub A"))
self.actionActionDelayedSubA.setToolTip(_translate("MainWindow", "Action Delayed Sub A"))
self.actionActionCheckableA.setText(_translate("MainWindow", "Action Checkable A"))
self.actionActionCheckableA.setToolTip(_translate("MainWindow", "Action Checkable A"))
self.actionActionCheckableSubAChecked.setText(_translate("MainWindow", "Action Checkable Sub A Checked"))
self.actionActionCheckableSubAChecked.setToolTip(_translate("MainWindow", "Action Checkable Sub A Checked"))
self.actionActionCheckableSubAUnchecked.setText(_translate("MainWindow", "Action Checkable Sub A Unchecked"))
self.actionActionCheckableSubAUnchecked.setToolTip(_translate("MainWindow", "Action Checkable Sub A Unchecked"))
|
py | 1a422dce6d7d4d0b06015d2043e262f4b8f845be | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 3 14:11:25 2017
@author: juan
"""
# This program implements the natural cubic spline, i.e. the second
# derivative of the interpolant is forced to zero at both endpoints.
import numpy as np
def deltaGrid(grid):
deltas = ()
for i in range(1, len(grid)):
deltas += (grid[i] - grid[i - 1], )
return deltas
def firstLinearRelation(functionValues, deltas):
    b = np.zeros(len(deltas) - 1)  # float array; an int array would silently truncate the values assigned below
for j in range(len(b)):
b[j] = 6 * (((functionValues[j + 2] - functionValues[j + 1])/(deltas[j + 1]))
- ((functionValues[j + 1] - functionValues[j])/(deltas[j])))
return b
def firstVector(functionValues, deltas):
a = np.array([0])
    c = np.array([0])  # natural boundary condition: the second derivative vanishes at both endpoints
b = firstLinearRelation(functionValues, deltas)
return np.concatenate((a, b, c))
def secondLinearRelation(rowLength, j, deltas):
a = np.array([deltas[j + 1], 2 * (deltas[j] + deltas[j + 1]), deltas[j]])
b = np.array([0])
c = a
for i in range(j):
c = np.concatenate((b, c))
for i in range(rowLength - (j + 2)):
c = np.concatenate((c, b))
return c
def matrix(deltas):
firstRow = np.array([1])
lastRow = np.array([1])
zero = np.array([0])
for i in range(len(deltas)):
firstRow = np.concatenate((firstRow, zero))
for i in range(len(deltas)):
lastRow = np.concatenate((zero, lastRow))
matrix = firstRow
for i in range(len(deltas) - 1):
matrix = np.vstack((matrix, secondLinearRelation(len(deltas), i, deltas)))
matrix = np.vstack((matrix, lastRow))
return matrix
def Derivatives(grid, functionValues):
deltas = deltaGrid(grid)
y = firstVector(functionValues, deltas)
matriz = matrix(deltas)
sigmas = np.linalg.solve(matriz, y)
return sigmas
def cubicPolynomialCoeficients(lowerLimit, upperLimit,
lowerLimitValue, upperLimitValue,
lowerDerivative, upperDerivative):
A = np.matrix([[1, lowerLimit, lowerLimit ** 2, lowerLimit ** 3],
[1, upperLimit, upperLimit ** 2, upperLimit ** 3],
[0, 0, 2, 6 * (lowerLimit ** 1)],
[0, 0, 2, 6 * (upperLimit ** 1)]])
y = np.array([lowerLimitValue, upperLimitValue, lowerDerivative, upperDerivative])
x = np.linalg.solve(A, y)
return x
def closestIntervalIndex(x, grid):
for i in range(len(grid) - 1):
if grid[i] <= x and x <= grid[i + 1]:
return i
if x < grid[0]:
return 0
return len(grid)-2
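# Doctest-style illustration of the clamping behaviour above (hypothetical
# grid): points below the grid fall into the first interval, points above it
# into the last one.
#
# >>> closestIntervalIndex(2.5, [0, 1, 2, 3])
# 2
# >>> closestIntervalIndex(-1, [0, 1, 2, 3])
# 0
# >>> closestIntervalIndex(10, [0, 1, 2, 3])
# 2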
def polynomialFromCoeficients(x, coeficients):
value = 0.0
for i in range(len(coeficients)):
value += coeficients[i] * (x ** i)
return value
def naturalCubicSpline(x, sample, functionValues,derivatives):
closest = closestIntervalIndex(x, sample)
x_j = sample[closest]
x_j_1 = sample[closest + 1]
f_x_j = functionValues[closest]
f_x_j_1 = functionValues[closest + 1]
f_2_x_j = derivatives[closest]
f_2_x_j_1 = derivatives[closest + 1]
coeficients = cubicPolynomialCoeficients(x_j, x_j_1,
f_x_j, f_x_j_1,
f_2_x_j, f_2_x_j_1
)
spline = polynomialFromCoeficients(x, coeficients)
return spline
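# Minimal usage sketch (hypothetical sample data): solve for the second
# derivatives on a small grid, then evaluate the spline. At the knots the
# spline should reproduce the data exactly, since each interval polynomial is
# constrained to the function values at both of its endpoints.
if __name__ == '__main__':
    grid = [0.0, 1.0, 2.0, 3.0]
    values = [0.0, 1.0, 0.0, 1.0]
    sigmas = Derivatives(grid, values)
    for x, f in zip(grid, values):
        assert abs(naturalCubicSpline(x, grid, values, sigmas) - f) < 1e-9
    print(naturalCubicSpline(1.5, grid, values, sigmas))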
|
py | 1a422e4156237de1cb2307723d2815c6e365e2cc | import requests
from model.json_check import *
from model.input_data import *
# returns all computer objects in the system
def test_GetV1AllServersCode200():
data = "success"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/servers/", auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["status"]
assert data == n
# returns a specific computer object
def test_GetV1ServersByIdCode200():
data = "success"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/servers/"+slave, auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["status"]
assert data == n
def test_GetV1ServerssByIdCode401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/servers/"+slave, auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
def test_GetV1ServerssByIdCode404():
data = "Unknown server id:0"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/servers/0", auth=auth)
user_resp_code = "404"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["message"]
assert data == n
|
py | 1a422ed35bb693e2872577fa55eaf1ce62234532 | # Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import io
import os
import sys
from shutil import rmtree
from setuptools import Command
from setuptools import find_packages
from setuptools import setup
# Configure library params.
NAME = "dcgan_pytorch"
DESCRIPTION = "Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks."
URL = "https://github.com/Lornatang/DCGAN-PyTorch"
EMAIL = "[email protected]"
AUTHOR = "Liu Goodfellow"
REQUIRES_PYTHON = ">=3.8.0"
VERSION = "1.0.0"
# Libraries that must be installed.
REQUIRED = [
"torch"
]
# The following libraries need to be installed if you want to run all of the scripts.
EXTRAS = {
}
# Find the current running location.
here = os.path.abspath(os.path.dirname(__file__))
# About README file description.
try:
with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Set Current Library Version.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, "__version__.py")) as f:
exec(f.read(), about)
else:
about["__version__"] = VERSION
class UploadCommand(Command):
description = "Build and publish the package."
user_options = []
@staticmethod
def status(s):
print("\033[1m{0}\033[0m".format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status("Removing previous builds…")
rmtree(os.path.join(here, "dist"))
except OSError:
pass
self.status("Building Source and Wheel (universal) distribution…")
os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
self.status("Uploading the package to PyPI via Twine…")
os.system("twine upload dist/*")
self.status("Pushing git tags…")
os.system("git tag v{0}".format(about["__version__"]))
os.system("git push --tags")
sys.exit()
setup(name=NAME,
version=about["__version__"],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license="Apache",
classifiers=[
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3 :: Only"
],
cmdclass={
"upload": UploadCommand,
},
)
|
py | 1a422fa0f5e597e7dfd77ff703b5d734b1a0cb57 | # qubit number=4
# total number=33
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=10
prog += X(3) # number=11
prog += H(3) # number=13
prog += CZ(0,3) # number=14
prog += H(1) # number=18
prog += CZ(3,1) # number=19
prog += Z(3) # number=25
prog += H(1) # number=20
prog += RX(-3.141592653589793,3) # number=26
prog += H(3) # number=15
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(2) # number=17
prog += H(3) # number=4
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=9
prog += H(0) # number=27
prog += CZ(1,0) # number=28
prog += H(0) # number=29
prog += H(0) # number=30
prog += CZ(1,0) # number=31
prog += H(0) # number=32
prog += X(1) # number=23
prog += X(1) # number=24
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
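# Doctest-style illustration (hypothetical bitstrings) of the histogram built
# above:
#
# >>> summrise_results(["00", "01", "00"])
# {'00': 2, '01': 1}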
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1941.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
py | 1a42308c9c8a9f10f9c6d6b7c46f2c63526b59c6 | from opytimizer.optimizers.science import SA
# One should declare a hyperparameters object based
# on the desired algorithm that will be used
params = {
'T': 100,
'beta': 0.999
}
# Creates a SA optimizer
o = SA(params=params)
|
py | 1a4231bbc55d166c8ae9081c6a01def38c67f460 | import asyncio
from aiohttp.test_utils import AioHTTPTestCase
from ... import TracerTestCase
from .app.web import setup_app
class TraceTestCase(TracerTestCase, AioHTTPTestCase):
"""
Base class that provides a valid ``aiohttp`` application with
the async tracer.
"""
def enable_tracing(self):
pass
def disable_tracing(self):
pass
def tearDown(self):
# unpatch the aiohttp_jinja2 module
super(TraceTestCase, self).tearDown()
self.disable_tracing()
def get_app(self, loop=None):
"""
Override the get_app method to return the test application
"""
# aiohttp 2.0+ stores the loop instance in self.loop; for
# backward compatibility, we should expect a `loop` argument
loop = loop or self.loop
# create the app with the testing loop
self.app = setup_app(loop)
asyncio.set_event_loop(loop)
# trace the app
self.enable_tracing()
return self.app
|
py | 1a4231bcc378b73c828f0164d0e6e4ec415fdeac | #!/usr/bin/env python3
import unittest
import os
from getJenkinsVersion import get_latest_version, get_jenkins_version
USERNAME = os.environ.get('MAVEN_REPOSITORY_USERNAME', '')
PASSWORD = os.environ.get('MAVEN_REPOSITORY_PASSWORD', '')
# Test that GetJenkinsVersion returns the correct value
class TestGetJenkinsVersion(unittest.TestCase):
'''Unit Test getJenkinversion.py scripts'''
data_set = {
'all_versions': [
"1", "1.10", "1.11", "1.10.1", "1.10.2", "1.11.0", "1.11.2",
"1.999",
"2", "2.10", "2.11", "2.10.1", "2.10.2", "2.11.0", "2.11.2",
"2.99", "2.249", "2.249.1", "2.265", "2.265.3"
],
'url': "https://repo.jenkins-ci.org/releases/org/jenkins-ci/main/jenkins-war/maven-metadata.xml",
'versions': [
{
'name': 'latest',
'expected': '2.265'
},
{
'name': '1',
'expected': '1.658'
},
{
'name': '2',
'expected': '2.265'
},
{
'name': '2.249',
'expected': '2.249.3'
},
{
'name': '2.249.3',
'expected': '2.249.3'
}],
}
def test_latest_version(self):
        '''Test that we correctly get the latest Jenkins version'''
result = get_latest_version(self.data_set["all_versions"])
self.assertEqual("2.265.3", result)
def test_result(self):
        '''Test that we correctly get the Jenkins version value'''
for version in self.data_set["versions"]:
result = get_jenkins_version(self.data_set["url"],
version["name"],
USERNAME,
PASSWORD)
self.assertEqual(version["expected"], result)
if __name__ == '__main__':
unittest.main()
|
py | 1a423280e32c13ed7a358e9af62d71f591d8ee86 | from django.utils.translation import ugettext_lazy as _
INVALID_GOOGLE_DOC_URL = _('It should be Google Doc URL!.')
INVALID_KEY_IN_GOOGLE_DOC_URL = _("The key should be 44 as a length, it's only {}!.")
INVALID_STATUS_WHEN_REASSIGN_BLOG_USER = _("You can only reassign user with (Draft or Reject) blogs.")
|
py | 1a4232b0bff046836c359ca0de7a533c1178a1e4 | """Script to run stacking scripts on the DESY cluster.
Through use of argparse, a given configuration for the code can be selected.
This can be given from the command line, in the form:
python RunCluster.py -c Desired_Configuration_Name -n Number_Of_Tasks -s
Each available configuration must be listed in "config.ini", and controls
options for fitting, such as which catalogue is to be used, and which seasons
of data should be included. If -s is included, then a new job is submitted
to the cluster. Having submitted the job to the cluster it will be run in
parallel Number_of_Tasks times. The shell script SubmitOne.sh is called for
each task, which in turn calls RunLocal.py with the given configuration setting.
The Wait function will periodically query the cluster
to check on the status of the job, and will output the job status occasionally.
Once all sub-tasks are completed, the script will proceed to call
MergeFiles.run() for the given configuration, combining results.
"""
import subprocess
import time
import os
import os.path
import logging
import argparse
import numpy as np
from flarestack.shared import log_dir, fs_dir
from flarestack.cluster.submitter import Submitter
from flarestack.cluster.make_desy_cluster_script import (
make_desy_submit_file,
submit_file,
)
logger = logging.getLogger(__name__)
username = os.path.basename(os.environ["HOME"])
cmd = "qstat -u " + username
def wait_for_cluster(job_ids=None):
logger.warning(
"The wait_for_cluster function is deprecated! "
"Use the Submitter class instead."
)
Submitter.wait_for_cluster(job_ids)
# if not job_ids:
# wait_for_job()
# else:
# try:
# for i, job_id in enumerate(job_ids):
#
# logger.debug(f'waiting for job {job_id}')
# prog_str = f'{i}/{len(job_ids)}'
# wait_for_job(job_id, prog_str)
#
# except TypeError:
# logger.debug('Only waiting for one job')
# wait_for_job(job_ids)
def wait_for_job(job_id=None, progress_str=None):
"""
Runs the command cmd, which queries the status of the job on the
cluster, and reads the output. While the output is not an empty
string (indicating job completion), the cluster is re-queried
every 30 seconds. Occasionally outputs the number of remaining sub-tasks
on cluster, and outputs full table result every ~ 8 minutes. On
completion of job, terminates function process and allows the script to
continue.
"""
if not job_id:
job_id_str = "s"
else:
if progress_str:
job_id_str = f" {progress_str} {job_id}"
else:
job_id_str = " " + str(job_id)
time.sleep(10)
cmd = f"qstat -u {username}"
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
tmp = process.stdout.read().decode()
n_total = n_tasks(tmp, job_id)
i = 31
j = 6
while n_total != 0:
if i > 3:
running_process = subprocess.Popen(
cmd + " -s r", stdout=subprocess.PIPE, shell=True
)
running_tmp = running_process.stdout.read().decode()
if running_tmp != "":
n_running = n_tasks(running_tmp, job_id)
else:
n_running = 0
logger.info(
f"{time.asctime(time.localtime())} - Job{job_id_str}:"
f" {n_total} entries in queue. "
f"Of these, {n_running} are running tasks, and "
f"{n_total-n_running} are tasks still waiting to be executed."
)
i = 0
j += 1
if j > 7:
logger.info(str(tmp))
j = 0
time.sleep(30)
i += 1
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
tmp = process.stdout.read().decode()
n_total = n_tasks(tmp, job_id)
def submit_to_cluster(path, n_cpu=2, n_jobs=10, ram_per_core=None, **kwargs):
for file in os.listdir(log_dir):
os.remove(log_dir + file)
# Submits job to the cluster
submit_cmd = "qsub "
if n_cpu > 1:
submit_cmd += " -pe multicore {0} -R y ".format(n_cpu)
ram_per_core = (
"{0:.1f}G".format(6.0 / float(n_cpu) + 2.0)
if not ram_per_core
else ram_per_core
)
print("Ram per core:", ram_per_core)
submit_cmd += "-t 1-{0}:1 {1} {2} {3}".format(n_jobs, submit_file, path, n_cpu)
make_desy_submit_file(ram_per_core, **kwargs)
print(time.asctime(time.localtime()), submit_cmd, "\n")
process = subprocess.Popen(submit_cmd, stdout=subprocess.PIPE, shell=True)
msg = process.stdout.read().decode()
print(msg)
job_id = int(str(msg).split("job-array")[1].split(".")[0])
return job_id
def n_tasks(tmp, job_id):
"""
Returns the number of tasks given the output of qsub
:param tmp: output of qsub
:param job_id: int, optional, if given only tasks belonging to this job will we counted
:return: int
"""
st = str(tmp)
ids = np.array([int(s.split(" ")[2]) for s in st.split("\n")[2:-1]])
if job_id:
return len(ids[ids == job_id])
else:
return len(ids)
if not os.path.isfile(submit_file):
make_desy_submit_file()
|
py | 1a423372fbf7d4d862e0b356ef4120257a92c8d0 | #
# Spark job to bin data in WebMercator Spatial Reference
# The bin is a hexagon with a width of 100 meters
#
from pyspark import SparkContext
from hexgrid import HexGrid
from mercator import Mercator
def line_to_row_col(line, hg):
splits = line.split(',')
try:
lon = float(splits[10])
lat = float(splits[11])
x, y = Mercator.to_web_mercator(lon, lat)
rc = hg.xy2rc(x, y)
return rc, 1
except:
return (0, 0), -1
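# Illustrative smoke check (hypothetical row; assumes the trips CSV carries
# longitude and latitude in columns 10 and 11, as parsed above, and that the
# local hexgrid/mercator modules are importable):
#
# >>> demo = ",".join([""] * 10 + ["-73.99", "40.75"])
# >>> line_to_row_col(demo, HexGrid(100))[1]
# 1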
if __name__ == "__main__":
hg = HexGrid(100)
sc = SparkContext()
sc.textFile("hdfs:///trips"). \
map(lambda line: line_to_row_col(line, hg)). \
filter(lambda (rowcol, count): count > 0). \
reduceByKey(lambda a, b: a + b). \
filter(lambda (rowcol, count): count > 10). \
map(lambda ((row, col), count): "{0},{1},{2}".format(row, col, count)). \
saveAsTextFile("hdfs:///tmp/hex")
|
py | 1a4233bd2c96a3b5390c642436f3e2984dc094e3 | from rest_framework import routers
from markers.api_views import MarkerViewSet
router = routers.DefaultRouter()
router.register(r"markers", MarkerViewSet)
urlpatterns = router.urls
|
py | 1a4233e6e74b550534490e0a6aa753a02cb2ef2e | # SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Not(Base):
@staticmethod
def export() -> None:
node = onnx.helper.make_node(
'Not',
inputs=['x'],
outputs=['not'],
)
# 2d
x = (np.random.randn(3, 4) > 0).astype(bool)
expect(node, inputs=[x], outputs=[np.logical_not(x)],
name='test_not_2d')
# 3d
x = (np.random.randn(3, 4, 5) > 0).astype(bool)
expect(node, inputs=[x], outputs=[np.logical_not(x)],
name='test_not_3d')
# 4d
x = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
expect(node, inputs=[x], outputs=[np.logical_not(x)],
name='test_not_4d')
|
py | 1a42355c75e031cd2fd5a65acdcdb571bdad9a2a | """
File: sillystream/examples/daemon.py
Author: John Andersen
Description: A process that forks off and redirects stdout to sillystream server
To run:
python examples/daemon.py
python sillystream/__main__.py client
"""
import os
import sys
import time
import sillystream
# Send stdout to sillystream
STREAM = True
# Seconds to stay alive for
STAY_ALIVE = 20
def make_daemon():
"""
Daemonize to run in background
"""
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
pid = os.fork()
if pid > 0:
# exit second parent
sys.exit(0)
if STREAM:
# Create sillystream server
output = sillystream.server()
# Start the server
output.start_thread()
else:
output = open("/dev/null", 'wb')
sys.stdout = output
sys.stderr = output
def main():
"""
Daemonizes and prints numbers
"""
make_daemon()
i = 0
while i < STAY_ALIVE:
print("test {}".format(i))
i += 1
time.sleep(1)
if __name__ == '__main__':
main()
|
py | 1a4235aa003e1ea35caf0828a4d57d33e579cf5a | import ctypes
import numpy as np
import os
import subprocess
import tempfile
import tvm
from tvm import relay, get_global_func, target, register_func
from tvm.relay.expr import Expr, Function, Let, GlobalVar
from tvm.relay.adt import Constructor
from tvm.relay.expr_functor import ExprFunctor, ExprVisitor
from tvm.relay.backend import compile_engine
from .little_cpp import PackedCall, CPPFunction, Invoke, Decl, CPPIf, CPPTuple, CPPMatch, CPPConstructor, CPPTupleGetItem
from .little_cpp import CPPRefCreate, CPPRefRead, CPPRefWrite
from . import to_source
from .convert import convert
TVM_PATH = os.environ['TVM_HOME']
def must_run_process(args):
proc = subprocess.run(args)
assert proc.returncode == 0
def compile_cpp(source, lib_name, flags=None, lib_path=None):
if flags is None:
flags = []
if lib_path is None:
lib_path = os.curdir
debug_source_path = os.path.join(lib_path, 'source.cc')
# Write out the file for debugging.
with open(debug_source_path, 'w') as source_file:
source_file.write(source)
    # with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = tempfile.mkdtemp(prefix="relay_aot_compiler")
lib_path = os.path.join(tmpdir, lib_name)
source_path = os.path.join(tmpdir, 'source.cc')
with open(source_path, 'w') as source_file:
source_file.write(source)
must_run_process(["clang-format", "-i", debug_source_path])
system = os.uname()[0]
if system == 'Darwin':
command = [
"clang",
"-std=c++14",
"-shared",
"-undefined",
"dynamic_lookup",
"-o",
lib_path,
source_path,
f"-I{TVM_PATH}/3rdparty/dmlc-core/include",
f"-I{TVM_PATH}/3rdparty/dlpack/include",
f"-I{TVM_PATH}/3rdparty/HalideIR/src",
f"-I{TVM_PATH}/include",
f"-L{TVM_PATH}/build",
"-ltvm"
] + flags
else:
command = [
"clang",
"-std=c++14",
"-shared",
"-fPIC",
"-o",
lib_path,
source_path,
f"-I{TVM_PATH}/3rdparty/dmlc-core/include",
f"-I{TVM_PATH}/3rdparty/dlpack/include",
f"-I{TVM_PATH}/3rdparty/HalideIR/src",
f"-I{TVM_PATH}/include",
f"-L{TVM_PATH}/build",
"-ltvm"
] + flags
must_run_process(command)
return lib_path
def load_lib(name):
return ctypes.CDLL(name, ctypes.RTLD_GLOBAL)
def is_primitive(e: relay.Expr):
return isinstance(e, relay.Function) and e.attrs and e.attrs.Primitive.value == 1
class AoTCompiler(ExprFunctor):
def __init__(self, mod, tgt) -> None:
super().__init__()
self.mod = mod
self.tgt = tgt
self.engine = compile_engine.get()
self.bindings = [[]]
self.gv_map = {}
def add_binding(self, var, value):
self.bindings[-1].append((var, value))
def optimize(self, expr: Function) -> Function:
opts = relay.transform.Sequential([relay.transform.FuseOps(),
relay.transform.ToANormalForm()])
self.mod['main'] = expr
self.mod = opts(self.mod)
ret = self.mod['main']
return ret
def mk_primitive_op(self, func: Expr, args, output_type) -> Expr:
cc_key = compile_engine.CCacheKey(func, self.tgt)
hash = relay.analysis.structural_hash(func)
name = f"op_{hash}"
if not get_global_func(name, allow_missing=True):
jit_func = self.engine.jit(cc_key, self.tgt)
register_func(name, jit_func)
return PackedCall(name, args, [x.checked_type for x in args], output_type)
def visit_call(self, call: Expr) -> Expr:
if is_primitive(call.op):
return self.mk_primitive_op(call.op, call.args, call.checked_type)
elif isinstance(call.op, Constructor):
return CPPConstructor(call.op.tag, [self.visit(arg) for arg in call.args])
else:
assert(call.attrs == None)
args = [self.visit(arg) for arg in call.args]
fn = self.visit(call.op)
return Invoke(fn, args)
def visit_let(self, let: Expr) -> Expr:
self.bindings.append([])
while isinstance(let, Let):
cpp_value = self.visit(let.value)
self.add_binding(let.var, cpp_value)
let = let.body
bindings = self.bindings.pop()
body = self.visit(let)
return Decl(bindings, body)
def visit_var(self, var):
return var
def visit_global_var(self, gv):
if gv not in self.gv_map:
self.gv_map[gv] = "to be updated"
self.gv_map[gv] = self.visit(self.mod[gv])
return gv
def visit_function(self, func):
if is_primitive(func):
body = self.mk_primitive_op(func, func.params, func.ret_type)
return CPPFunction(func.params, body, func.checked_type.ret_type)
else:
return CPPFunction(func.params, self.visit(func.body), func.checked_type.ret_type)
def visit_constant(self, const):
return const
def visit_if(self, i):
return CPPIf(self.visit(i.cond),
self.visit(i.true_branch),
self.visit(i.false_branch),
i.checked_type)
def visit_tuple(self, t):
return CPPTuple([self.visit(f) for f in t.fields], t.checked_type)
def visit_match(self, m):
return CPPMatch(self.visit(m.data),
[(c.lhs, self.visit(c.rhs)) for c in m.clauses],
m.checked_type)
def visit_op(self, op):
raise Exception(f'op outside of primitive: {op}')
def visit_tuple_getitem(self, t):
return CPPTupleGetItem(self.visit(t.tuple_value), t.index, t.checked_type)
def visit_ref_create(self, r):
return CPPRefCreate(self.visit(r.value), r.checked_type)
def visit_ref_read(self, r):
return CPPRefRead(self.visit(r.ref), r.checked_type)
def visit_ref_write(self, r):
return CPPRefWrite(self.visit(r.ref), self.visit(r.value))
_LIB_COUNTER = 1
_LIB = []
def lib_and_func_name(name):
global _LIB_COUNTER
packed_name = f'relay.aot.{name}.{_LIB_COUNTER}'
lib_name = f"librelay_aot_{_LIB_COUNTER}.so"
_LIB_COUNTER += 1
return lib_name, packed_name
import time
def _mk_wrapper(fn, ctx, constants, record_time):
def _wrapper(*args):
new_constants = [convert(a, ctx) for a in constants]
new_args = [convert(a, ctx) for a in args]
begin = time.perf_counter()
res = fn(*new_constants, *new_args)
end = time.perf_counter()
return res if not record_time else (res, end - begin)
return _wrapper
import sys
sys.setrecursionlimit(10000)
def compile(func, mod, ctx, tgt, name='default', record_time=False):
"""Compile a relay function into a native library function.
Parameters
----------
func: Expr
The function.
mod: Module
The Module.
ctx: Context
The Context.
tgt: Target
The target
name: String
The name of the target binary library.
record_time: Bool
Time cost to call f?
Returns
-------
result: Function
A function that, when pass in some values,
will convert them to the right format and call the compiled func.
"""
global _LIB
if isinstance(func, GlobalVar):
func = mod[func]
assert isinstance(func, Function)
compiler = AoTCompiler(mod, tgt)
func = compiler.optimize(func)
func = compiler.visit(func)
lib_name, packed_name = lib_and_func_name(name)
constants, source_code = to_source.to_source(mod, func, compiler.gv_map, ctx, packed_name)
lib_name = f"librelay_aot_{_LIB_COUNTER}.so"
library_path = compile_cpp(source_code, lib_name, flags=["-O3"])
_LIB.append(load_lib(library_path))
fn = get_global_func(packed_name)
return _mk_wrapper(fn, ctx, constants, record_time)
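
if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: a minimal
    # end-to-end call of compile() above. It assumes the legacy TVM Relay
    # API this file is written against (relay.Module, tvm.target.create,
    # tvm.cpu) is available; the function body and shape are illustrative.
    import numpy as np
    import tvm

    sketch_mod = relay.Module()
    x = relay.var("x", shape=(10,), dtype="float32")
    sketch_mod["main"] = relay.Function([x], x + x)
    double = compile(sketch_mod["main"], sketch_mod, tvm.cpu(0),
                     tvm.target.create("llvm"), name="double")
    print(double(np.ones(10, dtype="float32")))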
|
py | 1a4236e68cd76c1583be6cf087d00044c6b712b0 | #!/opt/local/bin/python2.7
#
# Get lat/lon, in WGS84 (EPSG:4326), from gage point layer (Gage_Loc) for
# gage identified by a source_fea (e.g. USGS Site Number)
#
# Returns "<x> <y>" or "Gage not found"
#
# Author(s): Brian Miles - [email protected]
# Date: 20130124
#
# Revisions: 20130124: 1.0: First fully working version
#
# Example command line: PYTHONPATH=${PYTHONPATH}:../../NHDPlus2Lib:../../SpatialDataLib ./GetLocationForNHDStreamflowGage.py -i macosx2.cfg -g 01589330
#
import os
import sys
import errno
import argparse
import ConfigParser
from nhdplus2lib.networkanalysis import getLocationForStreamGageByGageSourceFea
# Handle command line options
parser = argparse.ArgumentParser(description='Get lat/lon, in WGS84 (EPSG:4326), for a USGS streamflow gage from the NHDPlus2 Gage_Loc layer. Outputs "<x> <y>" or "Gage not found"')
parser.add_argument('-i', '--configfile', dest='configfile', required=True,
help='The configuration file')
parser.add_argument('-g', '--gageid', dest='gageid', required=True,
help='An integer representing the USGS site identifier')
args = parser.parse_args()
if not os.access(args.configfile, os.R_OK):
raise IOError(errno.EACCES, "Unable to read configuration file %s" %
args.configfile)
config = ConfigParser.RawConfigParser()
config.read(args.configfile)
if not config.has_option('NHDPLUS2', 'PATH_OF_NHDPLUS2_GAGELOC'):
sys.exit("Config file %s does not define option %s in section %s" & \
(args.configfile, 'NHDPLUS2', 'PATH_OF_NHDPLUS2_GAGELOC'))
result = getLocationForStreamGageByGageSourceFea(config, args.gageid)
if result:
print "%s %s" % (result[0], result[1])
else:
print "Gage not found"
|
py | 1a4237a73d49711fb03fe89706b84f53fda4ea81 | import pandas as pd
from pyecharts.components import Table
from pyecharts.options import ComponentTitleOpts
__all__ = ['statistics']
def statistics(model, jupyter=True, path='Model Summary.html', title="Model Summary", subtitle=""):
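    """Render a per-layer summary table (name, type, shapes, params) for a
    Keras model; returns notebook HTML when `jupyter` is True, otherwise
    writes the table to `path`."""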
t = pd.DataFrame([[i.name, i.__class__.__name__, i.trainable, i.dtype, i.input_shape, i.output_shape, i.count_params()] for i in model.layers],
columns=['layer_custom_name', 'layer_object_name', 'trainable', 'dtype', 'input_shape', 'output_shape', 'params'])
# t['output_memory(MB)'] = (t.output_shape.map(lambda x:sum([reduce(lambda y,z:y*z, i[1:]) for i in x]) if isinstance(x, list) else reduce(lambda y,z:y*z, x[1:]))
# *t.dtype.map(lambda x:int(re.sub("\D", "", x))))/32#/1024/1024)
t.loc['total'] = ['', '', '', '', '', '', t.params.sum()]
t['input_shape'] = t.input_shape.map(lambda x:str(x).replace("),(", "),\n(") if isinstance(x, list) else x)
t = t.reset_index().rename(columns={'index':''})
for i in t.columns:
t[i] = t[i].astype(str)
table = Table()
headers = t.columns.tolist()
rows = t.values.tolist()
table.add(headers, rows).set_global_opts(title_opts=ComponentTitleOpts(title=title, subtitle=subtitle))
return table.render_notebook() if jupyter else table.render(path)
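
if __name__ == '__main__':
    # Hedged usage sketch (illustrative): summarize a tiny Keras model.
    # Assumes TensorFlow 2.x is installed; the architecture is made up.
    from tensorflow.keras import layers, models

    demo = models.Sequential([
        layers.Conv2D(8, 3, activation='relu', input_shape=(28, 28, 1), name='conv'),
        layers.Flatten(name='flatten'),
        layers.Dense(10, name='logits'),
    ])
    statistics(demo, jupyter=False, path='Model Summary.html')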
|
py | 1a423845c91e50b4bdf7bfee545de882c7cfffc6 | # This scripts demonstrates how to use mitmproxy's filter pattern in inline scripts.
# Usage: mitmdump -s "filt.py FILTER"
import sys
from mitmproxy import filt
def start(context):
if len(sys.argv) != 2:
raise ValueError("Usage: -s 'filt.py FILTER'")
context.filter = filt.parse(sys.argv[1])
def response(context, flow):
if flow.match(context.filter):
print("Flow matches filter:")
print(flow)
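
# Example invocation (illustrative): print every flow whose URL matches
# the filter expression "~u example.com":
#
#   mitmdump -s "filt.py ~u example.com"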
|
py | 1a423916459791fb7002815667ac0e62b6fa688f | """Shows images for debugging purposes."""
import cv2
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import sys
__author__ = "Tess Bianchi"
class Debug(object):
"""Class that contains methods that assist with debugging with images."""
def __init__(self, nh=None, w=1000, h=800, total=8, win_name="debug", wait=True):
"""
Initialize the Debug class.
@param w = The width of the image that smaller images are added to
@param h = The height of the image that smaller images are added to
@param win_name = the name of the window that is shown in opencv
@param wait = whether or not to wait after showing the image
"""
self.width = w
self.height = h
self.img = np.zeros((h, w, 3), np.uint8)
self.total = total
self.hor_num = total / 2
self.vert_num = 2
self.max_width = w / self.hor_num
self.max_height = h / self.vert_num
self.wait = wait
self.nh = nh
self.curr_w = 0
self.curr_h = 0
self.num_imgs = 0
self.win_name = win_name
self.name_to_starting = {}
self.bridge = CvBridge()
if nh is not None:
self.base_topic = "/debug/scan_the_code/"
self.topic_to_pub = {}
self.pub = nh.advertise("/debug/scan_the_code/image", Image)
def add_image(self, img, name, wait=33, topic="image"):
"""
Add an image to show to either with a topic or using cv2.imshow.
@param name = a unique key name for the image, use the same name if you want to switch out this image for another
@param wait = the amount of wait time for the imshow image
"""
if topic != "image":
self._add_new_topic(img, name, wait, topic)
return
if self.wait:
wait = 0
if len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
h, w, r = img.shape
if w > h:
img = cv2.resize(img, (self.max_width, h * self.max_width / w))
if h > w:
img = cv2.resize(img, (w * self.max_height / h, self.max_height))
h, w, r = img.shape
if name not in self.name_to_starting:
if self.num_imgs == self.total:
print "Too many images"
return
self.name_to_starting[name] = (self.curr_w, self.curr_h)
self.num_imgs += 1
self.curr_w += w
if self.num_imgs == self.total / 2:
self.curr_w = 0
self.curr_h = self.max_height
if self.num_imgs > self.total / 2:
self.name_to_starting[name] = (self.curr_w, self.curr_h)
my_w, my_h = self.name_to_starting[name]
self.img[my_h: my_h + h, my_w: my_w + w] = img
if self.nh is None:
cv2.imshow("img", self.img)
if cv2.waitKey(wait) & 0xFF == ord('q'):
cv2.destroyAllWindows()
sys.exit()
else:
self.pub.publish(self.bridge.cv2_to_imgmsg(self.img, "bgr8"))
def _add_new_topic(self, img, name, wait, topic):
pub = None
if topic in self.topic_to_pub.keys():
pub = self.topic_to_pub[topic]
else:
            pub = self.nh.advertise(self.base_topic + topic, Image)
self.topic_to_pub[topic] = pub
pub.publish(self.bridge.cv2_to_imgmsg(img, "bgr8"))
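
if __name__ == '__main__':
    # Hedged usage sketch (illustrative): tile two test images without ROS.
    # With nh=None the class falls back to cv2.imshow, so this assumes a
    # display is available; press 'q' in the window to exit.
    debug = Debug(w=1000, h=800, total=8, wait=False)
    dark = np.zeros((480, 640, 3), np.uint8)
    debug.add_image(dark, 'dark')
    debug.add_image(255 - dark, 'light')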
|