ext | sha | content |
---|---|---|
py | b40f49440f4af625868d13b5bd5c829d841548ad | from .homepage import Homepage
from .notification import Notification
arguments = {
Homepage.name: Homepage,
Notification.name: Notification,
}
|
py | b40f49bc21cf0feed64ac530c328f45144c9155c | # by huhan3
from __future__ import division, print_function, unicode_literals
import numpy as np
from numpy import linalg as la
import array
# This is a helper class for reading pre-trained word vector files
# Now supports 1) Word2Vec (binary file) 2) GloVe (text file)
# Word2Vec defaults are set for the GoogleNews file; change the 'encoding' and 'newLines' parameters for other files
class PreTrainedVectorReader(object):
def __init__(self, vocab, vectors):
self.vocab = vocab
self.vectors = vectors
self.dic = {}
for i, word in enumerate(vocab):
self.dic[word] = i
def __len__(self):
return len(self.dic)
def __getitem__(self, word):
return self.get_vector(word)
def __contains__(self, word):
return word in self.dic
def get_vector(self, word):
try:
index = self.dic[word]
return self.vectors[index]
except KeyError:
return None
def get_word(self, index):
try:
return self.vocab[index]
except IndexError:
return None
def get_index(self, word):
try:
return self.dic[word]
except KeyError:
return None
@classmethod
def from_word2vec_binary(cls, filename, vocabUnicodeSize=78, desired_vocab=None, encoding='ISO-8859-1', newLines=False, unitvector=True):
"""
Parameter:
-----------------------
filename: The file name of the desired binary file
vocabUnicodeSize: Maximum string length (78 default)
desired_vocab: A list (set is better) of words.
All words not in this list will be ignored.
(Note this list can contain bytes or str)
encoding: The codec used for the file
newLines: If there's an empty char before a new line
unitvector: Whether to convert vectors into unit vectors
Returns:
-------------------
The PreTrainedVectorReader object
"""
with open(filename, 'rb') as inf:
header = inf.readline() # read header and get size
vocab_size, vector_size = list(map(int, header.split()))
vocab = np.empty(vocab_size, dtype='<U%s' % vocabUnicodeSize) # init vocab (little-endian) and vectors
vectors = np.empty((vocab_size, vector_size), dtype=np.float64)
binary_len = np.dtype(np.float32).itemsize * vector_size # important to know how long a vector is
for i in range(vocab_size):
word = b''
while True:
ch = inf.read(1)
if ch == b' ':
break
word += ch
include = desired_vocab is None or word.decode(encoding) in desired_vocab or word in desired_vocab # check whether this word should be kept
if include:
vocab[i] = word.decode(encoding)
vector = np.frombuffer(inf.read(binary_len), dtype=np.float32) # read bytes and convert to vector (frombuffer replaces the deprecated fromstring)
if include:
if unitvector:
vectors[i] = (1.0 / la.norm(vector, ord=2)) * vector # convert this vector to unitvector
else:
vectors[i] = vector
if newLines:
inf.read(1) # for normal files, consume the newline char after each vector; not needed for the GoogleNews file
if desired_vocab is not None:
vectors = vectors[vocab != '', :] # numpy boolean indexing: keep only rows whose vocab entry is non-empty
vocab = vocab[vocab != '']
return cls(vocab=vocab, vectors=vectors)
@classmethod
def from_glove_plain(cls, filename, vocab_size=5000000, vocabUnicodeSize=78, desired_vocab=None, encoding='utf-8', unitvector=False):
"""
Parameter:
-----------------------
filename: The file name of the desired GloVe text file
vocab_size: The maximum number of vocab entries, 5000000 default
vocabUnicodeSize: Maximum string length (78 default)
desired_vocab: A list (set is better) of words.
All words not in this list will be ignored.
(Note this list can contain bytes or str)
encoding: The codec used for the file
unitvector: Whether to convert vectors into unit vectors
Returns:
-------------------
The PreTrainedVectorReader object
"""
init = False
vector_size = 0
c = 0
with open(filename, 'rt', encoding=encoding) as inf:
for i, line in enumerate(inf):
raw = line.split(' ')
if not init:
vector_size = len(raw) - 1
vocab = np.empty(vocab_size, dtype='<U%s' % vocabUnicodeSize) # init vocab (little-endian) and vectors
vectors = np.empty((vocab_size, vector_size), dtype=np.float64)
init = True
vector = np.array([float(x) for x in raw[1:]], dtype=np.float64)
if desired_vocab is None or raw[0] in desired_vocab:
vocab[c] = raw[0]
if unitvector:
vectors[c] = (1.0 / la.norm(vector, ord=2)) * vector # convert this vector to unitvector
else:
vectors[c] = vector
c += 1
vectors = np.resize(vectors, (c,vector_size)) # resize to remove empty elements
vocab = np.resize(vocab, c)
return cls(vocab=vocab, vectors=vectors)
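# usage sketch (illustrative only; the file paths below are hypothetical):
# reader = PreTrainedVectorReader.from_glove_plain('glove.6B.50d.txt')
# vec = reader['the'] # same as reader.get_vector('the'); returns None if the word is missing
# idx = reader.get_index('the') # row index into reader.vectors, or None
# w2v = PreTrainedVectorReader.from_word2vec_binary('GoogleNews-vectors-negative300.bin', unitvector=True)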
|
py | b40f49c7fcd319215a3063e3573370f789db6cf5 | """
alebot
------
alebot is a super lean and highly modularized irc bot that lets you
extend it in a python way, using classes and decorators. It supports
both hooks and background tasks in an easy and fail-safe way.
Links
`````
* `source code <https://github.com/alexex/alebot>`_
* `docs <https://alebot.readthedocs.org/>`_
"""
from setuptools import setup
setup(
name='alebot',
version='0.1.2',
url='https://github.com/alexex/alebot',
license='MIT',
author='Alexander Jung-Loddenkemper',
author_email='[email protected]',
description='A super lean and highly modularized irc bot',
long_description=__doc__,
packages=['alebot', 'alebot.plugins'],
zip_safe=False,
platforms='any',
install_requires=[
'click'
],
entry_points='''
[console_scripts]
alebot=alebot.cli:run
'''
)
|
py | b40f4a344d706d2c419c0d3ef7d49e4fd6760a67 | # Copyright 2021 Softwerks LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
blueprint = flask.Blueprint("game", __name__)
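# route handlers are imported after the blueprint is created so they can register against it without a circular import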
import chateau.game.routes
|
py | b40f4ab52a4d107a9d603c2b2a641146dc5e2d87 | import unittest
from main import delete_nth
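# delete_nth(order, max_e) is expected to keep at most max_e occurrences of each value while preserving order (behaviour inferred from the assertions below)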
class Test_delete_nth(unittest.TestCase):
def test(self):
with self.subTest("Basic tests"):
self.assertEqual(delete_nth([20, 37, 20, 21], 1), [20, 37, 21])
self.assertEqual(delete_nth([1, 1, 3, 3, 7, 2, 2, 2, 2], 3), [1, 1, 3, 3, 7, 2, 2, 2])
if __name__ == '__main__':
unittest.main() |
py | b40f4b43afd659bcaed7899b6e79c091e499370b | import builtins
from typing import Any, Generic, List, Callable, Union, Tuple
import numpy as np
import ray
from ray.types import ObjectRef
from ray.data.block import Block, BlockAccessor, \
BlockMetadata, T
from ray.data.impl.arrow_block import ArrowRow
from ray.util.annotations import PublicAPI
from ray.data.impl.util import _check_pyarrow_version
WriteResult = Any
@PublicAPI(stability="beta")
class Datasource(Generic[T]):
"""Interface for defining a custom ``ray.data.Dataset`` datasource.
To read a datasource into a dataset, use ``ray.data.read_datasource()``.
To write to a writable datasource, use ``Dataset.write_datasource()``.
See ``RangeDatasource`` and ``DummyOutputDatasource`` for examples
of how to implement readable and writable datasources.
"""
def prepare_read(self, parallelism: int,
**read_args) -> List["ReadTask[T]"]:
"""Return the list of tasks needed to perform a read.
Args:
parallelism: The requested read parallelism. The number of read
tasks should be as close to this value as possible.
read_args: Additional kwargs to pass to the datasource impl.
Returns:
A list of read tasks that can be executed to read blocks from the
datasource in parallel.
"""
raise NotImplementedError
def do_write(self, blocks: List[ObjectRef[Block]],
metadata: List[BlockMetadata],
**write_args) -> List[ObjectRef[WriteResult]]:
"""Launch Ray tasks for writing blocks out to the datasource.
Args:
blocks: List of data block references. It is recommended that one
write task be generated per block.
metadata: List of block metadata.
write_args: Additional kwargs to pass to the datasource impl.
Returns:
A list of the output of the write tasks.
"""
raise NotImplementedError
def on_write_complete(self, write_results: List[WriteResult],
**kwargs) -> None:
"""Callback for when a write job completes.
This can be used to "commit" a write output. This method must
succeed prior to ``write_datasource()`` returning to the user. If this
method fails, then ``on_write_failed()`` will be called.
Args:
write_results: The list of the write task results.
kwargs: Forward-compatibility placeholder.
"""
pass
def on_write_failed(self, write_results: List[ObjectRef[WriteResult]],
error: Exception, **kwargs) -> None:
"""Callback for when a write job fails.
This is called on a best-effort basis on write failures.
Args:
write_results: The list of the write task result futures.
error: The first error encountered.
kwargs: Forward-compatibility placeholder.
"""
pass
@PublicAPI(stability="beta")
class ReadTask(Callable[[], Block]):
"""A function used to read a block of a dataset.
Read tasks are generated by ``datasource.prepare_read()``, and return
a ``ray.data.Block`` when called. Metadata about the read operation can
be retrieved via ``get_metadata()`` prior to executing the read.
Ray will execute read tasks in remote functions to parallelize execution.
"""
def __init__(self, read_fn: Callable[[], Block], metadata: BlockMetadata):
self._metadata = metadata
self._read_fn = read_fn
def get_metadata(self) -> BlockMetadata:
return self._metadata
def __call__(self) -> Block:
return self._read_fn()
class RangeDatasource(Datasource[Union[ArrowRow, int]]):
"""An example datasource that generates ranges of numbers from [0..n).
Examples:
>>> source = RangeDatasource()
>>> ray.data.read_datasource(source, n=10).take()
... [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
def prepare_read(self,
parallelism: int,
n: int,
block_format: str = "list",
tensor_shape: Tuple = (1, )) -> List[ReadTask]:
read_tasks: List[ReadTask] = []
block_size = max(1, n // parallelism)
# Example of a read task. In a real datasource, this would pull data
# from an external system instead of generating dummy data.
def make_block(start: int, count: int) -> Block:
if block_format == "arrow":
return pyarrow.Table.from_arrays(
[np.arange(start, start + count)], names=["value"])
elif block_format == "tensor":
tensor = TensorArray(
np.ones(tensor_shape, dtype=np.int64) * np.expand_dims(
np.arange(start, start + count),
tuple(range(1, 1 + len(tensor_shape)))))
return pyarrow.Table.from_pydict({"value": tensor})
else:
return list(builtins.range(start, start + count))
i = 0
while i < n:
count = min(block_size, n - i)
if block_format == "arrow":
_check_pyarrow_version()
import pyarrow
schema = pyarrow.Table.from_pydict({"value": [0]}).schema
elif block_format == "tensor":
_check_pyarrow_version()
from ray.data.extensions import TensorArray
import pyarrow
tensor = TensorArray(
np.ones(tensor_shape, dtype=np.int64) * np.expand_dims(
np.arange(0, 10), tuple(
range(1, 1 + len(tensor_shape)))))
schema = pyarrow.Table.from_pydict({"value": tensor}).schema
elif block_format == "list":
schema = int
else:
raise ValueError("Unsupported block type", block_format)
read_tasks.append(
ReadTask(
lambda i=i, count=count: make_block(i, count),
BlockMetadata(
num_rows=count,
size_bytes=8 * count,
schema=schema,
input_files=None)))
i += block_size
return read_tasks
class DummyOutputDatasource(Datasource[Union[ArrowRow, int]]):
"""An example implementation of a writable datasource for testing.
Examples:
>>> output = DummyOutputDatasource()
>>> ray.data.range(10).write_datasource(output)
>>> assert output.num_ok == 1
"""
def __init__(self):
# Setup a dummy actor to send the data. In a real datasource, write
# tasks would send data to an external system instead of a Ray actor.
@ray.remote
class DataSink:
def __init__(self):
self.rows_written = 0
self.enabled = True
def write(self, block: Block) -> str:
block = BlockAccessor.for_block(block)
if not self.enabled:
raise ValueError("disabled")
self.rows_written += block.num_rows()
return "ok"
def get_rows_written(self):
return self.rows_written
def set_enabled(self, enabled):
self.enabled = enabled
self.data_sink = DataSink.remote()
self.num_ok = 0
self.num_failed = 0
def do_write(self, blocks: List[ObjectRef[Block]],
metadata: List[BlockMetadata],
**write_args) -> List[ObjectRef[WriteResult]]:
tasks = []
for b in blocks:
tasks.append(self.data_sink.write.remote(b))
return tasks
def on_write_complete(self, write_results: List[WriteResult]) -> None:
assert all(w == "ok" for w in write_results), write_results
self.num_ok += 1
def on_write_failed(self, write_results: List[ObjectRef[WriteResult]],
error: Exception) -> None:
self.num_failed += 1
class RandomIntRowDatasource(Datasource[ArrowRow]):
"""An example datasource that generates rows with random int64 columns.
Examples:
>>> source = RandomIntRowDatasource()
>>> ray.data.read_datasource(source, n=10, num_columns=2).take()
... {'c_0': 1717767200176864416, 'c_1': 999657309586757214}
... {'c_0': 4983608804013926748, 'c_1': 1160140066899844087}
"""
def prepare_read(self, parallelism: int, n: int,
num_columns: int) -> List[ReadTask]:
_check_pyarrow_version()
import pyarrow
read_tasks: List[ReadTask] = []
block_size = max(1, n // parallelism)
def make_block(count: int, num_columns: int) -> Block:
return pyarrow.Table.from_arrays(
np.random.randint(
np.iinfo(np.int64).max,
size=(num_columns, count),
dtype=np.int64),
names=[f"c_{i}" for i in range(num_columns)])
schema = pyarrow.Table.from_pydict(
{f"c_{i}": [0]
for i in range(num_columns)}).schema
i = 0
while i < n:
count = min(block_size, n - i)
read_tasks.append(
ReadTask(
lambda count=count, num_columns=num_columns:
make_block(count, num_columns),
BlockMetadata(
num_rows=count,
size_bytes=8 * count * num_columns,
schema=schema,
input_files=None)))
i += block_size
return read_tasks
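# minimal sketch of a custom read-only datasource, following the same pattern as the examples above
# (hypothetical; not part of the original module — names ListDatasource/items are illustrative):
# class ListDatasource(Datasource[int]):
#     """Serves an in-memory Python list as a single read task."""
#     def prepare_read(self, parallelism: int, items: List[int]) -> List[ReadTask]:
#         metadata = BlockMetadata(num_rows=len(items), size_bytes=8 * len(items), schema=int, input_files=None)
#         # one block regardless of parallelism; a real datasource would split items into ~parallelism chunks
#         return [ReadTask(lambda items=items: list(items), metadata)]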
|
py | b40f4b912fd7eb1f24a1aeacfd7b0e9ef3df2f46 | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 7 21:08:17 2019
@author: Rashid Haffadi
"""
class Optimizer():
def __init__(self, acf, lr, *args, **kwargs):
self.lr = lr
self.acf = acf #activation function
def init_grads(self, W, X):
self.grads = self.acf.get_grads(W, X)
def one_step(self, W, X):
self.init_grads(W, X)
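# a typical SGD step (not implemented here) would then update the weights, e.g. W -= self.lr * self.grads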
# self.acf |
py | b40f4fd5d280194efae83fd50c99444c74c32aff | from flask import Flask, request, jsonify, render_template
import server.util as util
app = Flask(__name__, static_url_path="/client", static_folder='../client', template_folder="../client")
@app.route('/', methods=['GET'])
def index():
if request.method=="GET":
return render_template("app.html")
@app.route('/get_location_names',methods=['GET'])
def get_location_names():
response = jsonify(
{
'locations': util.get_location_names()
}
)
response.headers.add('Access-Control-Allow-Origin','*')
return response
@app.route('/predict_home_price',methods=['POST'])
def predict_home_price():
# read the input values submitted by the user via the form request
total_sqft = float(request.form['total_sqft'])
location = request.form['location']
bath = int(request.form['bath'])
balcony = int(request.form['balcony'])
Bedroom = int(request.form['Bedroom'])
response = jsonify({
'estimated_price': util.get_etimated_prices(location, total_sqft, bath, balcony, Bedroom)
})
return response
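# example request against this endpoint (illustrative; field names match the form parsing above, host/port assume Flask defaults):
# curl -X POST http://127.0.0.1:5000/predict_home_price -F total_sqft=1000 -F location=some_location -F bath=2 -F balcony=1 -F Bedroom=2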
if __name__ == '__main__':
print('Starting python flask server for Bangalore Home Price prediction..........')
app.run() |
py | b40f514376e1a4d1040a64715b43b078717fd4b9 | # coding: utf-8
import argparse
import collections
import copy
import io
import itertools
import json
import _jsonnet
import asdl
import attr
import networkx
import tqdm
from seq2struct import datasets
from seq2struct import models
from seq2struct.utils import registry
# Initial units: node and its required product-type fields, recursively
# Eligible triples: node, field_name then
# - opt: None, or type of field
# - seq: None, or if len > 0:
# - sum type: type of first element
# - product type: type of first element (indicates there's more than one element)
# - constant: value
# - neither: type of field, or constant value
#
# Merging
# 1. Replace type of node with something else
# 2. Promote fields of the involved field
# - None: nothing to promote
# - seq, type of first element: all fields of that type
# - neither: type of field
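# worked example of one merge (hypothetical AST): if the most frequent triple is ('Call', 'func', 'Name'),
# a new type named like 'Type0000_Call' is created whose 'func' field is preset to 'Name', and Name's own
# fields are promoted onto it, e.g. 'id' becomes the tuple-named field ('func', 'id')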
class IdentitySet(collections.abc.MutableSet):
def __init__(self, iterable=()):
self.map = {id(x): x for x in iterable}
def __contains__(self, value):
return id(value) in self.map
def __iter__(self):
return iter(self.map.values())
def __len__(self):
return len(self.map)
def add(self, value):
self.map[id(value)] = value
def discard(self, value):
self.map.pop(id(value))
@attr.s
class TypeInfo:
name = attr.ib()
base_name = attr.ib()
predecessor_name = attr.ib()
predecessor_triple = attr.ib()
# Fields whose values need to be specified in a node of this type.
# OrderedDict or dict. Keys: str if single element, tuple if more than one element
unset_fields = attr.ib()
# Fields whose values are already known.
# dict. Keys: always tuple (even if only one element)
preset_fields = attr.ib()
## Sequential fields which have been ended.
## set. Elements: always tuple (even if only one element)
#depleted_fields = attr.ib()
preset_seq_elem_counts = attr.ib(factory=lambda: collections.Counter())
@attr.s(frozen=True)
class Primitive:
value = attr.ib()
class TreeBPE:
def __init__(self, grammar):
self.grammar = grammar
self.ast_wrapper = grammar.ast_wrapper
self.type_infos = {
k: TypeInfo(
name=k,
base_name=k,
predecessor_name=k,
predecessor_triple=None,
unset_fields=collections.OrderedDict((field.name, field) for field in v.fields),
preset_fields={},
) for k, v in self.ast_wrapper.singular_types.items()
}
self.type_graph = networkx.DiGraph()
for k in self.ast_wrapper.singular_types:
self.type_graph.add_node(k)
self.created_types = []
self.pre_iteration_counts = []
self.iterations_finished = 0
def run_iteration(self, trees):
triple_occurrences, node_type_counts = self.count_triples(trees)
self.pre_iteration_counts.append(node_type_counts)
# Most frequent
most_freq_triple, most_freq_occurrences = max(
triple_occurrences.items(), key=lambda kv: len(kv[1]))
if len(most_freq_occurrences) == 1:
raise Exception('No more work to do!')
existing_type_name, field_name, field_info = most_freq_triple
tuple_name = field_name if isinstance(field_name, tuple) else (field_name,)
existing_type = self.type_infos[existing_type_name]
existing_field = existing_type.unset_fields[field_name]
promoted_fields = []
promoted_seq_elem_counts = collections.Counter()
promoted_preset_fields = {}
if isinstance(field_info, Primitive) or field_info is None:
pass
else:
# Figure out which fields of type `field_info` should be promoted
# Example:
# most_freq_triple = ('Call', 'func', 'Name')
# field_info = 'Name'
# type_infos['Name'].unset_fields = {'id': Field(identifier, id)}
for is_preset, (field_field_name, field_field) in itertools.chain(
zip(itertools.repeat(False), self.type_infos[field_info].unset_fields.items()),
zip(itertools.repeat(True), self.type_infos[field_info].preset_fields.items())):
if isinstance(field_field_name, tuple):
field_field_tuple_name = field_field_name
else:
field_field_tuple_name = (field_field_name,)
if existing_field.seq:
suffix = (existing_type.preset_seq_elem_counts[tuple_name],) + field_field_tuple_name
else:
suffix = field_field_tuple_name
new_name = tuple_name + suffix
if isinstance(field_field, asdl.Field):
new_field = asdl.Field(
type=field_field.type,
name=new_name,
seq=field_field.seq,
opt=field_field.opt)
else:
new_field = field_field
if is_preset:
promoted_preset_fields[new_name] = new_field
else:
promoted_fields.append((field_field, new_field))
seq_elem_count = self.type_infos[field_info].preset_seq_elem_counts[field_field_tuple_name]
if seq_elem_count:
promoted_seq_elem_counts[new_name] = seq_elem_count
# Create a new type
new_preset_fields = {**existing_type.preset_fields, **promoted_preset_fields}
new_preset_seq_elem_counts = existing_type.preset_seq_elem_counts + promoted_seq_elem_counts
if existing_field.seq and field_info is not None:
new_preset_fields[
tuple_name + (new_preset_seq_elem_counts[tuple_name],)] = field_info
new_preset_seq_elem_counts[tuple_name] += 1
else:
new_preset_fields[tuple_name] = field_info
new_unset_fields = {
**{f.name: f for old_field, f in promoted_fields},
**existing_type.unset_fields
}
if field_info is None or not existing_field.seq:
# Only unset if...
# - field is not sequential
# - field has been set to None, meaning the end of a sequence
del new_unset_fields[field_name]
new_type = TypeInfo(
name='Type{:04d}_{}'.format(self.iterations_finished, existing_type.base_name),
base_name=existing_type.base_name,
predecessor_name=existing_type.name,
predecessor_triple=most_freq_triple,
unset_fields=new_unset_fields,
preset_fields=new_preset_fields,
preset_seq_elem_counts = new_preset_seq_elem_counts
)
self.type_infos[new_type.name] = new_type
self.created_types.append(new_type)
self.type_graph.add_edge(new_type.name, existing_type.name)
self.iterations_finished += 1
# Tracks which occurrences have been removed due to promotion.
discarded = IdentitySet()
for occ in most_freq_occurrences:
if occ in discarded:
continue
occ['_type'] = new_type.name
def delete_obsoleted_field():
if existing_field.seq:
# todo: change 0 if we can promote other elements
del occ[field_name][0]
if not occ[field_name]:
del occ[field_name]
else:
del occ[field_name]
if isinstance(field_info, Primitive):
delete_obsoleted_field()
elif field_info is None:
pass
else:
if existing_field.seq:
# todo: change 0 if we can promote other elements
value_to_promote = occ[field_name][0]
else:
value_to_promote = occ[field_name]
delete_obsoleted_field()
discarded.add(value_to_promote)
for old_field, new_field in promoted_fields:
if old_field.name not in value_to_promote:
assert old_field.opt or old_field.seq
continue
occ[new_field.name] = value_to_promote[old_field.name]
assert occ[new_field.name]
def finish(self, trees):
_, node_type_counts = self.count_triples(trees)
self.pre_iteration_counts.append(node_type_counts)
def count_triples(self, trees):
triple_occurrences = collections.defaultdict(list)
node_type_counts = collections.Counter()
for tree in trees:
queue = collections.deque([tree])
while queue:
node = queue.pop()
node_type_counts[node['_type']] += 1
for field_name, field in self.type_infos[node['_type']].unset_fields.items():
if field_name in node:
field_value = node[field_name]
is_primitive = field.type in self.ast_wrapper.primitive_types
if field.seq:
relevant_value = field_value[0]
if not is_primitive:
queue.extend(field_value)
else:
relevant_value = field_value
if not is_primitive:
queue.append(field_value)
if is_primitive:
field_info = Primitive(relevant_value)
else:
field_info = relevant_value['_type']
else:
assert field.seq or field.opt
field_info = None
triple_occurrences[node['_type'], field_name, field_info].append(node)
for field_name in self.type_infos[node['_type']].preset_fields:
assert field_name not in node
return triple_occurrences, node_type_counts
def visualize(self, root_type: TypeInfo):
result = io.StringIO()
def print_type(this_type, parent_lasts, field_prefix):
def print_child(s, last, parent_lasts):
for parent_last in parent_lasts:
if parent_last:
result.write(' ')
else:
result.write('│ ')
if last:
result.write('└─')
else:
result.write('├─')
print(s, file=result)
if parent_lasts:
print_child(this_type.base_name, parent_lasts[-1], parent_lasts[:-1])
else:
print(this_type.base_name, file=result)
fields = self.type_infos[this_type.base_name].unset_fields
for i, field in enumerate(fields.values()):
last_field = i + 1 == len(fields)
# Print the name of the field
print_child(
'{} [{}]{}'.format(
field.name, field.type, '?' if field.opt else '*' if field.seq else ''),
last_field,
parent_lasts)
field_path = field_prefix + (field.name,)
parent_lasts_for_field = parent_lasts + (last_field,)
if field.opt and field_path in root_type.preset_fields and root_type.preset_fields[field_path] is None:
# Don't print '??' because we've already determined that the field should be unset
pass
elif field.seq:
# Print all the elements
if field_path in root_type.preset_fields:
assert root_type.preset_fields[field_path] is None
seq_complete = True
else:
seq_complete = False
preset_count = root_type.preset_seq_elem_counts[field_path]
for i in range(preset_count):
last_seq_elem = seq_complete and i + 1 == preset_count
seq_elem_path = field_path + (i,)
field_value = root_type.preset_fields[seq_elem_path]
if isinstance(field_value, Primitive):
print_child(
repr(field_value.value),
last_seq_elem,
parent_lasts_for_field)
else:
print_type(
self.type_infos[field_value],
parent_lasts_for_field + (last_seq_elem,),
seq_elem_path)
if not seq_complete:
print_child('??', True, parent_lasts_for_field)
else:
if field_path not in root_type.preset_fields:
print_child('??', True, parent_lasts_for_field)
else:
field_value = root_type.preset_fields[field_path]
if isinstance(field_value, Primitive):
print_child(repr(field_value.value), True, parent_lasts_for_field)
else:
print_type(
self.type_infos[field_value],
parent_lasts_for_field + (True,),
field_path)
print_type(root_type, (), ())
return result.getvalue()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', required=True)
parser.add_argument('--config-args')
parser.add_argument('--section', default='train')
parser.add_argument('--num-iters', type=int, default=100)
parser.add_argument('--vis-out')
args = parser.parse_args()
if args.config_args:
config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(args.config))
# 0. Construct preprocessors
model_preproc = registry.instantiate(
registry.lookup('model', config['model']).Preproc,
config['model'])
model_preproc.load()
# 3. Get training data somewhere
preproc_data = model_preproc.dataset(args.section)
all_trees = [dec.tree for enc, dec in preproc_data]
tree_bpe = TreeBPE(model_preproc.dec_preproc.grammar)
for i in tqdm.tqdm(range(args.num_iters), dynamic_ncols=True):
tree_bpe.run_iteration(all_trees)
tree_bpe.finish(all_trees)
print('Finished')
if args.vis_out:
f = open(args.vis_out, 'w')
f.write('''# Documentation
#
# Idiom trees are printed like this:
# NodeType
# ├─field1 [field1_type]
# ├─field2 [field2_type]?
# └─field3 [field3_type]*
# ? indicates the field is optional.
# * indicates the field is sequential.
#
# If a field has a known primitive value, it is written like this:
# └─field3 [str]
# └─'value'
#
# If a field has a known type for its value, it is written like this:
# └─field3 [field3_type]
# └─Field3NodeType
# └─...
#
# If a field:
# - does not have a known value, or
# - is sequential and the idiom allows for further entries at the end
# it is written like this:
# └─field3 [field3_type]
# └─??
#
# If a field:
# - is optional and known to lack a value, or
# - is sequential and the idiom does not allow for further entries at the end
# then there is no ??.
Initial node type frequency:
''')
for k, v in tree_bpe.pre_iteration_counts[0].most_common():
print('- {}: {}'.format(k, v), file=f)
print(file=f)
for i, type_info in enumerate(tree_bpe.created_types):
print('# Idiom {} [{}]'.format(i, type_info.name), file=f)
print('# Descended from {} by setting {} to {}'.format(*type_info.predecessor_triple), file=f)
print('# Frequency at creation: {}'.format(tree_bpe.pre_iteration_counts[i + 1][type_info.name]), file=f)
print(tree_bpe.visualize(type_info), file=f)
f.close()
else:
import IPython; IPython.embed()
if __name__ == '__main__':
main()
# ast_wrapper = grammar.ast_wrapper
#
# # TODO: Revive the following
# ## Preprocess the grammar
# ## Create initial units: node and its required product-type fields, recursively
# #units = {name: {} for name in ast_wrapper.singular_types}
# #for name, cons in ast_wrapper.singular_types.items():
# # unit_fields = units[name]
# # for field in cons.fields:
# # if not field.seq and not field.opt and field.type in ast_wrapper.singular_types:
# # unit_fields[field.name] = units[field.type]
#
# # field name syntax:
# # (field_name{1}, ..., field_name{k}, i, field_name{k+1}, ..., field_name{n})
#
# type_infos = {
# k: TypeInfo(
# name=k,
# base_name=k,
# predecessor_name=k,
# unset_fields={field.name: field for field in v.fields},
# preset_fields={}
# ) for k, v in ast_wrapper.singular_types.items()
# }
#
# # Count types
# for iteration in range(100): |
py | b40f51730a9b9a10c6b0beb60a135202c0beda70 | # coding: utf-8
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ks_api_client.api_client import ApiClient
from ks_api_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class MarginTradingApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def cancel_mtf_order(self, consumerKey, sessionToken, orderId, **kwargs): # noqa: E501
"""Cancel an order # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_mtf_order(consumerKey, sessionToken, orderId, async_req=True)
>>> result = thread.get()
:param consumerKey: (required)
:type consumerKey: str
:param sessionToken: (required)
:type sessionToken: str
:param orderId: Order ID to cancel. (required)
:type orderId: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: object
"""
kwargs['_return_http_data_only'] = True
return self.cancel_mtf_order_with_http_info(consumerKey, sessionToken, orderId, **kwargs) # noqa: E501
def cancel_mtf_order_with_http_info(self, consumerKey, sessionToken, orderId, **kwargs): # noqa: E501
"""Cancel an order # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_mtf_order_with_http_info(consumerKey, sessionToken, orderId, async_req=True)
>>> result = thread.get()
:param consumerKey: (required)
:type consumerKey: str
:param sessionToken: (required)
:type sessionToken: str
:param orderId: Order ID to cancel. (required)
:type orderId: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(object, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'consumerKey',
'sessionToken',
'orderId'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method cancel_mtf_order" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'consumerKey' is set
if self.api_client.client_side_validation and ('consumerKey' not in local_var_params or # noqa: E501
local_var_params['consumerKey'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `consumerKey` when calling `cancel_mtf_order`") # noqa: E501
# verify the required parameter 'sessionToken' is set
if self.api_client.client_side_validation and ('sessionToken' not in local_var_params or # noqa: E501
local_var_params['sessionToken'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `sessionToken` when calling `cancel_mtf_order`") # noqa: E501
# verify the required parameter 'orderId' is set
if self.api_client.client_side_validation and ('orderId' not in local_var_params or # noqa: E501
local_var_params['orderId'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `orderId` when calling `cancel_mtf_order`") # noqa: E501
collection_formats = {}
path_params = {}
if 'orderId' in local_var_params:
path_params['orderId'] = local_var_params['orderId'] # noqa: E501
query_params = []
header_params = {}
if 'consumerKey' in local_var_params:
header_params['consumerKey'] = local_var_params['consumerKey'] # noqa: E501
if 'sessionToken' in local_var_params:
header_params['sessionToken'] = local_var_params['sessionToken'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/orders/1.0/order/mtf/{orderId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def modify_mtf_order(self, consumerKey, sessionToken, ExistingMTFOrder, **kwargs): # noqa: E501
"""Modify an existing MTF order # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_mtf_order(consumerKey, sessionToken, ExistingMTFOrder, async_req=True)
>>> result = thread.get()
:param consumerKey: Unique ID for your application (required)
:type consumerKey: str
:param sessionToken: Session ID for your application (required)
:type sessionToken: str
:param ExistingMTFOrder: (required)
:type ExistingMTFOrder: ExistingMTFOrder
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: object
"""
kwargs['_return_http_data_only'] = True
return self.modify_mtf_order_with_http_info(consumerKey, sessionToken, ExistingMTFOrder, **kwargs) # noqa: E501
def modify_mtf_order_with_http_info(self, consumerKey, sessionToken, ExistingMTFOrder, **kwargs): # noqa: E501
"""Modify an existing MTF order # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_mtf_order_with_http_info(consumerKey, sessionToken, ExistingMTFOrder, async_req=True)
>>> result = thread.get()
:param consumerKey: Unique ID for your application (required)
:type consumerKey: str
:param sessionToken: Session ID for your application (required)
:type sessionToken: str
:param ExistingMTFOrder: (required)
:type ExistingMTFOrder: ExistingMTFOrder
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(object, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'consumerKey',
'sessionToken',
'ExistingMTFOrder'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_mtf_order" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'consumerKey' is set
if self.api_client.client_side_validation and ('consumerKey' not in local_var_params or # noqa: E501
local_var_params['consumerKey'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `consumerKey` when calling `modify_mtf_order`") # noqa: E501
# verify the required parameter 'sessionToken' is set
if self.api_client.client_side_validation and ('sessionToken' not in local_var_params or # noqa: E501
local_var_params['sessionToken'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `sessionToken` when calling `modify_mtf_order`") # noqa: E501
# verify the required parameter 'ExistingMTFOrder' is set
if self.api_client.client_side_validation and ('ExistingMTFOrder' not in local_var_params or # noqa: E501
local_var_params['ExistingMTFOrder'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `ExistingMTFOrder` when calling `modify_mtf_order`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'consumerKey' in local_var_params:
header_params['consumerKey'] = local_var_params['consumerKey'] # noqa: E501
if 'sessionToken' in local_var_params:
header_params['sessionToken'] = local_var_params['sessionToken'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'ExistingMTFOrder' in local_var_params:
body_params = local_var_params['ExistingMTFOrder']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/orders/1.0/order/mtf', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def place_new_mtf_order(self, consumerKey, sessionToken, NewMTFOrder, **kwargs): # noqa: E501
"""Place a New MTF order # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.place_new_mtf_order(consumerKey, sessionToken, NewMTFOrder, async_req=True)
>>> result = thread.get()
:param consumerKey: Unique ID for your application (required)
:type consumerKey: str
:param sessionToken: Session ID Generated with successful login. (required)
:type sessionToken: str
:param NewMTFOrder: (required)
:type NewMTFOrder: NewMTFOrder
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: object
"""
kwargs['_return_http_data_only'] = True
return self.place_new_mtf_order_with_http_info(consumerKey, sessionToken, NewMTFOrder, **kwargs) # noqa: E501
def place_new_mtf_order_with_http_info(self, consumerKey, sessionToken, NewMTFOrder, **kwargs): # noqa: E501
"""Place a New MTF order # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.place_new_mtf_order_with_http_info(consumerKey, sessionToken, NewMTFOrder, async_req=True)
>>> result = thread.get()
:param consumerKey: Unique ID for your application (required)
:type consumerKey: str
:param sessionToken: Session ID Generated with successful login. (required)
:type sessionToken: str
:param NewMTFOrder: (required)
:type NewMTFOrder: NewMTFOrder
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(object, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'consumerKey',
'sessionToken',
'NewMTFOrder'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method place_new_mtf_order" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'consumerKey' is set
if self.api_client.client_side_validation and ('consumerKey' not in local_var_params or # noqa: E501
local_var_params['consumerKey'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `consumerKey` when calling `place_new_mtf_order`") # noqa: E501
# verify the required parameter 'sessionToken' is set
if self.api_client.client_side_validation and ('sessionToken' not in local_var_params or # noqa: E501
local_var_params['sessionToken'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `sessionToken` when calling `place_new_mtf_order`") # noqa: E501
# verify the required parameter 'NewMTFOrder' is set
if self.api_client.client_side_validation and ('NewMTFOrder' not in local_var_params or # noqa: E501
local_var_params['NewMTFOrder'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `NewMTFOrder` when calling `place_new_mtf_order`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'consumerKey' in local_var_params:
header_params['consumerKey'] = local_var_params['consumerKey'] # noqa: E501
if 'sessionToken' in local_var_params:
header_params['sessionToken'] = local_var_params['sessionToken'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'NewMTFOrder' in local_var_params:
body_params = local_var_params['NewMTFOrder']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/orders/1.0/order/mtf', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
|
py | b40f517350ec1eedd283c2fe5fb59ee7ba64343c | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# License: MIT. See LICENSE
import frappe
from frappe import _
from frappe.modules.export_file import export_to_files
from frappe.model.document import Document
from frappe.model.rename_doc import rename_doc
from frappe.desk.desktop import save_new_widget
from frappe.desk.utils import validate_route_conflict
from json import loads
class Workspace(Document):
def validate(self):
if (self.public and not is_workspace_manager() and not disable_saving_as_public()):
frappe.throw(_("You need to be Workspace Manager to edit this document"))
validate_route_conflict(self.doctype, self.name)
try:
if not isinstance(loads(self.content), list):
raise
except Exception:
frappe.throw(_("Content data shoud be a list"))
def on_update(self):
if disable_saving_as_public():
return
if frappe.conf.developer_mode and self.module and self.public:
export_to_files(record_list=[['Workspace', self.name]], record_module=self.module)
@staticmethod
def get_module_page_map():
pages = frappe.get_all("Workspace", fields=["name", "module"], filters={'for_user': ''}, as_list=1)
return { page[1]: page[0] for page in pages if page[1] }
def get_link_groups(self):
cards = []
current_card = frappe._dict({
"label": "Link",
"type": "Card Break",
"icon": None,
"hidden": False,
})
card_links = []
for link in self.links:
link = link.as_dict()
if link.type == "Card Break":
if card_links and (not current_card.get('only_for') or current_card.get('only_for') == frappe.get_system_settings('country')):
current_card['links'] = card_links
cards.append(current_card)
current_card = link
card_links = []
else:
card_links.append(link)
current_card['links'] = card_links
cards.append(current_card)
return cards
def build_links_table_from_card(self, config):
for idx, card in enumerate(config):
links = loads(card.get('links'))
# remove duplicate before adding
for idx, link in enumerate(self.links):
if link.label == card.get('label') and link.type == 'Card Break':
del self.links[idx : idx + link.link_count + 1]
self.append('links', {
"label": card.get('label'),
"type": "Card Break",
"icon": card.get('icon'),
"hidden": card.get('hidden') or False,
"link_count": card.get('link_count'),
"idx": 1 if not self.links else self.links[-1].idx + 1
})
for link in links:
self.append('links', {
"label": link.get('label'),
"type": "Link",
"link_type": link.get('link_type'),
"link_to": link.get('link_to'),
"onboard": link.get('onboard'),
"only_for": link.get('only_for'),
"dependencies": link.get('dependencies'),
"is_query_report": link.get('is_query_report'),
"idx": self.links[-1].idx + 1
})
def disable_saving_as_public():
return frappe.flags.in_install or \
frappe.flags.in_patch or \
frappe.flags.in_test or \
frappe.flags.in_fixtures or \
frappe.flags.in_migrate
def get_link_type(key):
key = key.lower()
link_type_map = {
"doctype": "DocType",
"page": "Page",
"report": "Report"
}
if key in link_type_map:
return link_type_map[key]
return "DocType"
def get_report_type(report):
report_type = frappe.get_value("Report", report, "report_type")
return report_type in ["Query Report", "Script Report", "Custom Report"]
@frappe.whitelist()
def new_page(new_page):
if not loads(new_page):
return
page = loads(new_page)
if page.get("public") and not is_workspace_manager():
return
doc = frappe.new_doc('Workspace')
doc.title = page.get('title')
doc.icon = page.get('icon')
doc.content = page.get('content')
doc.parent_page = page.get('parent_page')
doc.label = page.get('label')
doc.for_user = page.get('for_user')
doc.public = page.get('public')
doc.sequence_id = last_sequence_id(doc) + 1
doc.save(ignore_permissions=True)
return doc
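# example payload for new_page (illustrative values; the keys mirror the attributes read above):
# new_page('{"title": "My Page", "icon": "folder", "content": "[]", "parent_page": "", "label": "My Page", "for_user": "", "public": 0}')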
@frappe.whitelist()
def save_page(title, public, new_widgets, blocks):
public = frappe.parse_json(public)
filters = {
'public': public,
'label': title
}
if not public:
filters = {
'for_user': frappe.session.user,
'label': title + "-" + frappe.session.user
}
pages = frappe.get_list("Workspace", filters=filters)
if pages:
doc = frappe.get_doc("Workspace", pages[0])
doc.content = blocks
doc.save(ignore_permissions=True)
save_new_widget(doc, title, blocks, new_widgets)
return {"name": title, "public": public, "label": doc.label}
@frappe.whitelist()
def update_page(name, title, icon, parent, public):
public = frappe.parse_json(public)
doc = frappe.get_doc("Workspace", name)
filters = {
'parent_page': doc.title,
'public': doc.public
}
child_docs = frappe.get_list("Workspace", filters=filters)
if doc:
doc.title = title
doc.icon = icon
doc.parent_page = parent
if doc.public != public:
doc.sequence_id = frappe.db.count('Workspace', {'public':public}, cache=True)
doc.public = public
doc.for_user = '' if public else doc.for_user or frappe.session.user
doc.label = '{0}-{1}'.format(title, doc.for_user) if doc.for_user else title
doc.save(ignore_permissions=True)
if name != doc.label:
rename_doc("Workspace", name, doc.label, force=True, ignore_permissions=True)
# update new name and public in child pages
if child_docs:
for child in child_docs:
child_doc = frappe.get_doc("Workspace", child.name)
child_doc.parent_page = doc.title
child_doc.public = doc.public
child_doc.save(ignore_permissions=True)
return {"name": doc.title, "public": doc.public, "label": doc.label}
@frappe.whitelist()
def duplicate_page(page_name, new_page):
if not loads(new_page):
return
new_page = loads(new_page)
if new_page.get("is_public") and not is_workspace_manager():
return
old_doc = frappe.get_doc("Workspace", page_name)
doc = frappe.copy_doc(old_doc)
doc.title = new_page.get('title')
doc.icon = new_page.get('icon')
doc.parent_page = new_page.get('parent') or ''
doc.public = new_page.get('is_public')
doc.for_user = ''
doc.label = doc.title
if not doc.public:
doc.for_user = doc.for_user or frappe.session.user
doc.label = '{0}-{1}'.format(doc.title, doc.for_user)
doc.name = doc.label
if old_doc.public == doc.public:
doc.sequence_id += 0.1
else:
doc.sequence_id = last_sequence_id(doc) + 1
doc.insert(ignore_permissions=True)
return doc
@frappe.whitelist()
def delete_page(page):
if not loads(page):
return
page = loads(page)
if page.get("public") and not is_workspace_manager():
return
if frappe.db.exists("Workspace", page.get("name")):
frappe.get_doc("Workspace", page.get("name")).delete(ignore_permissions=True)
return {"name": page.get("name"), "public": page.get("public"), "title": page.get("title")}
@frappe.whitelist()
def sort_pages(sb_public_items, sb_private_items):
if not loads(sb_public_items) and not loads(sb_private_items):
return
sb_public_items = loads(sb_public_items)
sb_private_items = loads(sb_private_items)
workspace_public_pages = get_page_list(['name', 'title'], {'public': 1})
workspace_private_pages = get_page_list(['name', 'title'], {'for_user': frappe.session.user})
if sb_private_items:
return sort_page(workspace_private_pages, sb_private_items)
if sb_public_items and is_workspace_manager():
return sort_page(workspace_public_pages, sb_public_items)
return False
def sort_page(workspace_pages, pages):
for seq, d in enumerate(pages):
for page in workspace_pages:
if page.title == d.get('title'):
doc = frappe.get_doc('Workspace', page.name)
doc.sequence_id = seq + 1
doc.parent_page = d.get('parent_page') or ""
doc.save(ignore_permissions=True)
break
return True
def last_sequence_id(doc):
doc_exists = frappe.db.exists({
'doctype': 'Workspace',
'public': doc.public,
'for_user': doc.for_user
})
if not doc_exists:
return 0
return frappe.db.get_list('Workspace',
fields=['sequence_id'],
filters={
'public': doc.public,
'for_user': doc.for_user
},
order_by="sequence_id desc"
)[0].sequence_id
def get_page_list(fields, filters):
return frappe.get_list("Workspace", fields=fields, filters=filters, order_by='sequence_id asc')
def is_workspace_manager():
return "Workspace Manager" in frappe.get_roles()
|
py | b40f5186f2c8a01c230917daa1477bb0f302dccc | from __future__ import absolute_import, unicode_literals
import os
import mock
import tornado.testing
import tornado.wsgi
import mopidy
from mopidy.http import actor, handlers
class HttpServerTest(tornado.testing.AsyncHTTPTestCase):
def get_config(self):
return {
'http': {
'hostname': '127.0.0.1',
'port': 6680,
'zeroconf': '',
'allowed_origins': [],
}
}
def get_app(self):
core = mock.Mock()
core.get_version = mock.MagicMock(name='get_version')
core.get_version.return_value = mopidy.__version__
testapps = [dict(name='testapp')]
teststatics = [dict(name='teststatic')]
apps = [{
'name': 'mopidy',
'factory': handlers.make_mopidy_app_factory(testapps, teststatics),
}]
http_server = actor.HttpServer(
config=self.get_config(), core=core, sockets=[],
apps=apps, statics=[])
return tornado.web.Application(http_server._get_request_handlers())
class RootRedirectTest(HttpServerTest):
def test_should_redirect_to_mopidy_app(self):
response = self.fetch('/', method='GET', follow_redirects=False)
self.assertEqual(response.code, 302)
self.assertEqual(response.headers['Location'], '/mopidy/')
class MopidyAppTest(HttpServerTest):
def test_should_return_index(self):
response = self.fetch('/mopidy/', method='GET')
body = tornado.escape.to_unicode(response.body)
self.assertIn(
'This web server is a part of the Mopidy music server.', body)
self.assertIn('testapp', body)
self.assertIn('teststatic', body)
self.assertEqual(
response.headers['X-Mopidy-Version'], mopidy.__version__)
self.assertEqual(response.headers['Cache-Control'], 'no-cache')
def test_without_slash_should_redirect(self):
response = self.fetch('/mopidy', method='GET', follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], '/mopidy/')
def test_should_return_static_files(self):
response = self.fetch('/mopidy/mopidy.css', method='GET')
self.assertIn(
'html {',
tornado.escape.to_unicode(response.body))
self.assertEqual(
response.headers['X-Mopidy-Version'], mopidy.__version__)
self.assertEqual(response.headers['Cache-Control'], 'no-cache')
class MopidyWebSocketHandlerTest(HttpServerTest):
def test_should_return_ws(self):
response = self.fetch('/mopidy/ws', method='GET')
self.assertEqual(
'Can "Upgrade" only to "WebSocket".',
tornado.escape.to_unicode(response.body))
def test_should_return_ws_old(self):
response = self.fetch('/mopidy/ws/', method='GET')
self.assertEqual(
'Can "Upgrade" only to "WebSocket".',
tornado.escape.to_unicode(response.body))
class MopidyRPCHandlerTest(HttpServerTest):
def test_should_return_rpc_error(self):
cmd = tornado.escape.json_encode({'action': 'get_version'})
response = self.fetch('/mopidy/rpc', method='POST', body=cmd, headers={
'Content-Type': 'application/json'})
self.assertEqual(
{'jsonrpc': '2.0', 'id': None, 'error':
{'message': 'Invalid Request', 'code': -32600,
'data': '"jsonrpc" member must be included'}},
tornado.escape.json_decode(response.body))
def test_should_return_parse_error(self):
cmd = '{[[[]}'
response = self.fetch('/mopidy/rpc', method='POST', body=cmd, headers={
'Content-Type': 'application/json'})
self.assertEqual(
{'jsonrpc': '2.0', 'id': None, 'error':
{'message': 'Parse error', 'code': -32700}},
tornado.escape.json_decode(response.body))
def test_should_return_mopidy_version(self):
cmd = tornado.escape.json_encode({
'method': 'core.get_version',
'params': [],
'jsonrpc': '2.0',
'id': 1,
})
response = self.fetch('/mopidy/rpc', method='POST', body=cmd, headers={
'Content-Type': 'application/json'})
self.assertEqual(
{'jsonrpc': '2.0', 'id': 1, 'result': mopidy.__version__},
tornado.escape.json_decode(response.body))
def test_should_return_extra_headers(self):
response = self.fetch('/mopidy/rpc', method='HEAD')
self.assertIn('Accept', response.headers)
self.assertIn('X-Mopidy-Version', response.headers)
self.assertIn('Cache-Control', response.headers)
self.assertIn('Content-Type', response.headers)
def test_should_require_correct_content_type(self):
cmd = tornado.escape.json_encode({
'method': 'core.get_version',
'params': [],
'jsonrpc': '2.0',
'id': 1,
})
response = self.fetch('/mopidy/rpc', method='POST', body=cmd, headers={
'Content-Type': 'text/plain'})
self.assertEqual(response.code, 415)
self.assertEqual(
response.reason, 'Content-Type must be application/json')
def test_different_origin_returns_access_denied(self):
response = self.fetch('/mopidy/rpc', method='OPTIONS', headers={
'Host': 'me:6680', 'Origin': 'http://evil:666'})
self.assertEqual(response.code, 403)
self.assertEqual(
response.reason, 'Access denied for origin http://evil:666')
def test_same_origin_returns_cors_headers(self):
response = self.fetch('/mopidy/rpc', method='OPTIONS', headers={
'Host': 'me:6680', 'Origin': 'http://me:6680'})
self.assertEqual(
response.headers['Access-Control-Allow-Origin'], 'http://me:6680')
self.assertEqual(
response.headers['Access-Control-Allow-Headers'], 'Content-Type')
class HttpServerWithStaticFilesTest(tornado.testing.AsyncHTTPTestCase):
def get_app(self):
config = {
'http': {
'hostname': '127.0.0.1',
'port': 6680,
'zeroconf': '',
}
}
core = mock.Mock()
statics = [dict(name='static', path=os.path.dirname(__file__))]
http_server = actor.HttpServer(
config=config, core=core, sockets=[], apps=[], statics=statics)
return tornado.web.Application(http_server._get_request_handlers())
def test_without_slash_should_redirect(self):
response = self.fetch('/static', method='GET', follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], '/static/')
def test_can_serve_static_files(self):
response = self.fetch('/static/test_server.py', method='GET')
self.assertEqual(200, response.code)
self.assertEqual(
response.headers['X-Mopidy-Version'], mopidy.__version__)
self.assertEqual(
response.headers['Cache-Control'], 'no-cache')
def wsgi_app_factory(config, core):
def wsgi_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
return ['Hello, world!\n']
return [
('(.*)', tornado.web.FallbackHandler, {
'fallback': tornado.wsgi.WSGIContainer(wsgi_app),
}),
]
class HttpServerWithWsgiAppTest(tornado.testing.AsyncHTTPTestCase):
def get_app(self):
config = {
'http': {
'hostname': '127.0.0.1',
'port': 6680,
'zeroconf': '',
}
}
core = mock.Mock()
apps = [{
'name': 'wsgi',
'factory': wsgi_app_factory,
}]
http_server = actor.HttpServer(
config=config, core=core, sockets=[], apps=apps, statics=[])
return tornado.web.Application(http_server._get_request_handlers())
def test_without_slash_should_redirect(self):
response = self.fetch('/wsgi', method='GET', follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], '/wsgi/')
def test_can_wrap_wsgi_apps(self):
response = self.fetch('/wsgi/', method='GET')
self.assertEqual(200, response.code)
self.assertIn(
'Hello, world!', tornado.escape.to_unicode(response.body))
|
py | b40f519faa02a7f82f3fa6af4b19d092657587fd | "fdf functions"
from encodings import utf_8
import io
fdf_head = """<?xml version="1.0" encoding="UTF-8"?>
<xfdf xmlns="http://ns.adobe.com/xfdf/" xml:space="preserve">
<fields>
"""
fdf_tail = """
</fields>
</xfdf>
"""
def generate_fdf(fields, data):
"generate an xfdf with to fill out form fields"
fdf = io.StringIO()
fdf.write(fdf_head)
fdf.write("\n".join(fdf_fields(fields, data)))
fdf.write(fdf_tail)
return fdf.getvalue()
def fdf_fields(fields, data):
"format xfdf fields"
template = " <field name=\"{field_name}\">\n <value>{data}</value>\n </field>"
for n, d in data.items():
field_def = fields.get(n)
if field_def:
field_name = field_def.get("name")
if field_name:
yield template.format(field_name=field_name, data=d)
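# A minimal usage sketch of the two helpers above; the field mapping and values
# are illustrative only (a real PDF form would supply its own fully-qualified names):
#
#     fields = {"first_name": {"name": "form1[0].Page1[0].FirstName[0]"}}
#     data = {"first_name": "Jane"}
#     xfdf = generate_fdf(fields, data)
#     # xfdf is an XML string containing one <field> element whose <value> is "Jane"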
|
py | b40f52f35180f985146ef6975e1396e763701f7d | import sys
sys.path.append("./qcnn")
import os
#Activate the cuda env
import glob
import time
import pickle
from multiprocessing import Pool  # Pool is used in gen_train(); PoolSize and poolProcess must be provided elsewhere in the project
from qcnn.small_qsr import gen_train_from_wave
datasetPath = "/ceph/mstrobl/dataset"
waveformPath = "/ceph/mstrobl/waveforms"
featurePath = "/ceph/mstrobl/features/"
def gen_train(labels, train_audio_path, outputPath, samplingRate=16000, port=1):
global sr
sr = samplingRate
all_wave = list()
for label in labels:
datasetLabelFiles = glob.glob(f"{train_audio_path}/{label}/*.wav")
portDatsetLabelFiles = datasetLabelFiles[0::port]
print(f"\nUsing {len(portDatsetLabelFiles)} out of {len(datasetLabelFiles)} files for label '{label}'\n")
with Pool(PoolSize) as p:
temp_waves = p.map(poolProcess, portDatsetLabelFiles)
all_wave.append(temp_waves)
tid = time.time()
print(f"Finished generating waveforms at {tid}")
with open(f"{waveformPath}/waveforms{tid}.pckl", 'wb') as fid:
pickle.dump(all_wave, fid, pickle.HIGHEST_PROTOCOL)
with open(f"{waveformPath}/labels{tid}.pckl", 'wb') as fid:
pickle.dump(labels, fid, pickle.HIGHEST_PROTOCOL)
print(f"Finished dumping cache. Starting Feature export")
return gen_train_from_wave(all_wave=all_wave, all_label=labels, output=outputPath)
if __name__ == '__main__':
print(f"\n\n\n-----------------------\n\n\n")
print(f"Generate Feature From Wave @{time.time()}")
print(f"\n\n\n-----------------------\n\n\n")
waveformFiles = glob.glob(f"{waveformPath}/waveforms*.pckl")
waveformFiles.sort(key=os.path.getmtime)
print(f"Using file {waveformFiles[-1]}")
labelFiles = glob.glob(f"{waveformPath}/labels*.pckl")
labelFiles.sort(key=os.path.getmtime)
print(f"Using file {labelFiles[-1]}")
with open(waveformFiles[-1], 'rb') as fid:
all_wave = pickle.load(fid)
with open(labelFiles[-1], 'rb') as fid:
all_labels = pickle.load(fid)
# gen_train(labels, datasetPath, featurePath, port=10)
gen_train_from_wave(all_wave=all_wave, all_label=all_labels, output=featurePath) |
py | b40f531cbc8a7f9ce4e2ccc620b8d30be8d432d6 | class ConsumerConfig:
def __init__(self, url, exchange, exchange_type, routing_key, queue_name):
self.url = url
self.exchange = exchange
self.exchange_type = exchange_type
self.routing_key = routing_key
self.queue_name = queue_name
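# A minimal construction sketch; every value below is an illustrative placeholder:
#
#     config = ConsumerConfig(
#         url='amqp://guest:guest@localhost:5672/%2F',
#         exchange='events',
#         exchange_type='topic',
#         routing_key='events.created',
#         queue_name='events_queue',
#     )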
|
py | b40f5361edd511da4ac26cfa42857ecca6100342 | #!/usr/bin/env python3
"""Tests for SSML"""
import sys
import unittest
from gruut import sentences
from gruut.utils import print_graph
class SSMLTestCase(unittest.TestCase):
"""Test cases for SSML"""
def test_wikipedia_example(self):
"""Test SSML example from Wikipedia"""
text = """<?xml version="1.0"?>
<speak xmlns="http://www.w3.org/2001/10/synthesis"
xmlns:dc="http://purl.org/dc/elements/1.1/"
version="1.0">
<metadata>
<dc:title xml:lang="en">Telephone Menu: Level 1</dc:title>
</metadata>
<p>
<s xml:lang="en-US">
<voice name="David" gender="male" age="25">
For English, press <emphasis>one</emphasis>.
</voice>
</s>
<s xml:lang="es-MX">
<voice name="Miguel" gender="male" age="25">
Para español, oprima el <emphasis>dos</emphasis>.
</voice>
</s>
</p>
</speak>"""
results = [
(w.sent_idx, w.idx, w.lang, w.voice, w.text)
for sent in sentences(text, ssml=True)
for w in sent
]
self.assertEqual(
results,
[
(0, 0, "en-US", "David", "For"),
(0, 1, "en-US", "David", "English"),
(0, 2, "en-US", "David", ","),
(0, 3, "en-US", "David", "press"),
(0, 4, "en-US", "David", "one"),
(0, 5, "en-US", "David", "."),
(1, 0, "es-MX", "Miguel", "Para"),
(1, 1, "es-MX", "Miguel", "español"),
(1, 2, "es-MX", "Miguel", ","),
(1, 3, "es-MX", "Miguel", "oprima"),
(1, 4, "es-MX", "Miguel", "el"),
(1, 5, "es-MX", "Miguel", "dos"),
(1, 6, "es-MX", "Miguel", "."),
],
)
def test_lang_s(self):
"""Test lang on <s>"""
text = """<?xml version="1.0" encoding="ISO-8859-1"?>
<speak version="1.1" xmlns="http://www.w3.org/2001/10/synthesis"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.w3.org/2001/10/synthesis
http://www.w3.org/TR/speech-synthesis11/synthesis.xsd"
xml:lang="en-US">
<s>Today, 2/1/2000.</s>
<!-- Today, February first two thousand -->
<s xml:lang="it">Un mese fà, 2/1/2000.</s>
<!-- Un mese fà, il due gennaio duemila -->
<!-- One month ago, the second of January two thousand -->
</speak>"""
results = [
(w.sent_idx, w.idx, w.lang, w.text)
for sent in sentences(text, ssml=True)
for w in sent
]
self.assertEqual(
results,
[
(0, 0, "en-US", "Today"),
(0, 1, "en-US", ","),
(0, 2, "en-US", "February"),
(0, 3, "en-US", "first"),
(0, 4, "en-US", ","),
(0, 5, "en-US", "two"),
(0, 6, "en-US", "thousand"),
(0, 7, "en-US", "."),
(1, 0, "it", "Un"),
(1, 1, "it", "mese"),
(1, 2, "it", "fà"),
(1, 3, "it", ","),
# no "il"
(1, 4, "it", "due"),
(1, 5, "it", "gennaio"),
(1, 6, "it", "duemila"),
(1, 7, "it", "."),
],
)
def test_phoneme(self):
"""Test manual phoneme insertion"""
text = """<?xml version="1.0"?>
<speak version="1.1" xmlns="http://www.w3.org/2001/10/synthesis"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.w3.org/2001/10/synthesis
http://www.w3.org/TR/speech-synthesis11/synthesis.xsd"
xml:lang="en-US">
<phoneme alphabet="ipa" ph="təmei̥ɾou̥"> tomato </phoneme>
<!-- This is an example of IPA using character entities -->
<!-- Because many platform/browser/text editor combinations do not
correctly cut and paste Unicode text, this example uses the entity
escape versions of the IPA characters. Normally, one would directly
use the UTF-8 representation of these symbols: "təmei̥ɾou̥". -->
</speak>"""
results = [
(w.sent_idx, w.idx, w.lang, w.text, w.phonemes)
for sent in sentences(text, ssml=True)
for w in sent
]
self.assertEqual(
results,
[(0, 0, "en-US", "tomato", ["t", "ə", "m", "e", "i̥", "ɾ", "o", "u̥"])],
)
def test_sentences(self):
"""Test <s>"""
text = """<?xml version="1.0"?>
<speak version="1.1" xmlns="http://www.w3.org/2001/10/synthesis"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.w3.org/2001/10/synthesis
http://www.w3.org/TR/speech-synthesis11/synthesis.xsd"
xml:lang="en-US">
<p>
<s>This is the first sentence of the paragraph.</s>
<s>Here's another sentence.</s>
</p>
</speak>"""
results = [
(w.sent_idx, w.idx, w.text)
for sent in sentences(text, ssml=True)
for w in sent
]
self.assertEqual(
results,
[
(0, 0, "This"),
(0, 1, "is"),
(0, 2, "the"),
(0, 3, "first"),
(0, 4, "sentence"),
(0, 5, "of"),
(0, 6, "the"),
(0, 7, "paragraph"),
(0, 8, "."),
(1, 0, "Here's"),
(1, 1, "another"),
(1, 2, "sentence"),
(1, 3, "."),
],
)
def test_token(self):
"""Test explicit tokenization"""
# NOTE: Added full stops
text = """<?xml version="1.0"?>
<speak version="1.1" xmlns="http://www.w3.org/2001/10/synthesis"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.w3.org/2001/10/synthesis
http://www.w3.org/TR/speech-synthesis11/synthesis.xsd"
xml:lang="zh-CN">
<!-- The Nanjing Changjiang River Bridge -->
<token>南京市</token><token>长江大桥</token>。
<!-- The mayor of Nanjing city, Jiang Daqiao -->
南京市长<w>江大桥</w>。
<!-- Shanghai is a metropolis -->
上海是个<w>大都会</w>。
<!-- Most Shanghainese will say something like that -->
上海人<w>大都</w>会那么说。
</speak>"""
results = [
(w.sent_idx, w.idx, w.text)
for sent in sentences(text, ssml=True)
for w in sent
]
self.assertEqual(
results,
[
(0, 0, "南京市"),
(0, 1, "长江大桥"),
(0, 2, "。"),
(1, 0, "南"),
(1, 1, "京"),
(1, 2, "市"),
(1, 3, "长"),
(1, 4, "江大桥"),
(1, 5, "。"),
(2, 0, "上"),
(2, 1, "海"),
(2, 2, "是"),
(2, 3, "个"),
(2, 4, "大都会"),
(2, 5, "。"),
(3, 0, "上"),
(3, 1, "海"),
(3, 2, "人"),
(3, 3, "大都"),
(3, 4, "会"),
(3, 5, "那"),
(3, 6, "么"),
(3, 7, "说"),
(3, 8, "。"),
],
)
def test_sub(self):
"""Test <sub>"""
text = """<?xml version="1.0"?>
<speak version="1.1" xmlns="http://www.w3.org/2001/10/synthesis"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.w3.org/2001/10/synthesis
http://www.w3.org/TR/speech-synthesis11/synthesis.xsd"
xml:lang="en-US">
<sub alias="World Wide Web Consortium">W3C</sub> is an international community
<!-- World Wide Web Consortium -->
</speak>"""
results = [
(w.sent_idx, w.idx, w.text)
for sent in sentences(text, ssml=True)
for w in sent
]
self.assertEqual(
results,
[(0, 0, "World"), (0, 1, "Wide"), (0, 2, "Web"), (0, 3, "Consortium"), (0, 4, "is"), (0, 5, "an"), (0, 6, "international"), (0, 7, "community")],
)
def test_lang_element(self):
"""Test <lang>"""
text = """<?xml version="1.0"?>
<speak version="1.1" xmlns="http://www.w3.org/2001/10/synthesis"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.w3.org/2001/10/synthesis
http://www.w3.org/TR/speech-synthesis11/synthesis.xsd"
xml:lang="en-US">
The French word for cat is <w xml:lang="fr">chat</w>.
He prefers to eat pasta that is <lang xml:lang="it">al dente</lang>.
</speak>"""
results = [
(w.sent_idx, w.idx, w.lang, w.text)
for sent in sentences(text, ssml=True)
for w in sent
]
self.assertEqual(
results,
[
(0, 0, "en-US", "The"),
(0, 1, "en-US", "French"),
(0, 2, "en-US", "word"),
(0, 3, "en-US", "for"),
(0, 4, "en-US", "cat"),
(0, 5, "en-US", "is"),
(0, 6, "fr", "chat"),
(0, 7, "en-US", "."),
(1, 0, "en-US", "He"),
(1, 1, "en-US", "prefers"),
(1, 2, "en-US", "to"),
(1, 3, "en-US", "eat"),
(1, 4, "en-US", "pasta"),
(1, 5, "en-US", "that"),
(1, 6, "en-US", "is"),
(1, 7, "it", "al"),
(1, 8, "it", "dente"),
(1, 9, "en-US", "."),
],
)
def print_graph_stderr(graph, root):
"""Print graph to stderr"""
print_graph(graph, root, print_func=lambda *p: print(*p, file=sys.stderr))
# -----------------------------------------------------------------------------
if __name__ == "__main__":
unittest.main()
|
py | b40f54b41f43293d2b83567d7e86d2a6702f114a | from unittest import TestCase, main
from videos.factory import create_camera
from videos.jpg_camera import JpgCamera
from videos.lifegame_camera import LifeGameCamera
from videos.vcap_camera import DelayVideoCaptureCamera, VideoCaptureCamera
from yukari import create_app
class TestVideo(TestCase):
def setUp(self):
app = create_app(__name__, '../../frontend/dist/')
app.config['RTSP_CAMERA'] = False
app.config['WEB_CAMERA'] = False
app.config['MP4_CAMERA'] = False
app.config['DELAY_RTSP_CAMERA'] = False
app.config['DELAY_WEB_CAMERA'] = False
app.config['DELAY_MP4_CAMERA'] = False
app.config['LIFE_GAME_CAMERA'] = False
app.config['JPG_CAMERA'] = False
self.__app = app.test_client()
self.__config = app.config
def tearDown(self):
pass
def test_video_route(self):
res = self.__app.get('/video')
self.assertEqual(308, res.status_code)
def test_video_factory(self):
camera = create_camera(self.__config, None)
self.assertTrue(isinstance(camera, JpgCamera))
self.__config['JPG_CAMERA'] = True
camera = create_camera(self.__config, None)
self.assertTrue(isinstance(camera, JpgCamera))
self.__config['LIFE_GAME_CAMERA'] = True
camera = create_camera(self.__config, None)
self.assertTrue(isinstance(camera, LifeGameCamera))
self.__config['DELAY_MP4_CAMERA'] = True
camera = create_camera(self.__config, None)
self.assertTrue(isinstance(camera, DelayVideoCaptureCamera))
self.__config['MP4_CAMERA'] = True
camera = create_camera(self.__config, None)
self.assertTrue(isinstance(camera, VideoCaptureCamera))
if __name__ == '__main__':
main()
|
py | b40f56191120e60336ac6fd1b455da9dd7413160 | r"""wamp is a module that provides classes that extend any
WAMP-related class for the purposes of vtkWeb.
"""
import string
import random
import types
import logging
from threading import Timer
from twisted.python import log
from twisted.internet import reactor
from autobahn.resource import WebSocketResource
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, WampServerProtocol, WampCraProtocol, \
WampCraServerProtocol, WampServerFactory
try:
from vtkWebCore import vtkWebApplication
except:
from vtkWebCorePython import vtkWebApplication
# =============================================================================
salt = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))
application = None
# =============================================================================
#
# Base class for vtkWeb WampServerProtocol
#
# =============================================================================
class ServerProtocol(WampCraServerProtocol):
"""
Defines the core server protocol for vtkWeb. Adds support to
marshall/unmarshall RPC callbacks that involve ServerManager proxies as
arguments or return values.
Applications typically don't use this class directly, since it doesn't
register any RPC callbacks that are required for basic web-applications with
interactive visualizations. For that, use vtkWebServerProtocol.
"""
AUTHEXTRA = {'salt': salt, 'keylen': 32, 'iterations': 1000}
SECRETS = {'vtkweb': WampCraProtocol.deriveKey("vtkweb-secret", AUTHEXTRA)}
def __init__(self):
self.vtkWebProtocols = []
self.Application = self.initApplication()
self.initialize()
def initialize(self):
"""
Let the subclass define what it needs to do to properly initialize
itself.
"""
pass
def initApplication(self):
"""
Let subclass optionally initialize a custom application in lieu
of the default vtkWebApplication.
"""
global application
if not application:
application = vtkWebApplication()
return application
def getAuthPermissions(self, authKey, authExtra):
return {'permissions': 'all', 'authextra': self.AUTHEXTRA}
def updateSecret(self, newSecret):
self.SECRETS['vtkweb'] = WampCraProtocol.deriveKey(newSecret, self.AUTHEXTRA)
def getAuthSecret(self, authKey):
## return the auth secret for the given auth key or None when the auth key
## does not exist
secret = self.SECRETS.get(authKey, None)
return secret
def onAuthenticated(self, authKey, perms):
## register RPC endpoints
if authKey is not None:
self.registerForPubSub("http://vtk.org/event#", True)
self.registerForRpc(self, "http://vtk.org/vtk#")
for protocol in self.vtkWebProtocols:
self.registerForRpc(protocol, "http://vtk.org/vtk#")
def setApplication(self, application):
self.Application = application
def registerVtkWebProtocol(self, protocol):
protocol.setApplication(self.Application)
self.vtkWebProtocols.append(protocol)
def getVtkWebProtocols(self):
return self.vtkWebProtocols
def onAfterCallSuccess(self, result, callid):
"""
Callback fired after executing incoming RPC with success.
The default implementation will just return `result` to the client.
:param result: Result returned for executing the incoming RPC.
:type result: Anything returned by the user code for the endpoint.
:param callid: WAMP call ID for incoming RPC.
:type callid: str
:returns obj -- Result send back to client.
"""
return self.marshall(result)
def onBeforeCall(self, callid, uri, args, isRegistered):
"""
Callback fired before executing incoming RPC. This can be used for
logging, statistics tracking or redirecting RPCs or argument mangling i.e.
The default implementation just returns the incoming URI/args.
:param uri: RPC endpoint URI (fully-qualified).
:type uri: str
:param args: RPC arguments array.
:type args: list
:param isRegistered: True, iff RPC endpoint URI is registered in this session.
:type isRegistered: bool
:returns pair -- Must return URI/Args pair.
"""
return uri, self.unmarshall(args)
def marshall(self, argument):
return argument
def unmarshall(self, argument):
"""
Unmarshalls the "argument", recursively unmarshalling each element of a list.
"""
if isinstance(argument, types.ListType):
# for lists, unmarshall each argument in the list.
result = []
for arg in argument:
arg = self.unmarshall(arg)
result.append(arg)
return result
return argument
def onConnect(self, connection_request):
"""
Callback fired during WebSocket opening handshake when new WebSocket
client connection is about to be established.
Call the factory to increment the connection count.
"""
try:
self.factory.on_connect()
except AttributeError:
pass
return WampCraServerProtocol.onConnect(self, connection_request)
def connectionLost(self, reason):
"""
Called by Twisted when established TCP connection from client was lost.
Call the factory to decrement the connection count and start a reaper if
necessary.
"""
try:
self.factory.connection_lost()
except AttributeError:
pass
WampCraServerProtocol.connectionLost(self, reason)
@exportRpc("exit")
def exit(self):
"""RPC callback to exit"""
reactor.stop()
# =============================================================================
#
# Base class for vtkWeb WampServerFactory
#
# =============================================================================
class ReapingWampServerFactory(WampServerFactory):
"""
ReapingWampServerFactory is a WampServerFactory subclass that adds support for
closing the web server after a timeout once the last connected client drops.
Currently, the protocol must call on_connect() and connection_lost() methods
to notify the factory that the connection was started/closed.
If the connection count drops to zero, then the reap timer
is started which will end the process if no other connections are made in
the timeout interval.
"""
def __init__(self, url, debugWamp, timeout):
self._reaper = None
self._connection_count = 0
self._timeout = timeout
WampServerFactory.__init__(self, url, debugWamp)
def startFactory(self):
if not self._reaper:
self._reaper = reactor.callLater(self._timeout, lambda: reactor.stop())
WampServerFactory.startFactory(self)
def on_connect(self):
"""
Called when a new connection is made.
"""
if self._reaper:
log.msg("Client has reconnected, cancelling reaper",
logLevel=logging.DEBUG)
self._reaper.cancel()
self._reaper = None
self._connection_count += 1
log.msg("on_connect: connection count = %s" % self._connection_count,
logLevel=logging.DEBUG)
def connection_lost(self):
"""
Called when a connection is lost.
"""
if self._connection_count > 0:
self._connection_count -= 1
log.msg("connection_lost: connection count = %s" % self._connection_count,
logLevel=logging.DEBUG)
if self._connection_count == 0 and not self._reaper:
log.msg("Starting timer, process will terminate in: %ssec" % self._timeout,
logLevel=logging.DEBUG)
self._reaper = reactor.callLater(self._timeout, lambda: reactor.stop())
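# A rough sketch of how an application might wire the two classes above together.
# The subclass name, URL, timeout and protocol instance are illustrative; the
# concrete vtkWeb protocol objects are assumed to come from the application itself:
#
#     class MyAppServerProtocol(ServerProtocol):
#         def initialize(self):
#             # register the application's vtkWeb protocol objects here, e.g.
#             # self.registerVtkWebProtocol(someVtkWebProtocolInstance)
#             pass
#
#     factory = ReapingWampServerFactory("ws://localhost:8080", False, 30)
#     factory.protocol = MyAppServerProtocol
#     listenWS(factory)
#     reactor.run()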
|
py | b40f5665ff237b5a2cf3af54d1d1d6bd2b6ad1a4 | #!/usr/bin/env jython
import os
import re
import requests
import socket
import threading
from burp import IBurpExtender
from burp import IContextMenuFactory
from javax.swing import JMenuItem
from java.util import List, ArrayList
from java.net import URL
# to use this, you need to set the bing api env var
bing_api_key = os.environ.get('BING_API_KEY')
print('welcome to ctlfish blackhatpython bing BurpExtender')
class BurpExtender(IBurpExtender, IContextMenuFactory):
def registerExtenderCallbacks(self, callbacks):
self._callbacks = callbacks
self._helpers = callbacks.getHelpers()
self.context = None
# set up extension
self._callbacks.setExtensionName('ctlfish blackhatpython bing')
callbacks.registerContextMenuFactory(self)
return
def createMenuItems(self, context_menu):
self.context = context_menu
menu_list = ArrayList()
menu_list.add(JMenuItem("Send to Bing", actionPerformed=self.bing_menu))
return menu_list
def bing_menu(self, event):
# get details of users clicked item
http_traffic = self.context.getSelectedMessages()
print('{} requests highlighted'.format(len(http_traffic)))
for traffic in http_traffic:
http_service = traffic.getHttpService()
host = http_service.getHost()
print('User selected host: {}'.format(host))
self.bing_search(host)
return
def bing_search(self, host):
# check if we have ip or hostname
is_ip = re.match(r'((?:\d+\.){3}\d+)', host)
if is_ip:
ip_address = host
domain = False
else:
ip_address = socket.gethostbyname(host)
domain = True
bing_query_string = 'ip:{}'.format(ip_address)
t = threading.Thread(target=self.bing_query, args=(bing_query_string,))
t.daemon = True
t.start()
#self.bing_query(bing_query_string)
if domain:
bing_query_string = 'domain:{}'.format(host)
t = threading.Thread(target=self.bing_query, args=(bing_query_string,))
t.daemon = True
t.start()
#self.bing_query(bing_query_string)
return
def bing_query(self, bing_query_string):
# FYI: you *must* set the lib path for python addons correctly and install requests there
# while testing, I just pointed to the lib64 version of site-packages within my venv
# todo: csuttles fix this to use the burp libs to send the requests
print('Performing Bing search for: {}'.format(bing_query_string))
bing_url = 'https://api.cognitive.microsoft.com/bing/v7.0/search'
headers = {'user-agent': 'ctlfish/blackhatpython/0.0.1', "Ocp-Apim-Subscription-Key": bing_api_key}
params = {"q": bing_query_string, "textDecorations": True, "textFormat": "HTML"}
resp = requests.get(bing_url, params=params, headers=headers)
#return resp
try:
rjson = resp.json()
for page in rjson['webPages']['value']:
print('*' * 80)
print('page url: {}'.format(page["url"]))
print('page id: {}'.format(page["id"]))
print('page name: {}'.format(page["name"]))
j_url = URL(page['url'])
print('page in scope: {}'.format(self._callbacks.isInScope(j_url)))
if not self._callbacks.isInScope(j_url):
self._callbacks.includeInScope(j_url)
print('added {} to Burp Scope'.format(j_url))
else:
print('url {} already in Burp Scope'.format(j_url))
except Exception as ex:
print('caught exception {}:{}'.format(ex.__class__.__name__, ex))
print('no results from Bing')
pass
return
|
py | b40f566a6f72ba84ac372c99b2381fa6b24303ea | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handling of VM disk images.
"""
import os
import re
from oslo.config import cfg
from nova import exception
from nova.image import glance
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
image_opts = [
cfg.BoolOpt('force_raw_images',
default=True,
help='Force backing images to raw format'),
]
CONF = cfg.CONF
CONF.register_opts(image_opts)
class QemuImgInfo(object):
BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
r"\s+(.*?)\)\s*$"), re.I)
TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I)
def __init__(self, cmd_output=None):
details = self._parse(cmd_output)
self.image = details.get('image')
self.backing_file = details.get('backing_file')
self.file_format = details.get('file_format')
self.virtual_size = details.get('virtual_size')
self.cluster_size = details.get('cluster_size')
self.disk_size = details.get('disk_size')
self.snapshots = details.get('snapshot_list', [])
self.encryption = details.get('encryption')
def __str__(self):
lines = [
'image: %s' % self.image,
'file_format: %s' % self.file_format,
'virtual_size: %s' % self.virtual_size,
'disk_size: %s' % self.disk_size,
'cluster_size: %s' % self.cluster_size,
'backing_file: %s' % self.backing_file,
]
if self.snapshots:
lines.append("snapshots: %s" % self.snapshots)
return "\n".join(lines)
def _canonicalize(self, field):
# Standardize on underscores/lc/no dash and no spaces
# since qemu seems to have mixed outputs here... and
# this format allows for better integration with python
# - ie for usage in kwargs and such...
field = field.lower().strip()
for c in (" ", "-"):
field = field.replace(c, '_')
return field
def _extract_bytes(self, details):
# Replace it with the byte amount
real_size = self.SIZE_RE.search(details)
if real_size:
details = real_size.group(1)
try:
details = utils.to_bytes(details)
except (TypeError, ValueError):
pass
return details
def _extract_details(self, root_cmd, root_details, lines_after):
consumed_lines = 0
real_details = root_details
if root_cmd == 'backing_file':
# Replace it with the real backing file
backing_match = self.BACKING_FILE_RE.match(root_details)
if backing_match:
real_details = backing_match.group(2).strip()
elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
# Replace it with the byte amount (if we can convert it)
real_details = self._extract_bytes(root_details)
elif root_cmd == 'file_format':
real_details = real_details.strip().lower()
elif root_cmd == 'snapshot_list':
# Next line should be a header, starting with 'ID'
if not lines_after or not lines_after[0].startswith("ID"):
msg = _("Snapshot list encountered but no header found!")
raise ValueError(msg)
consumed_lines += 1
possible_contents = lines_after[1:]
real_details = []
# This is the sprintf pattern we will try to match
# "%-10s%-20s%7s%20s%15s"
# ID TAG VM SIZE DATE VM CLOCK (current header)
for line in possible_contents:
line_pieces = line.split(None)
if len(line_pieces) != 6:
break
else:
# Check against this pattern in the final position
# "%02d:%02d:%02d.%03d"
date_pieces = line_pieces[5].split(":")
if len(date_pieces) != 3:
break
real_details.append({
'id': line_pieces[0],
'tag': line_pieces[1],
'vm_size': line_pieces[2],
'date': line_pieces[3],
'vm_clock': line_pieces[4] + " " + line_pieces[5],
})
consumed_lines += 1
return (real_details, consumed_lines)
def _parse(self, cmd_output):
# Analysis done of qemu-img.c to figure out what is going on here
# Find all points start with some chars and then a ':' then a newline
# and then handle the results of those 'top level' items in a separate
# function.
#
# TODO(harlowja): newer versions might have a json output format
# we should switch to that whenever possible.
# see: http://bit.ly/XLJXDX
if not cmd_output:
cmd_output = ''
contents = {}
lines = cmd_output.splitlines()
i = 0
line_am = len(lines)
while i < line_am:
line = lines[i]
if not line.strip():
i += 1
continue
consumed_lines = 0
top_level = self.TOP_LEVEL_RE.match(line)
if top_level:
root = self._canonicalize(top_level.group(1))
if not root:
i += 1
continue
root_details = top_level.group(2).strip()
details, consumed_lines = self._extract_details(root,
root_details,
lines[i + 1:])
contents[root] = details
i += consumed_lines + 1
return contents
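# A small parsing sketch for the class above; the captured text is illustrative
# but follows the "key: value" layout that _parse() expects:
#
#     sample = ("image: disk.qcow2\n"
#               "file format: qcow2\n"
#               "virtual size: 1.0G (1073741824 bytes)\n"
#               "disk size: 196K\n")
#     info = QemuImgInfo(sample)
#     # info.file_format == 'qcow2'; info.virtual_size reflects the byte count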
def qemu_img_info(path):
"""Return an object containing the parsed output from qemu-img info."""
if not os.path.exists(path):
return QemuImgInfo()
out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
return QemuImgInfo(out)
def convert_image(source, dest, out_format, run_as_root=False):
"""Convert image to other format."""
cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
utils.execute(*cmd, run_as_root=run_as_root)
def fetch(context, image_href, path, _user_id, _project_id):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
(image_service, image_id) = glance.get_remote_image_service(context,
image_href)
with utils.remove_path_on_error(path):
with open(path, "wb") as image_file:
image_service.download(context, image_id, image_file)
def fetch_to_raw(context, image_href, path, user_id, project_id):
path_tmp = "%s.part" % path
fetch(context, image_href, path_tmp, user_id, project_id)
with utils.remove_path_on_error(path_tmp):
data = qemu_img_info(path_tmp)
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_href)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(image_id=image_href,
reason=_("fmt=%(fmt)s backed by: %(backing_file)s") % locals())
if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw" % (image_href, fmt))
with utils.remove_path_on_error(staged):
convert_image(path_tmp, staged, 'raw')
os.unlink(path_tmp)
data = qemu_img_info(staged)
if data.file_format != "raw":
raise exception.ImageUnacceptable(image_id=image_href,
reason=_("Converted to raw, but format is now %s") %
data.file_format)
os.rename(staged, path)
else:
os.rename(path_tmp, path)
|
py | b40f56cf079af2381f00d67dfe89aa6b1b64976b | name = "gatherup"
|
py | b40f56d42c99bbf4f4f795afec3909dbc295304d | # --------------------------------------------------------
# (c) Copyright 2014, 2020 by Jason DeLaat.
# Licensed under BSD 3-clause licence.
# --------------------------------------------------------
import unittest
import common_tests
import pymonad.tools
from pymonad.maybe import Maybe, Just, Nothing
from pymonad.maybe import Option, Some
class MaybeTests(unittest.TestCase):
def test_repr_Just(self):
self.assertEqual(str(Just(9)), 'Just 9')
def test_repr_Nothing(self):
self.assertEqual(str(Nothing), 'Nothing')
def test_insert(self):
self.assertEqual(Maybe.insert(9), Just(9))
def test_exceptions_do_not_return_nothing_normal_functions(self):
with self.assertRaises(ZeroDivisionError):
Maybe.insert(1).then(lambda x: x / 0)
def test_exceptions_do_not_return_nothing_kleisli_functions(self):
with self.assertRaises(ZeroDivisionError):
Maybe.insert(1).then(lambda x: Just(x / 0))
def test_maybe_method_with_Nothing_value(self):
self.assertEqual(Nothing.maybe('a', lambda i: str(i)), 'a')
def test_maybe_method_with_Just_value(self):
self.assertEqual(Just(1).maybe('a', lambda i: str(i)), '1')
class OptionTests(unittest.TestCase):
def test_repr_Some(self):
self.assertEqual(str(Some(9)), 'Some 9')
def test_insert(self):
self.assertEqual(Option.insert(9), Some(9))
def test_insert_repr(self):
self.assertEqual(str(Option.insert(9)), 'Some 9')
def test_option_method_with_Nothing_value(self):
self.assertEqual(Nothing.option('a', lambda i: str(i)), 'a')
def test_option_method_with_Just_value(self):
self.assertEqual(Some(1).option('a', lambda i: str(i)), '1')
class MaybeFunctor(common_tests.FunctorTests, unittest.TestCase):
def setUp(self):
self._class = Maybe
def test_mapping_over_nothing(self):
self.assertEqual(
Nothing.map(lambda x: x),
Nothing
)
class MaybeApplicative(common_tests.ApplicativeTests, unittest.TestCase):
def setUp(self):
self._class = Maybe
def test_applying_with_nothing_in_first_arg(self):
self.assertEqual(
Maybe.apply(common_tests.add).to_arguments(Nothing, Just(1)),
Nothing
)
def test_applying_with_nothing_in_second_arg(self):
self.assertEqual(
Maybe.apply(common_tests.add).to_arguments(Just(1), Nothing),
Nothing
)
class MaybeMonad(common_tests.MonadTests, unittest.TestCase):
def setUp(self):
self._class = Maybe
def test_binding_with_nothing(self):
self.assertEqual(
Nothing.bind(Maybe.insert),
Nothing
)
class MaybeThen(common_tests.ThenTests, unittest.TestCase):
def setUp(self):
self._class = Maybe
|
py | b40f5702345079d3ccb3aeb8e17c31c70216c723 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Newick Tokenization."""
import re
from enum import Enum
from .input_output import read_filepath
_WS = re.compile(r'\s+')
_PUNC = re.compile(r'[(),:;\[\]]')
_SINGLE_QUOTED_STR = re.compile(r"([^']*)'")
_COMMENT_STR = re.compile(r"([^\]]*)\]")
_UNQUOTED_STR = re.compile(r"([^'():,;\\[]+)(?=$|['():,;\\[])")
class NewickTokenType(Enum):
"""Enum of Newick Token Types."""
NONE = 0
OPEN = 1
CLOSE = 2
COMMA = 3
COLON = 4
LABEL = 5
EDGE_INFO = 6
SEMICOLON = 7
class NewickTokenizer(object):
"""Class providing an Newick token iteration interface.
Name tokens are stripped of whitespace and comments.
"""
def __init__(self, stream=None, newick=None, filepath=None):
"""Newick input as `stream`, a `newick` string, of a `filepath` of __init__."""
if stream is None:
if newick is not None:
self._src = newick
else:
if filepath is None:
raise ValueError('"stream", "newick", or "filepath" must be provided')
self._src = read_filepath(filepath)
else:
self._src = stream.read()
self._last_ind = len(self._src) - 1
self._index = -1
self.num_open_parens = 0
self.num_close_parens = 0
self._token = None
self.comments = []
self.prev_token = NewickTokenType.NONE
self._cb = {'(': self._handle_open_parens,
')': self._handle_close_parens,
',': self._handle_comma,
':': self._handle_colon,
';': self._handle_semicolon,
'[': self._handle_comment,
}
self._default_cb = self._handle_label
self.finished = False
c = self._eat_whitespace_get_next_char()
self._index -= 1
if c != '(':
m = 'Expected the first character to be a "(", but found "{}"'.format(c)
self._raise_unexpected(m)
# just so we don't have to check for NONE on every ( we fake a legal preceding token
self.prev_token = NewickTokenType.OPEN
def tokens(self):
"""Returns a list of remaining tokens (all if not yet iterated over)."""
return list(iter(self))
def _raise_unexpected(self, m):
if self.prev_token != NewickTokenType.NONE:
# noinspection PyUnresolvedReferences
em = 'Error: {m} at {f} after a/an {p} token'
em = em.format(m=m, f=self.file_pos(), p=self.prev_token.name)
raise ValueError(em)
raise ValueError('Error: {m} at {f}'.format(m=m, f=self.file_pos()))
def __iter__(self):
"""Returns self as the internal state is used to achieve iteration."""
return self
# noinspection PyUnresolvedReferences
def _eat_whitespace(self):
# if (1 + self._index) <= self._last_ind:
w = _WS.match(self._src, 1 + self._index)
if w:
self._index = w.end() - 1
def _eat_whitespace_get_next_char(self):
self._eat_whitespace()
return self._get_next_char()
def _get_next_char(self):
self._index += 1
try:
x = self._src[self._index]
if self.finished:
m = 'Unexpected newick content after the semicolon. Found "{c}" and {f}'
m = m.format(c=x, f=self.file_pos())
raise ValueError(m)
return x
except IndexError:
if self.num_close_parens != self.num_open_parens:
raise ValueError('Number of close parentheses ({c}) does not equal '
'the number of open parentheses ({o}) at the end '
'of the input ({f}).'.format(c=self.num_close_parens,
o=self.num_open_parens,
f=self.file_pos()))
raise StopIteration
def _peek(self):
if self._index >= self._last_ind:
return None
return self._src[1 + self._index]
def _grab_one_single_quoted_word(self):
b = self._index + 1
m = _SINGLE_QUOTED_STR.match(self._src, b)
if not m:
self._index = b - 1
self._raise_unexpected("Found an opening single-quote, but not closing quote")
self._index = m.end() - 1
word = m.group(1)
return word
def _read_quoted_label(self):
label = self._grab_one_single_quoted_word()
if self._peek() == "'":
word_list = [label]
while self._peek() == "'":
assert self._get_next_char() == "'"
label = self._grab_one_single_quoted_word()
word_list.append(label)
return "'".join(word_list)
return label
def _read_unquoted_label(self):
b = self._index
# called after we grabbed the first letter, so we look one back
m = _UNQUOTED_STR.match(self._src, b)
if not m: # pragma: no cover
self._raise_unexpected('Expecting a label but found "{}"'.format(self._src[b]))
label = m.group(1)
self._index = m.end() - 1
label = label.strip() # don't preserve whitespace
return label.replace('_', ' ')
def _handle_open_parens(self):
if self.prev_token != NewickTokenType.OPEN and self.prev_token != NewickTokenType.COMMA:
self._raise_unexpected('Expecting "(" to be preceded by "," or "("')
self.num_open_parens += 1
self.prev_token = NewickTokenType.OPEN
return '('
def _handle_colon(self):
if self.prev_token not in [NewickTokenType.LABEL, NewickTokenType.CLOSE]:
self._raise_unexpected('Expecting ":" to be preceded by ")" or a taxon label')
self.prev_token = NewickTokenType.COLON
self._default_cb = self._handle_edge_info
return ':'
def _handle_comment(self):
b = self._index + 1
m = _COMMENT_STR.match(self._src, b)
if not m:
self._index = b
self._raise_unexpected("Found an opening [ of a comment, but not closing ]")
self._index = m.end() - 1
comment = m.group(1)
self.comments.append(comment)
return self._read_next()
def _handle_semicolon(self):
ok_list = [NewickTokenType.LABEL, NewickTokenType.CLOSE, NewickTokenType.EDGE_INFO]
if self.prev_token not in ok_list:
m = 'Expecting ";" to be preceded by ")", a taxon label, or branch information'
self._raise_unexpected(m)
self.finished = True
self.prev_token = NewickTokenType.SEMICOLON
return ';'
def _handle_comma(self):
ok_list = [NewickTokenType.LABEL, NewickTokenType.CLOSE, NewickTokenType.EDGE_INFO]
if self.prev_token not in ok_list:
m = 'Expecting "," to be preceded by ")", a taxon label, or branch information'
self._raise_unexpected(m)
self.prev_token = NewickTokenType.COMMA
return ','
def file_pos(self):
"""Returns a string describing the position within the input stream."""
return 'character #{}'.format(1 + self._index)
def _handle_edge_info(self):
x = self._src[self._index]
if x == "'":
label = self._read_quoted_label()
else:
label = self._read_unquoted_label()
assert self.prev_token == NewickTokenType.COLON
self.prev_token = NewickTokenType.EDGE_INFO
self._default_cb = self._handle_label
return label
def _handle_label(self):
x = self._src[self._index]
if x == "'":
label = self._read_quoted_label()
else:
label = self._read_unquoted_label()
ok_list = [NewickTokenType.OPEN, NewickTokenType.CLOSE, NewickTokenType.COMMA]
if self.prev_token not in ok_list:
m = 'Found "{}", but expected a label to be preceded by "(", ")", or a comma'
m = m.format(label)
self._raise_unexpected(m)
self.prev_token = NewickTokenType.LABEL
return label
def _handle_close_parens(self):
if self.prev_token != NewickTokenType.LABEL \
and self.prev_token != NewickTokenType.EDGE_INFO \
and self.prev_token != NewickTokenType.CLOSE:
m = 'Expecting ")" to be preceded by a label or branch information'
self._raise_unexpected(m)
self.num_close_parens += 1
if self.num_close_parens > self.num_open_parens:
m = 'Number of close parentheses exceeds the number of open parentheses'
self._raise_unexpected(m)
self.prev_token = NewickTokenType.CLOSE
return ')'
def __next__(self):
"""Deletes comments from previous tokens, then to the next token."""
del self.comments[:]
c = self._read_next()
return c
def _read_next(self):
c = self._eat_whitespace_get_next_char()
cb = self._cb.get(c, self._default_cb)
return cb()
next = __next__
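# A toy run of the tokenizer above (the newick string is illustrative):
#
#     NewickTokenizer(newick='(a,(b,c)d);').tokens()
#     # -> ['(', 'a', ',', '(', 'b', ',', 'c', ')', 'd', ')', ';']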
class NewickEvents(Enum):
"""Newick Event types for event-based parsing."""
OPEN_SUBTREE = 0
TIP = 1
CLOSE_SUBTREE = 2
class NewickEventFactory(object):
"""Class providing an Newick event iteration interface.
Higher level interface for reading newick strings into
a series of events.
Each event will be a dict with the keys:
* `'type'`: a facet of the `NewickEvents` Enum, and
* `'comments'`: a list of all comments contained
`NewickEvents.TIP` and `NewickEvents.CLOSE_SUBTREE` events can also have
a `label` and/or an `edge_info` string.
*NOTE* for the sake of performance, the value of the comments field
may be the same list object across different events, so client code must
copy the list if it needs a stable snapshot or wants to process comments later.
"""
def __init__(self, tokenizer=None, newick=None, filepath=None, event_handler=None):
"""Inputs via `tokenizer`, `newick`, or `filepath` of __init__.
If `event_handler` is not `None` in __init__, the initializer will iterate
over all events, passing each one to the handler. So no iteration is
needed or supported.
If `event_handler` is `None`, the object will be ready for iteration.
"""
if tokenizer is None:
if newick is None and filepath is None:
raise ValueError('tokenizer or newick argument must be supplied')
self._tokenizer = NewickTokenizer(newick=newick, filepath=filepath)
else:
self._tokenizer = tokenizer
self._base_it = iter(self._tokenizer)
self._tok_stack = []
self._start_pos = 0
self._comments = []
self._comments_stack = []
self._prev_type = NewickEvents.OPEN_SUBTREE
if event_handler is not None:
for event in iter(self):
event_handler(event)
def __iter__(self):
"""Returns self as the internal state is used to achieve iteration."""
return self
# pylint: disable=inconsistent-return-statements
def __next__(self):
"""Deletes comments and returns next event dict."""
del self._comments[:]
if self._tok_stack:
tok = self._tok_stack.pop()
self._comments.extend(self._comments_stack)
del self._comments_stack[:]
assert not self._tok_stack
else:
tok = next(self._base_it)
self._comments.extend(self._tokenizer.comments)
if self._tokenizer.prev_token == NewickTokenType.OPEN:
self._prev_type = NewickEvents.OPEN_SUBTREE
return {'type': NewickEvents.OPEN_SUBTREE,
'comments': self._comments}
if self._tokenizer.prev_token == NewickTokenType.LABEL:
# when reading a tip, be greedy about grabbing surrounding comments
return self._greedy_token_seq(tok, NewickEvents.TIP)
if self._tokenizer.prev_token == NewickTokenType.CLOSE:
# when reading a tip, be greedy about grabbing trailing comments
return self._greedy_token_seq(tok, NewickEvents.CLOSE_SUBTREE)
if self._tokenizer.prev_token == NewickTokenType.COMMA:
self._comments.extend(self._tokenizer.comments)
return next(self)
if self._tokenizer.prev_token == NewickTokenType.SEMICOLON: # pragma: no cover
raise StopIteration
assert False # pragma: no cover
def _greedy_token_seq(self, label, t):
tok = next(self._base_it)
self._comments.extend(self._tokenizer.comments)
if t == NewickEvents.CLOSE_SUBTREE \
and self._tokenizer.prev_token == NewickTokenType.LABEL:
label = tok
tok = next(self._base_it)
self._comments.extend(self._tokenizer.comments)
if tok == ':':
tok = next(self._base_it)
self._comments.extend(self._tokenizer.comments)
# assert self._prev_type == NewickTokenType.EDGE_INFO
edge_info = tok
tok = next(self._base_it)
self._comments.extend(self._tokenizer.comments)
else:
edge_info = None
self._tok_stack.append(tok)
self._prev_type = t
return {'type': t,
'label': label,
'edge_info': edge_info,
'comments': self._comments}
next = __next__
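# A minimal event-driven sketch of the factory above (the newick string is illustrative):
#
#     events = []
#     NewickEventFactory(newick='(a,(b,c)d);', event_handler=events.append)
#     # each entry of `events` is a dict whose 'type' is one of
#     # NewickEvents.OPEN_SUBTREE, NewickEvents.TIP or NewickEvents.CLOSE_SUBTREE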
|
py | b40f58f0b6fabba714518154c8638f4a365fec20 | import copy
import logging
import time
from dataclasses import dataclass
import numpy as np
from scipy.interpolate import RectBivariateSpline
from tqdm.auto import trange
def complex_interp(src_grid, xi, yi, z):
interpolator_r = RectBivariateSpline(src_grid[0], src_grid[1], z.real)
interpolator_i = RectBivariateSpline(src_grid[0], src_grid[1], z.imag)
zz_r = interpolator_r(xi, yi)
zz_i = interpolator_i(xi, yi)
return zz_r + 1j * zz_i
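# A quick sketch of complex_interp on a toy grid (sizes and values are illustrative);
# RectBivariateSpline needs at least 4 samples per axis with its default cubic splines:
#
#     x, y = np.arange(4), np.arange(5)
#     z = np.random.randn(4, 5) + 1j * np.random.randn(4, 5)
#     zi = complex_interp((x, y), np.linspace(0, 3, 7), np.linspace(0, 4, 9), z)
#     # zi.shape == (7, 9) and zi is complex-valued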
class twix_map_obj:
@property
def filename(self):
return self.fname
@property
def rampSampTrj(self):
return self.rstrj
@property
def dataType(self):
return self.dType
@property
def fullSize(self):
if self.full_size is None:
self.clean()
return self.full_size
# @fullSize.setter
# def fullSize(self, val):
# self.full_size = val
@property
def dataSize(self):
out = self.fullSize.copy()
if self.removeOS:
ix = self.dataDims.index('Col')
out[ix] = self.NCol / 2
if self.average_dim[0] | self.average_dim[1]:
print('averaging in col and cha dim not supported, resetting flag')
self.average_dim[0:2] = False
out[self.average_dim] = 1
return out
@property
def sqzSize(self):
return self.dataSize[self.dataSize > 1].astype(int)
@property
def sqzDims(self):
out = []
squeezedDim = self.dataSize > 1
for sd, dim in zip(squeezedDim, self.dataDims):
if sd:
out.append(dim)
return out
@property
def flagRemoveOS(self):
return self.removeOS
@flagRemoveOS.setter
def flagRemoveOS(self, removeOS):
self.removeOS = removeOS
@property
def flagAverageDim(self):
return self.average_dim
@flagAverageDim.setter
def flagAverageDim(self, val):
self.average_dim = val
@property
def flagDoAverage(self):
ix = self.dataDims.index('Ave')
return self.average_dim[ix]
@flagDoAverage.setter
def flagDoAverage(self, bval):
ix = self.dataDims.index('Ave')
self.average_dim[ix] = bval
@property
def flagAverageReps(self):
ix = self.dataDims.index('Rep')
return self.average_dim[ix]
@flagAverageReps.setter
def flagAverageReps(self, bval):
ix = self.dataDims.index('Rep')
self.average_dim[ix] = bval
@property
def flagAverageSets(self):
ix = self.dataDims.index('Set')
return self.average_dim[ix]
@flagAverageSets.setter
def flagAverageSets(self, bval):
ix = self.dataDims.index('Set')
self.average_dim[ix] = bval
@property
def flagIgnoreSeg(self):
ix = self.dataDims.index('Seg')
return self.average_dim[ix]
@flagIgnoreSeg.setter
def flagIgnoreSeg(self, bval):
ix = self.dataDims.index('Seg')
self.average_dim[ix] = bval
@property
def flagSkipToFirstLine(self):
return self.skipToFirstLine
@flagSkipToFirstLine.setter
def flagSkipToFirstLine(self, bval):
if bval != self.skipToFirstLine:
self.skipToFirstLine = bval
if bval:
self.skipLin = np.min(self.Lin)
self.skipPar = np.min(self.Par)
else:
self.skipLin = 0
self.skipPar = 0
self.full_size[2] = np.maximum(1, self.NLin - self.skipLin)
self.full_size[3] = np.maximum(1, self.NPar - self.skipPar)
@property
def flagRampSampRegrid(self):
return self.regrid
@flagRampSampRegrid.setter
def flagRampSampRegrid(self, bval):
if bval and self.rstrj is None:
raise Exception('No trajectory for regridding available')
self.regrid = bval
# TODO: flagDoRawDataCorrect
@property
def flagDoRawDataCorrect(self):
return False
@flagDoRawDataCorrect.setter
def flagDoRawDataCorrect(self, bval):
pass
# TODO: RawDataCorrectionFactors
@property
def RawDataCorrectionFactors(self):
return []
@RawDataCorrectionFactors.setter
def RawDataCorrectionFactors(self, bval):
pass
def __init__(self, dataType, fname, version, rstraj=None, **kwargs):
self.ignoreROoffcenter = kwargs.get('ignoreROoffcenter', False)
self.removeOS = kwargs.get('removeOS', True)
self.regrid = kwargs.get('regrid', True)
self.doAverage = kwargs.get('doAverage', False)
self.averageReps = kwargs.get('averageReps', False)
self.averageSets = kwargs.get('averageSets', False)
self.ignoreSeg = kwargs.get('ignoreSeg', False)
self.squeeze = kwargs.get('squeeze', False)
self.dType = dataType.lower()
self.fname = fname
self.softwareVersion = version
# self.IsReflected = logical([]);
# self.IsRawDataCorrect = logical([]); %SRY
self.NAcq = 0
self.isBrokenFile = False
self.dataDims = ['Col', 'Cha', 'Lin', 'Par', 'Sli', 'Ave', 'Phs',
'Eco', 'Rep', 'Set', 'Seg', 'Ida', 'Idb', 'Idc', 'Idd', 'Ide']
@dataclass
class FRI:
szScanHeader: int # bytes
szChannelHeader: int # bytes
iceParamSz: int
sz: np.array = np.zeros(2)
shape: np.array = np.zeros(2)
cut: np.array = None
if self.softwareVersion == 'vb':
self.freadInfo = FRI(0, 128, 4)
elif self.softwareVersion == 'vd':
self.freadInfo = FRI(192, 32, 24) # vd version supports up to 24 ice params
else:
raise ValueError('software version not supported')
self.rstrj = rstraj
if rstraj is None:
self.regrid = False
self.NCol = None
self.NCha = None
self.Lin = None
self.Ave = None
self.Sli = None
self.Par = None
self.Eco = None
self.Phs = None
self.Rep = None
self.Set = None
self.Seg = None
self.Ida = None
self.Idb = None
self.Idc = None
self.Idd = None
self.Ide = None
self.centerCol = None
self.centerLin = None
self.centerPar = None
self.cutOff = None
self.coilSelect = None
self.ROoffcenter = None
self.timeSinceRF = None
self.IsReflected = None
self.scancounter = None
self.timestamp = None
self.pmutime = None
self.IsRawDataCorrect = None
self.slicePos = None
self.iceParam = None
self.freeParam = None
self.memPos = None
self.NLin = None
self.NPar = None
self.NSli = None
self.NAve = None
self.NPhs = None
self.NEco = None
self.NRep = None
self.NSet = None
self.NSeg = None
self.NIda = None
self.NIdb = None
self.NIdc = None
self.NIdd = None
self.NIde = None
self.skipLin = None
self.skipPar = None
self.full_size = None
# Flags
self.average_dim = np.full(16, False, dtype=bool)
self.average_dim[self.dataDims.index('Ave')] = self.doAverage
self.average_dim[self.dataDims.index('Rep')] = self.averageReps
self.average_dim[self.dataDims.index('Set')] = self.averageSets
self.average_dim[self.dataDims.index('Seg')] = self.ignoreSeg
if self.dType == 'image' or self.dType == 'phasestab':
self.skipToFirstLine = False
else:
self.skipToFirstLine = True
def __str__(self):
data_sz = np.array2string(self.fullSize, formatter={"float": lambda x: "%.0f" % x}, separator=",")
data_sz_sqz = np.array2string(self.sqzSize, formatter={"int": lambda x: "%i" % x}, separator=",")
des_str = ('***twix_map_obj***\n'
f'File: {self.fname}\n'
f'Software: {self.softwareVersion}\n'
f'Number of acquisitions read {self.NAcq}\n'
f'Data size is {data_sz}\n'
f'Squeezed data size is {data_sz_sqz} ({self.sqzDims})\n'
f'NCol = {self.NCol:0.0f}\n'
f'NCha = {self.NCha:0.0f}\n'
f'NLin = {self.NLin:0.0f}\n'
f'NAve = {self.NAve:0.0f}\n'
f'NSli = {self.NSli:0.0f}\n'
f'NPar = {self.NPar:0.0f}\n'
f'NEco = {self.NEco:0.0f}\n'
f'NPhs = {self.NPhs:0.0f}\n'
f'NRep = {self.NRep:0.0f}\n'
f'NSet = {self.NSet:0.0f}\n'
f'NSeg = {self.NSeg:0.0f}\n'
f'NIda = {self.NIda:0.0f}\n'
f'NIdb = {self.NIdb:0.0f}\n'
f'NIdc = {self.NIdc:0.0f}\n'
f'NIdd = {self.NIdd:0.0f}\n'
f'NIde = {self.NIde:0.0f}')
return des_str
def __repr__(self):
return str(self)
def readMDH(self, mdh, filePos, useScan):
# % extract all values in all MDHs at once
# %
# % data types:
# % Use double for everything non-logical, both ints and floats. Seems the
# % most robust way to avoid unexpected cast-issues with very nasty side effects.
# % Examples: eps(single(16777216)) == 2
# % uint32( 10 ) - uint32( 20 ) == 0
# % uint16(100) + 1e5 == 65535
# % size(1 : 10000 * uint16(1000)) == [1 65535]
# %
# % The 1st example always hits the timestamps.
self.NAcq = np.sum(useScan)
sLC = mdh.sLC.astype(float)
evalInfoMask1 = mdh.aulEvalInfoMask[useScan, 0]
# save mdh information for each line
self.NCol = mdh.ushSamplesInScan[useScan].astype(float)
self.NCha = mdh.ushUsedChannels[useScan].astype(float)
self.Lin = sLC[useScan, 0].astype(float)
self.Ave = sLC[useScan, 1].astype(float)
self.Sli = sLC[useScan, 2].astype(float)
self.Par = sLC[useScan, 3].astype(float)
self.Eco = sLC[useScan, 4].astype(float)
self.Phs = sLC[useScan, 5].astype(float)
self.Rep = sLC[useScan, 6].astype(float)
self.Set = sLC[useScan, 7].astype(float)
self.Seg = sLC[useScan, 8].astype(float)
self.Ida = sLC[useScan, 9].astype(float)
self.Idb = sLC[useScan, 10].astype(float)
self.Idc = sLC[useScan, 11].astype(float)
self.Idd = sLC[useScan, 12].astype(float)
self.Ide = sLC[useScan, 13].astype(float)
self.centerCol = mdh.ushKSpaceCentreColumn[useScan].astype(float)
self.centerLin = mdh.ushKSpaceCentreLineNo[useScan].astype(float)
self.centerPar = mdh.ushKSpaceCentrePartitionNo[useScan].astype(float)
self.cutOff = mdh.sCutOff[useScan].astype(float)
self.coilSelect = mdh.ushCoilSelect[useScan].astype(float)
self.ROoffcenter = mdh.fReadOutOffcentre[useScan].astype(float)
self.timeSinceRF = mdh.ulTimeSinceLastRF[useScan].astype(float)
self.IsReflected = np.minimum(evalInfoMask1 & 2 ** 24, 1).astype(bool)
self.scancounter = mdh.ulScanCounter[useScan].astype(float)
self.timestamp = mdh.ulTimeStamp[useScan].astype(float)
self.pmutime = mdh.ulPMUTimeStamp[useScan].astype(float)
self.IsRawDataCorrect = np.minimum(evalInfoMask1 & 2 ** 10, 1).astype(bool)
self.slicePos = mdh.SlicePos[useScan].astype(float)
self.iceParam = mdh.aushIceProgramPara[useScan].astype(float)
self.freeParam = mdh.aushFreePara[useScan].astype(float)
self.memPos = filePos[useScan]
def tryAndFixLastMdh(self):
isLastAcqGood = False
cnt = 0
while not isLastAcqGood and self.NAcq > 0 and cnt < 100:
try:
# self.clean()
self.unsorted(self.NAcq)
isLastAcqGood = True
except Exception:
logging.exception(f'An error occurred whilst trying to fix last MDH. NAcq = {self.NAcq:.0f}')
self.isBrokenFile = True
self.NAcq -= 1
cnt += 1
def clean(self):
# Cut mdh data to actual size. Maybe we rejected acquisitions at the end
# due to read errors.
if self.NAcq == 0:
return
# fields = ['NCol', 'NCha',
# 'Lin', 'Par', 'Sli', 'Ave', 'Phs', 'Eco', 'Rep',
# 'Set', 'Seg', 'Ida', 'Idb', 'Idc', 'Idd', 'Ide',
# 'centerCol', 'centerLin', 'centerPar', 'cutOff',
# 'coilSelect', 'ROoffcenter', 'timeSinceRF', 'IsReflected',
# 'scancounter', 'timestamp', 'pmutime', 'IsRawDataCorrect',
# 'slicePos', 'iceParam', 'freeParam', 'memPos']
# nack = self.NAcq
# idx = np.arange(0, nack - 1)
# for f in fields:
# curr = getattr(self, f)
# if curr.shape[0] > nack: # rarely
# print('Here')
# setattr(self, f, curr[idx]) # 1st dim: samples, 2nd dim acquisitions
self.NLin = np.max(self.Lin) + 1 # +1 so that size isn't 0
self.NPar = np.max(self.Par) + 1
self.NSli = np.max(self.Sli) + 1
self.NAve = np.max(self.Ave) + 1
self.NPhs = np.max(self.Phs) + 1
self.NEco = np.max(self.Eco) + 1
self.NRep = np.max(self.Rep) + 1
self.NSet = np.max(self.Set) + 1
self.NSeg = np.max(self.Seg) + 1
self.NIda = np.max(self.Ida) + 1
self.NIdb = np.max(self.Idb) + 1
self.NIdc = np.max(self.Idc) + 1
self.NIdd = np.max(self.Idd) + 1
self.NIde = np.max(self.Ide) + 1
# ok, let us assume for now that all NCol and NCha entries are
# the same for all mdhs:
# WTC not sure if this is a good idea - will keep the same as original for now
if self.NCol.ndim > 0:
self.NCol = self.NCol[0]
if self.NCha.ndim > 0:
self.NCha = self.NCha[0]
if self.dType == 'refscan':
# pehses: check for lines with 'negative' line/partition numbers
# this can happen when the reference scan line/partition range
# exceeds the one of the actual imaging scan
if self.NLin > 65500: # uint overflow check
self.Lin = np.mod(self.Lin + (65536 - np.min(self.Lin[self.Lin > 65500])), 65536)
self.NLin = np.max(self.Lin)
if self.NPar > 65500: # %uint overflow check
self.Par = np.mod(self.Par + (65536 - np.min(self.Par[self.Par > 65500])), 65536)
self.NPar = np.max(self.Par)
# to reduce the matrix sizes of non-image scans, the size
# of the refscan_obj()-matrix is reduced to the area of the
# actually scanned acs lines (the outer part of k-space
# that is not scanned is not filled with zeros)
# this behaviour is controlled by flagSkipToFirstLine which is
# set to true by default for everything but image scans
# both used to have a -1 but WTC thinks that in python they won't be needed
if not self.skipToFirstLine:
self.skipLin = 0
self.skipPar = 0
else:
self.skipLin = np.min(self.Lin) # -1
self.skipPar = np.min(self.Par) # -1
NLinAlloc = np.maximum(1, self.NLin - self.skipLin)
NParAlloc = np.maximum(1, self.NPar - self.skipPar)
self.full_size = np.array(
[self.NCol, self.NCha, NLinAlloc, NParAlloc,
self.NSli, self.NAve, self.NPhs, self.NEco,
self.NRep, self.NSet, self.NSeg, self.NIda,
self.NIdb, self.NIdc, self.NIdd, self.NIde],
dtype=int)
nByte = self.NCha * (self.freadInfo.szChannelHeader + 8 * self.NCol)
# size for fread
self.freadInfo.sz = np.array([2, nByte / 8])
# reshape size
self.freadInfo.shape = np.array([self.NCol + self.freadInfo.szChannelHeader / 8, self.NCha])
# we need to cut MDHs from fread data
self.freadInfo.cut = self.freadInfo.szChannelHeader / 8 + np.arange(self.NCol)
def calcRange(self, S):
self.clean()
selRange = [np.zeros(1, dtype=int)] * self.dataSize.size
outSize = np.ones(self.dataSize.shape, dtype=int)
bSqueeze = self.squeeze
if S is None or S == slice(None, None, None):
# shortcut to select all data
for k in range(0, self.dataSize.size):
selRange[k] = np.arange(0, self.dataSize[k]).astype(int)
if not bSqueeze:
outSize = self.dataSize.astype(int)
else:
outSize = self.sqzSize
else:
# import pdb; pdb.set_trace()
for k, s in enumerate(S):
if not bSqueeze:
cDim = k # nothing to do
else:
# we need to rearrange selRange from squeezed
# to original order
for i, x in enumerate(self.dataDims):
if x == self.sqzDims[k]:
cDim = i
if s == slice(None, None, None):
if k < (len(S) - 1):
selRange[cDim] = np.arange(0, self.dataSize[cDim]).astype(int)
else: # all later dimensions selected and 'vectorized'!
for ll in range(cDim, self.dataSize.size):
selRange[ll] = np.arange(0, self.dataSize[ll]).astype(int)
outSize[k] = np.prod(self.dataSize[cDim:])
break
elif isinstance(s, slice):
tmpTuple = s.indices(self.dataSize[cDim].astype(int))
selRange[cDim] = np.arange(tmpTuple[0], tmpTuple[1], tmpTuple[2])
else: # numeric
selRange[cDim] = np.array([s])
outSize[k] = selRange[cDim].size
for r, s in zip(selRange, self.dataSize):
if np.max(r) >= s:
raise Exception('selection out of range')
# To implement indexing
selRangeSz = np.ones(self.dataSize.shape, dtype=int)
for idx, k in enumerate(selRange):
selRangeSz[idx] = k.size
# now select all indices for the dims that are averaged
for iDx, k in enumerate(np.nditer(self.average_dim)):
if k:
self.clean()
selRange[iDx] = np.arange(0, self.fullSize[iDx])
return selRange, selRangeSz, outSize
def calcIndices(self):
# calculate indices to target & source(raw)
LinIx = self.Lin - self.skipLin
ParIx = self.Par - self.skipPar
sz = self.fullSize[2:]
ixToTarget = np.zeros(LinIx.size, dtype=int)
for i, _ in enumerate(ixToTarget):
ixToTarget[i] = np.ravel_multi_index((LinIx[i].astype(int), ParIx[i].astype(int), self.Sli[i].astype(int),
self.Ave[i].astype(int), self.Phs[i].astype(int),
self.Eco[i].astype(int), self.Rep[i].astype(int),
self.Set[i].astype(int), self.Seg[i].astype(int),
self.Ida[i].astype(int), self.Idb[i].astype(int),
self.Idc[i].astype(int), self.Idd[i].astype(int),
self.Ide[i].astype(int)), dims=sz.astype(int), order='C')
# now calc. inverse index (page table: virtual to physical addresses)
# indices of lines that are not measured are marked with -1 (the "NULL"-pointer)
ixToRaw = np.full(self.fullSize[2:].prod().astype(int), -1, dtype=int)
for i, itt in enumerate(ixToTarget):
ixToRaw[itt] = i
return ixToRaw, ixToTarget
def unsorted(self, ival=None):
# returns the unsorted data [NCol,NCha,#samples in acq. order]
if ival:
mem = np.atleast_1d(self.memPos[ival - 1])
else:
mem = self.memPos
out = self.readData(mem)
return out
# Replicate matlab subscripting
# Overloads []
def __getitem__(self, key=None):
# print(f'In [], key is {key}.')
# import pdb; pdb.set_trace()
if isinstance(key, slice): # Handle single input e.g. [:]
key = (key,) # make an iterable for calcRange
elif key == '':
key = None
selRange, selRangeSz, outSize = self.calcRange(key) # True for squeezed data
# calculate page table (virtual to physical addresses)
# this is now done every time, i.e. result is no longer saved in
# a property - slower but safer (and easier to keep track of updates)
ixToRaw, _ = self.calcIndices()
tmp = np.arange(0, self.fullSize[2:].prod().astype(int)).reshape(self.fullSize[2:].astype(int))
# tmpSelRange = [x-1 for x in selRange] # python indexing from 0
for i, ids in enumerate(selRange[2:]):
tmp = np.take(tmp, ids.astype(int), i)
# tmp = tmp[tuple(selRange[2:])]
ixToRaw = ixToRaw[tmp]
ixToRaw = ixToRaw.ravel()
# delete all entries that point to zero (the "NULL"-pointer)
# notAcquired = np.isnan(ixToRaw)
ixToRaw = ixToRaw[np.where(ixToRaw > -1)[0]]
# import pdb; pdb.set_trace()
# broken ixToRaw = np.delete(ixToRaw, ~notAcquired)# Why do I have to negate bool here? Not clear
# maar = np.ma.MaskedArray(ixToRaw, mask=~notAcquired)
# ixToRaw = maar.compressed()
# calculate ixToTarg for possibly smaller, shifted + segmented
# target matrix:
cIx = np.zeros((14, ixToRaw.size), dtype=int)
if ~self.average_dim[2]:
cIx[0, :] = self.Lin[ixToRaw] - self.skipLin
if ~self.average_dim[3]:
cIx[1, :] = self.Par[ixToRaw] - self.skipPar
if ~self.average_dim[4]:
cIx[2, :] = self.Sli[ixToRaw]
if ~self.average_dim[5]:
cIx[3, :] = self.Ave[ixToRaw]
if ~self.average_dim[6]:
cIx[4, :] = self.Phs[ixToRaw]
if ~self.average_dim[7]:
cIx[5, :] = self.Eco[ixToRaw]
if ~self.average_dim[8]:
cIx[6, :] = self.Rep[ixToRaw]
if ~self.average_dim[9]:
cIx[7, :] = self.Set[ixToRaw]
if ~self.average_dim[10]:
cIx[8, :] = self.Seg[ixToRaw]
if ~self.average_dim[11]:
cIx[9, :] = self.Ida[ixToRaw]
if ~self.average_dim[12]:
cIx[10, :] = self.Idb[ixToRaw]
if ~self.average_dim[13]:
cIx[11, :] = self.Idc[ixToRaw]
if ~self.average_dim[14]:
cIx[12, :] = self.Idd[ixToRaw]
if ~self.average_dim[15]:
cIx[13, :] = self.Ide[ixToRaw]
# import pdb; pdb.set_trace()
# make sure that indices fit inside selection range
for k in range(2, len(selRange)):
tmp = cIx[k - 2, :]
for ll in range(0, selRange[k].size):
cIx[k - 2, tmp == selRange[k][ll]] = ll
sz = selRangeSz[2:]
ixToTarg = np.zeros(cIx.shape[1], dtype=int) # pylint: disable=E1136 # pylint/issues/3139
for i, _ in enumerate(ixToTarg):
ixToTarg[i] = np.ravel_multi_index((cIx[0, i].astype(int), cIx[1, i].astype(int), cIx[2, i].astype(int),
cIx[3, i].astype(int), cIx[4, i].astype(int), cIx[5, i].astype(int),
cIx[6, i].astype(int), cIx[7, i].astype(int), cIx[8, i].astype(int),
cIx[9, i].astype(int), cIx[10, i].astype(int), cIx[11, i].astype(int),
cIx[12, i].astype(int), cIx[13, i].astype(int)), dims=sz.astype(int),
order='C')
mem = self.memPos[ixToRaw]
# sort mem for quicker access, sort cIxToTarg/Raw accordingly
ix = np.argsort(mem)
mem = mem[ix]
ixToTarg = ixToTarg[ix]
ixToRaw = ixToRaw[ix]
# import pdb; pdb.set_trace()
out = self.readData(mem, ixToTarg, ixToRaw, selRange, selRangeSz, outSize)
return out
@staticmethod
def cast2MinimalUint(N):
Nmax = np.max(N)
Nmin = np.min(N)
if (Nmin < 0) or (Nmax > np.iinfo(np.uint64).max):
return N
if Nmax > np.iinfo(np.uint32).max:
idxClass = np.uint64
elif Nmax > np.iinfo(np.uint16).max:
idxClass = np.uint32
else:
idxClass = np.uint16
return N.astype(idxClass)
def _fileopen(self):
fid = open(self.fname, 'rb')
return fid
def readData(self, mem, cIxToTarg=None, cIxToRaw=None, selRange=None, selRangeSz=None, outSize=None):
mem = mem.astype(int)
if outSize is None:
if selRange is None:
selRange = [np.arange(0, self.dataSize[0]).astype(int),
np.arange(0, self.dataSize[1]).astype(int)]
else:
selRange[0] = np.arange(0, self.dataSize[0]).astype(int)
selRange[1] = np.arange(0, self.dataSize[1]).astype(int)
outSize = np.concatenate((self.dataSize[0:2], mem.shape)).astype(int)
selRangeSz = outSize
cIxToTarg = np.arange(0, selRangeSz[2])
cIxToRaw = cIxToTarg
# else:
# if np.array_equiv(selRange[0],np.arange(0,self.dataSize()[0]).astype(int)):
# selRange[0] = slice(None,None,None)
# if np.array_equiv(selRange[1],np.arange(0,self.dataSize()[1]).astype(int)):
# selRange[1] = slice(None,None,None)
out = np.zeros(outSize, dtype=np.csingle)
out = out.reshape((selRangeSz[0], selRangeSz[1], -1))
cIxToTarg = twix_map_obj.cast2MinimalUint(cIxToTarg) # Possibly not needed
# These parameters were copied for speed in matlab, but just duplicate to keep code similar in python
szScanHeader = self.freadInfo.szScanHeader
readSize = self.freadInfo.sz.astype(int)
readShape = self.freadInfo.shape.astype(int)
readCut = self.freadInfo.cut.astype(int)
keepOS = np.concatenate([list(range(int(self.NCol / 4))), list(range(int(self.NCol * 3 / 4), int(self.NCol)))])
bIsReflected = self.IsReflected[cIxToRaw]
bRegrid = self.regrid and self.rstrj.size > 1
# slicedata = self.slicePos[cIxToRaw, :]
ro_shift = self.ROoffcenter[cIxToRaw] * int(not self.ignoreROoffcenter)
# %SRY store information about raw data correction
# bDoRawDataCorrect = this.arg.doRawDataCorrect;
# bIsRawDataCorrect = this.IsRawDataCorrect( cIxToRaw );
isBrokenRead = False
# if (bDoRawDataCorrect)
# rawDataCorrect = this.arg.rawDataCorrectionFactors;
# end
"""
% MiVö: Raw data are read line-by-line in portions of 2xNColxNCha float32 points (2 for complex).
% Computing and sorting(!) on these small portions is quite expensive, esp. when
% it employs non-sequential memory paths. Examples are non-linear k-space acquisition
% or reflected lines.
% This can be sped up if slightly larger blocks of raw data are collected, first.
% Whenever a block is full, we do all those operations and save it in the final "out" array.
% What's a good block size? Depends on data size and machine (probably L2/L3/L4 cache sizes).
% So...? Start with a small block, measure the time-per-line and double block size until
% a minimum is found. Seems sufficiently robust to end up in a close-to-optimal size for every
% machine and data.
"""
blockSz = 2 # size of blocks; must be 2^n; will be increased
doLockblockSz = False # whether blockSZ should be left untouched
tprev = float('inf') # previous time-per-line
blockCtr = 0
blockInit = np.full((readShape[0], readShape[1], blockSz), -np.inf, dtype=np.csingle) # init with garbage
block = copy.deepcopy(blockInit)
sz = 0
if bRegrid:
v1 = np.array(range(1, selRangeSz[1] * blockSz + 1))
rsTrj = [self.rstrj, v1]
trgTrj = np.linspace(np.min(self.rstrj), np.max(self.rstrj), int(self.NCol))
trgTrj = [trgTrj, v1]
# counter for proper scaling of averages/segments
count_ave = np.zeros((1, 1, out.shape[2]), np.single) # pylint: disable=E1136 # pylint/issues/3139
kMax = mem.size # max loop index
fid = self._fileopen()
for k in trange(kMax, desc='read data', leave=False): # could loop over mem, but keep it similar to matlab
# skip scan header
fid.seek(mem[k] + szScanHeader, 0)
raw = np.fromfile(fid, dtype=np.float32, count=readSize.prod())
# With incomplete files fread() returns less than readSize points. The subsequent reshapes
# would therefore error out. We could check if numel(raw) == prod(readSize), but people
# recommend exception handling for performance reasons. Do it.
try:
    raw = raw.reshape((readSize[1], readSize[0]))  # do transpose by switching readSize order
    raw = (raw[:, 0] + 1j * raw[:, 1]).reshape(readShape, order='F')
except ValueError:
    offset_bytes = mem[k] + szScanHeader
    # remainingSz = readSize(2) - size(raw,1);
    import warnings
    warnstring =\
        'An unexpected read error occurred at this byte offset:'\
        f' {offset_bytes} ({offset_bytes / 1024 ** 3} GiB).'\
        f'\nActual read size is [{raw.shape}], desired size was: [{readSize}].\n'\
        'Will ignore this line and stop reading.\n'
    warnings.warn(warnstring)
    # Reject this data fragment. To do so, fill it with the garbage values of blockInit
    raw = copy.deepcopy(blockInit[:, :, 0])
    isBrokenRead = True  # remember it and bail out later
block[:, :, blockCtr, None] = copy.deepcopy(raw).reshape(np.append(readShape, 1))
# fast serial storage in a cache array - this is probably all very dependent on whether I've got things
# contiguous in memory. I highly doubt that I have on this first pass. WTC
blockCtr += 1
# Do expensive computations and reorderings on the gathered block.
# Unfortunately, a lot of code is necessary, but it is executed much less
# frequently, so it's worthwhile for speed.
# TODO: Do *everything* block-by-block
if (blockCtr == blockSz) or (k == kMax - 1) or (isBrokenRead and blockCtr > 1):
# measure the time to process a block of data
tic = time.perf_counter()
# remove MDH data from block:
block = block[readCut, :, :]
ix = np.arange(1 + k - blockCtr, k + 1, dtype=int) # +1 so that it goes to k
if blockCtr != blockSz:
block = block[:, :, 0:blockCtr]
# if bDoRawDataCorrect && bIsRawDataCorrect(k): WTC: not implemented yet
# reflect fids when necessary
isRefl = np.where(bIsReflected[ix])[0]
block[:, :, isRefl] = block[-1::-1, :, isRefl]
if bRegrid:
# correct for readout shifts
# the nco frequency is always scaled to the max. gradient amp and does account for ramp-sampling
deltak = np.max(np.abs(np.diff(rsTrj[0])))
fovshift = np.reshape(ro_shift[ix], (1, 1, len(ix)), order='F')
adcphase = deltak * (np.array(range(int(self.NCol)))[..., np.newaxis, np.newaxis] * fovshift)
fovphase = fovshift * rsTrj[0][..., np.newaxis, np.newaxis]
phase_factor = np.exp(1j * 2 * np.pi * (adcphase - fovphase))
block *= phase_factor
if block.shape != sz:
# update grid
sz = block.shape
ninterp = np.prod(sz[1:])
if ninterp == 1:
blockdims = [0]
else:
blockdims = [0, 1]
rsTrj[1] = np.array(range(ninterp)) + 1
src_grid = tuple(rsTrj[dim] for dim in blockdims)
# regrid the data
trgTrj[1] = rsTrj[1]
trg_grid = tuple(trgTrj[dim] for dim in blockdims)
z = np.reshape(block, (sz[0], -1), order='F')
# TODO: handle 1d regridding
yi, xi = np.meshgrid(trg_grid[1], trg_grid[0], sparse=True)
# NOTE: there is some minor differences in regridding precision between python and matlab, don't
# expect the same result from regridding
# Perform the interpolation with the given values
block = np.reshape(complex_interp(src_grid, xi, yi, z), sz,
order='F')
if self.removeOS:
block = np.fft.fft(
np.fft.ifft(block, axis=0)[keepOS, ...], axis=0)
# TODO: average across 1st and 2nd dims
# import pdb; pdb.set_trace()
# WTC whilst still using slices rather than just arrays.
# if (not isinstance(selRange[0],slice)) or (not isinstance(selRange[1],slice)):
# if isinstance(selRange[0],slice) and (selRange[0]==slice(None,None,None)):
# cur1stDim = block.shape[0]
# else:
# cur1stDim = selRange[0].size
# if isinstance(selRange[1],slice) and (selRange[1]==slice(None,None,None)):
# cur2ndDim = block.shape[1]
# else:
# cur2ndDim = selRange[1].size
cur1stDim = selRange[0].size
cur2ndDim = selRange[1].size
cur3rdDim = block.shape[2]
block = block[selRange[0][:, np.newaxis], selRange[1][np.newaxis, :], :]\
.reshape((cur1stDim, cur2ndDim, cur3rdDim))
toSort = cIxToTarg[ix]
II = np.argsort(toSort)
sortIdx = toSort[II]
block = block[:, :, II] # reorder according to sorted target indices
# Mark duplicate indices with 1; we'll have to treat them special for proper averaging
# Bonus: The very first storage can be made much faster, because it's in-place.
isDupe = np.concatenate((np.array([False]), np.diff(sortIdx) == 0))
idx1 = sortIdx[~isDupe] # acquired once in this block
idxN = sortIdx[isDupe] # acquired multiple times
count_ave[:, :, idx1] += 1
if idxN.size == 0:
# no duplicates
if (count_ave[:, :, idx1] == 1).all(): # first acquisition of this line
out[:, :, idx1] = block # fast
else:
out[:, :, idx1] = out[:, :, idx1] + block # slow
else:
out[:, :, idx1] = out[:, :, idx1] + block[:, :, ~isDupe] # slower
block = block[:, :, isDupe]
for n in range(0, idxN.size):
out[:, :, idxN[n]] = out[:, :, idxN[n]] + block[:, :, n] # snail :-)
count_ave[:, :, idxN[n]] += 1
# At the first few iterations, evaluate the spent time-per-line and decide
# what to do with the block size.
if not doLockblockSz:
toc = time.perf_counter()
t = 1e6 * (toc - tic) / blockSz # micro seconds
if t <= 1.1 * tprev: # allow 10% inaccuracy. Usually bigger == better
# New block size was faster. Go a step further.
blockSz = blockSz * 2
blockInit = np.concatenate((blockInit, blockInit), axis=2)
else:
# regression; reset size and lock it
blockSz = np.maximum(blockSz / 2, 1).astype(int)
blockInit = blockInit[:, :, :blockSz]
doLockblockSz = True
tprev = t
blockCtr = 0
block = blockInit # reset to garbage
if isBrokenRead:
self.isBrokenFile = True
break
fid.close()
# proper scaling (we don't want to sum our data but average it)
# For large "out" bsxfun(@rdivide,out,count_ave) is incredibly faster than
# bsxfun(@times,out,count_ave)!
# @rdivide is also running in parallel, while @times is not. :-/
if (count_ave > 1).any():
count_ave = np.maximum(1, count_ave)
out /= count_ave
out = np.ascontiguousarray(out.reshape(outSize))
return out if not self.squeeze else np.squeeze(out)
"""
ATH added August 2021
alter a loop counter, for example if its missing in the mdh, add it in here, or alter it
the length of the new loop must be = self.NAcq
fixLoopCounter('Ave',newLlistAve)
"""
def fixLoopCounter(self, loop, newLoop):
if newLoop.shape[0] != self.NAcq:
print('length of new array must equal NAcq: ' + str(self.NAcq))
self.__dict__[loop] = newLoop
N = max(newLoop)
self.__dict__['N' + loop] = N
self.fullSize[self.dataDims.index(loop)] = N
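# Illustrative sketch (editorial addition, not part of the original class): the adaptive
# block-size heuristic used in readData() above, isolated from the file I/O. Start with a
# small block, double it while the measured time-per-line keeps improving, then lock the
# size once a regression is seen. `process_block` is any callable taking a block length,
# e.g. _demo_adaptive_block_size(lambda n: sum(range(1000 * n)), 10_000).
def _demo_adaptive_block_size(process_block, n_lines, start_size=2):
    import time
    block_sz, locked, t_prev = start_size, False, float('inf')
    done = 0
    while done < n_lines:
        cur = min(block_sz, n_lines - done)
        tic = time.perf_counter()
        process_block(cur)
        t = 1e6 * (time.perf_counter() - tic) / cur  # microseconds per line
        done += cur
        if not locked:
            if t <= 1.1 * t_prev:  # still getting faster (10% tolerance): try bigger blocks
                block_sz *= 2
            else:                  # regression: halve the block size and keep it
                block_sz, locked = max(block_sz // 2, 1), True
            t_prev = t
    return block_sz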
|
py | b40f59e2814f0081a10d055d5df94ceea9788133 | '''
Created by auto_sdk on 2020.10.13
'''
from dingtalk.api.base import RestApi
class OapiV2DepartmentGetRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.dept_id = None
self.language = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.v2.department.get'
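# Usage sketch (editorial addition; the request/response methods on the shared RestApi
# base class, e.g. getResponse(), are assumptions about the generated SDK, not verified):
#
# req = OapiV2DepartmentGetRequest("https://oapi.dingtalk.com/topapi/v2/department/get")
# req.dept_id = 1
# req.language = "zh_CN"
# resp = req.getResponse(access_token)  # hypothetical call; returns the department payload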
|
py | b40f5a50349bf4bff00ed4317814db1c6abb9774 | import pytest
import mmcv
def test_registry():
CATS = mmcv.Registry('cat')
assert CATS.name == 'cat'
assert CATS.module_dict == {}
assert len(CATS) == 0
@CATS.register_module()
class BritishShorthair:
pass
assert len(CATS) == 1
assert CATS.get('BritishShorthair') is BritishShorthair
class Munchkin:
pass
CATS.register_module(Munchkin)
assert len(CATS) == 2
assert CATS.get('Munchkin') is Munchkin
assert 'Munchkin' in CATS
with pytest.raises(KeyError):
CATS.register_module(Munchkin)
CATS.register_module(Munchkin, force=True)
assert len(CATS) == 2
# force=False
with pytest.raises(KeyError):
@CATS.register_module()
class BritishShorthair:
pass
@CATS.register_module(force=True)
class BritishShorthair:
pass
assert len(CATS) == 2
assert CATS.get('PersianCat') is None
assert 'PersianCat' not in CATS
@CATS.register_module(name='Siamese')
class SiameseCat:
pass
assert CATS.get('Siamese').__name__ == 'SiameseCat'
class SphynxCat:
pass
CATS.register_module(name='Sphynx', module=SphynxCat)
assert CATS.get('Sphynx') is SphynxCat
repr_str = 'Registry(name=cat, items={'
repr_str += ("'BritishShorthair': <class 'test_registry.test_registry."
"<locals>.BritishShorthair'>, ")
repr_str += ("'Munchkin': <class 'test_registry.test_registry."
"<locals>.Munchkin'>, ")
repr_str += ("'Siamese': <class 'test_registry.test_registry."
"<locals>.SiameseCat'>, ")
repr_str += ("'Sphynx': <class 'test_registry.test_registry."
"<locals>.SphynxCat'>")
repr_str += '})'
assert repr(CATS) == repr_str
# the registered module should be a class
with pytest.raises(TypeError):
CATS.register_module(0)
# can only decorate a class
with pytest.raises(TypeError):
@CATS.register_module()
def some_method():
pass
# begin: test old APIs
with pytest.warns(DeprecationWarning):
CATS.register_module(SphynxCat)
assert CATS.get('SphynxCat').__name__ == 'SphynxCat'
with pytest.warns(DeprecationWarning):
CATS.register_module(SphynxCat, force=True)
assert CATS.get('SphynxCat').__name__ == 'SphynxCat'
with pytest.warns(DeprecationWarning):
@CATS.register_module
class NewCat:
pass
assert CATS.get('NewCat').__name__ == 'NewCat'
with pytest.warns(DeprecationWarning):
CATS.deprecated_register_module(SphynxCat, force=True)
assert CATS.get('SphynxCat').__name__ == 'SphynxCat'
with pytest.warns(DeprecationWarning):
@CATS.deprecated_register_module
class CuteCat:
pass
assert CATS.get('CuteCat').__name__ == 'CuteCat'
with pytest.warns(DeprecationWarning):
@CATS.deprecated_register_module(force=True)
class NewCat2:
pass
assert CATS.get('NewCat2').__name__ == 'NewCat2'
# end: test old APIs
def test_build_from_cfg():
BACKBONES = mmcv.Registry('backbone')
@BACKBONES.register_module()
class ResNet:
def __init__(self, depth, stages=4):
self.depth = depth
self.stages = stages
@BACKBONES.register_module()
class ResNeXt:
def __init__(self, depth, stages=4):
self.depth = depth
self.stages = stages
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES)
assert isinstance(model, ResNet)
assert model.depth == 50 and model.stages == 4
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args={'stages': 3})
assert isinstance(model, ResNet)
assert model.depth == 50 and model.stages == 3
cfg = dict(type='ResNeXt', depth=50, stages=3)
model = mmcv.build_from_cfg(cfg, BACKBONES)
assert isinstance(model, ResNeXt)
assert model.depth == 50 and model.stages == 3
cfg = dict(type=ResNet, depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES)
assert isinstance(model, ResNet)
assert model.depth == 50 and model.stages == 4
# not a registry
with pytest.raises(TypeError):
cfg = dict(type='VGG')
model = mmcv.build_from_cfg(cfg, 'BACKBONES')
# non-registered class
with pytest.raises(KeyError):
cfg = dict(type='VGG')
model = mmcv.build_from_cfg(cfg, BACKBONES)
# default_args must be a dict or None
with pytest.raises(TypeError):
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=1)
# cfg['type'] should be a str or class
with pytest.raises(TypeError):
cfg = dict(type=1000)
model = mmcv.build_from_cfg(cfg, BACKBONES)
# cfg should contain the key "type"
with pytest.raises(KeyError):
cfg = dict(depth=50, stages=4)
model = mmcv.build_from_cfg(cfg, BACKBONES)
# incorrect registry type
with pytest.raises(TypeError):
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, 'BACKBONES')
# incorrect default_args type
with pytest.raises(TypeError):
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=0)
|
py | b40f5ab3ceb75924f6f6d1985998964dbdb33752 | # import kuber
#
# bundle = kuber.create_bundle('pre')
# bundle.add_from_yaml(
# """
# apiVersion: core/v1
# kind: Pod
# metadata:
# name: test
# """
# )
# p = bundle.get('test')
# print(p)
# print(p.to_yaml())
#
# d = bundle.add('apps/v1', 'Deployment', 'dep').get('dep')
# print(d)
# print(d.to_yaml())
import typing
import kuber
from kuber.latest import core_v1
p = typing.cast(
core_v1.Pod,
kuber.new_resource(
api_version="v1", kind="Pod", name="my-pod", kubernetes_version="1.15"
),
)
p.spec.append_container(
name="c",
image="swernst/cauldron:current-standard",
ports=[core_v1.ContainerPort(host_port=5010, container_port=5010)],
)
print(p.to_yaml())
|
py | b40f5c2fc1d703b51ccf14605a2288d616148615 | # Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pandas as pd
from legate import pandas as lp
start = pd.to_datetime("2015-01-01")
end = pd.to_datetime("2020-01-01")
start_u = start.value // 10 ** 9
end_u = end.value // 10 ** 9
n = 30
s = pd.Series(
10 ** 9 * np.random.randint(start_u, end_u, n, dtype=np.int64),
dtype=np.dtype("datetime64[ns]"),
)
for i in range(n):
if i % 3 == 0:
s[i] = pd.NaT
ls = lp.Series(s)
fields = ["year", "month", "day", "hour", "minute", "second", "weekday"]
for field in fields:
print("Testing " + field)
out_s = getattr(s.dt, field).fillna(0.0)
out_ls = getattr(ls.dt, field).fillna(0).astype(np.double)
assert out_ls.equals(out_s)
|
py | b40f5f75dc827c905f83b7e8e29bbbddf6196145 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RACE Benchmark corpora."""
__all__ = ['RACEH', 'RACEM']
import collections
import fnmatch
import json
import os
import re
from gluonnlp.data.registry import register
from gluonnlp.base import get_home_dir
from mxnet.gluon.data import SimpleDataset
LETTER_TO_IDX = {
"A": 0,
"B": 1,
"C": 2,
"D": 3,
"E": 4
}
def jaccard_similarity(x, y):
s1, s2 = set(x), set(y)
return len(s1.intersection(s2)) / len(s1.union(s2))
def RACEExpansion(line, is_test=False):
""" Each line is comprised of a dictionary with multiple-choice answers, like MultiRC
"""
expanded_lines = []
passage = line["article"]
options = line["options"]
questions = line["questions"]
answers = ["" for _ in options] if is_test else line["answers"]
for (question, ans, opts) in zip(questions, answers, options):
passage_ = passage + " " + question
opt_ = " </sep> ".join([o for o in opts])
if not is_test:
expanded_lines.append([passage_, opt_, str(LETTER_TO_IDX[ans])])
else:
expanded_lines.append([passage_, opt_])
return expanded_lines
class _TextDataset(SimpleDataset):
"""A dataset wrapping over multiple .txt files, each line is a json object.
Specific for RACE, to work with gluon==0.8.3
Parameters
----------
filename : str
Path to the .txt files.
"""
def __init__(self, filenames, segment):
if not isinstance(filenames, (tuple, list)):
filenames = (filenames, )
self._filenames = [os.path.expanduser(f) for f in filenames]
self._filenames.sort()
self._segment = segment
self._is_test = self._segment == "test"
super(_TextDataset, self).__init__(self._read())
def _read(self):
all_samples = []
for filename in self._filenames:
samples = []
with open(filename, 'r') as fin:
for line in fin.readlines():
line_dic = json.loads(
line, object_pairs_hook=collections.OrderedDict)
for l in RACEExpansion(line_dic, is_test=self._is_test):
samples.append(l)
samples = self._read_samples(samples)
all_samples += samples
return all_samples
def _read_samples(self, samples):
raise NotImplementedError
class _RACEDataset(_TextDataset):
def __init__(self, root, segment, task):
root = os.path.expanduser(root)
if not os.path.isdir(root):
os.makedirs(root)
self._root = root
self._segment = segment
file_path = os.path.join(self._root, segment, task)
filenames = [os.path.join(file_path, f) for f in os.listdir(
file_path) if fnmatch.fnmatch(f, '*.txt')]
super(_RACEDataset, self).__init__(filenames, segment=self._segment)
def _repo_dir(self):
raise NotImplementedError
@register(segment=['train', 'dev', 'test'])
class RACEH(_RACEDataset):
"""The RACE: Large-scale ReAding Comprehension Dataset From Examinations dataset,
from https://arxiv.org/pdf/1704.04683.pdf. Dataset is available upon request to the authors.
This is the class corresponding to the highschool version.
Parameters
----------
segment : {'train', 'dev', 'test'}, default 'train'
Dataset segment.
root : str, default "$MXNET_HOME/datasets/race"
Path to folder where the datasets are.
MXNET_HOME defaults to '~/.mxnet'.
"""
def __init__(self, segment='train', root=None):
self._segment = segment
if root is None:
root = os.path.join(get_home_dir(), 'datasets', 'race')
super(RACEH, self).__init__(root, segment, "high")
def _read_samples(self, samples):
return samples
@register(segment=['train', 'dev', 'test'])
class RACEM(_RACEDataset):
"""The RACE: Large-scale ReAding Comprehension Dataset From Examinations dataset,
from https://arxiv.org/pdf/1704.04683.pdf. Dataset is available upon request to the authors.
This is the class corresponding to the middle school version.
Parameters
----------
segment : {'train', 'dev', 'test'}, default 'train'
Dataset segment.
root : str, default "$MXNET_HOME/datasets/race"
Path to folder where the datasets are.
MXNET_HOME defaults to '~/.mxnet'.
"""
def __init__(self, segment='train', root=None):
self._segment = segment
if root is None:
root = os.path.join(get_home_dir(), 'datasets', 'race')
super(RACEM, self).__init__(root, segment, "middle")
def _read_samples(self, samples):
return samples
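# Small self-check (editorial addition, not part of the original module): shows how
# RACEExpansion flattens one RACE-style record into one sample per question. The record
# below is invented for illustration; running this still requires the module's
# gluonnlp/mxnet imports to be installed.
if __name__ == '__main__':
    _demo_line = {
        "article": "The cat sat on the mat.",
        "questions": ["Where did the cat sit?"],
        "options": [["on the mat", "on the chair", "on the roof", "on the bed"]],
        "answers": ["A"],
    }
    for sample in RACEExpansion(_demo_line):
        # [passage + question, options joined by ' </sep> ', answer index as a string]
        print(sample)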
|
py | b40f5feb820d694bee13f68c296ef7685bbc0200 | import numpy as np
from qiskit import IBMQ
import argparse
parser = argparse.ArgumentParser(description='Enqueue QCMRF experiments on qiskit runtime systems.')
parser.add_argument('backend', metavar='backend', type=str, nargs=1, help='Qiskit backend.')
args = parser.parse_args()
options = {
'backend_name': args.backend[0]
}
runtime_inputs = {
#"graphs": [[[0]],[[0,1]],[[0,1],[1,2]],[[0,1],[1,2],[2,3]],[[0,1],[1,2],[2,3],[0,3]],[[0,1,2,3]]],
"graphs": [[[0]],[[0,1]],[[0,1],[1,2]],[[0,1],[1,2],[2,3]]],
"thetas": None,
"gammas": None,
"betas": None,
"repetitions": 10,
"shots": 32000,
"layout": None,
"measurement_error_mitigation": True,
"optimization_level": 3
}
IBMQ.load_account()
provider = IBMQ.get_provider(
hub='ibm-q-fraunhofer',
group='fhg-all',
project='ticket'
)
job = provider.runtime.run(
program_id='qcmrf-P8LxjPxQb8',
options=options,
inputs=runtime_inputs
)
# Job id
print(job.job_id())
# See job status
print(job.status())
# Get results
result = job.result()
|
py | b40f60253fbb1f814761730c14d592bce632a700 | """
Copyright (c) 2016-2022 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import functools
from configparser import ConfigParser
from io import StringIO
from textwrap import dedent
import re
import os
from atomic_reactor.dirs import BuildDir
from atomic_reactor.constants import (DEFAULT_DOWNLOAD_BLOCK_SIZE, PLUGIN_ADD_FILESYSTEM_KEY,
PLUGIN_RESOLVE_COMPOSES_KEY)
from atomic_reactor.config import get_koji_session
from atomic_reactor.plugin import PreBuildPlugin, BuildCanceledException
from atomic_reactor.utils.koji import TaskWatcher, stream_task_output
from atomic_reactor.utils.yum import YumRepo
from atomic_reactor.util import get_platforms, base_image_is_custom, map_to_user_params
from atomic_reactor.metadata import label_map
from atomic_reactor import util
from osbs.utils import Labels
@label_map('filesystem-koji-task-id')
class AddFilesystemPlugin(PreBuildPlugin):
"""
Creates a base image by using a filesystem generated through Koji
Submits an image build task to Koji based on image build
configuration file to create the filesystem to be used in
creating the base image:
https://docs.pagure.org/koji/image_build/
Once image build task is complete the tarball is downloaded.
The existing FROM instruction value is replaced with a
FROM scratch and ADD <filesystem> <to_image> for Dockerfile
of each platform.
The "FROM" instruction should be in the following format:
FROM koji/image-build[:image-build-conf]
Where image-build-conf is the file name of the image build
configuration to be used. If omitted, image-build.conf is used.
This file is expected to be in the same folder as the Dockerfile.
Runs as a pre build plugin in order to properly adjust base image.
"""
key = PLUGIN_ADD_FILESYSTEM_KEY
is_allowed_to_fail = False
DEFAULT_IMAGE_BUILD_CONF = dedent('''\
[image-build]
name = default-name
arches = x86_64
format = docker
disk_size = 10
target = {target}
install_tree = {install_tree}
repo = {repo}
ksurl = {ksurl}
kickstart = kickstart.ks
[factory-parameters]
create_docker_metadata = False
''')
args_from_user_params = map_to_user_params(
"repos:yum_repourls",
"koji_target",
)
def __init__(self, workflow, poll_interval=5, blocksize=DEFAULT_DOWNLOAD_BLOCK_SIZE,
repos=None, koji_target=None):
"""
:param workflow: DockerBuildWorkflow instance
:param poll_interval: int, seconds between polling Koji while waiting
for task completion
:param blocksize: int, chunk size for downloading files from koji
:param repos: list<str>: list of yum repo URLs to be used during
base filesystem creation. First value will also
be used as install_tree. Only baseurl value is used
from each repo file.
:param koji_target: str, koji target name
"""
# call parent constructor
super(AddFilesystemPlugin, self).__init__(workflow)
self.poll_interval = poll_interval
self.blocksize = blocksize
self.repos = repos or []
self.architectures = get_platforms(self.workflow)
self.scratch = util.is_scratch_build(self.workflow)
self.koji_target = koji_target
self.session = None
def is_image_build_type(self, base_image):
return base_image.strip().lower() == 'koji/image-build'
def extract_base_url(self, repo_url):
yum_repo = YumRepo(repo_url)
yum_repo.fetch()
if not yum_repo.is_valid():
return []
repo = yum_repo.config
return [repo.get(section, 'baseurl') for section in repo.sections()
if repo.has_option(section, 'baseurl')]
def get_default_image_build_conf(self):
"""Create a default image build config
:rtype: ConfigParser
:return: Initialized config with defaults
"""
target = self.koji_target
vcs_info = self.workflow.source.get_vcs_info()
ksurl = '{}#{}'.format(vcs_info.vcs_url, vcs_info.vcs_ref)
base_urls = []
for repo in self.repos:
for url in self.extract_base_url(repo):
# Imagefactory only supports $arch variable.
url = url.replace('$basearch', '$arch')
base_urls.append(url)
install_tree = base_urls[0] if base_urls else ''
repo = ','.join(base_urls)
kwargs = {
'target': target,
'ksurl': ksurl,
'install_tree': install_tree,
'repo': repo,
}
config_fp = StringIO(self.DEFAULT_IMAGE_BUILD_CONF.format(**kwargs))
config = ConfigParser()
config.read_file(config_fp)
self.update_config_from_dockerfile(config)
return config
def update_config_from_dockerfile(self, config):
"""Updates build config with values from the Dockerfile
Updates:
* set "name" from LABEL com.redhat.component (if exists)
* set "version" from LABEL version (if exists)
:param config: ConfigParser object
"""
labels = Labels(self.workflow.build_dir.any_platform.dockerfile.labels)
for config_key, label in (
('name', Labels.LABEL_TYPE_COMPONENT),
('version', Labels.LABEL_TYPE_VERSION),
):
try:
_, value = labels.get_name_and_value(label)
except KeyError:
pass
else:
config.set('image-build', config_key, value)
def parse_image_build_config(self, config_file_name):
# Logic taken from koji.cli.koji.handle_image_build.
# Unable to re-use koji's code because "cli" is not
# a package of koji and this logic is intermingled
# with CLI specific instructions.
args = []
opts = {}
config = self.get_default_image_build_conf()
config.read(config_file_name)
if self.architectures:
config.set('image-build', 'arches', ','.join(self.architectures))
# else just use what was provided by the user in image-build.conf
config_str = StringIO()
config.write(config_str)
self.log.debug('Image Build Config: \n%s', config_str.getvalue())
image_name = None
section = 'image-build'
for option in ('name', 'version', 'arches', 'target', 'install_tree'):
value = config.get(section, option)
if not value:
raise ValueError('{} cannot be empty'.format(option))
if option == 'arches':
# pylint: disable=no-member
value = [arch for arch in value.split(',') if arch]
elif option == 'name':
image_name = value
args.append(value)
config.remove_option(section, option)
for option, value in config.items(section):
if option in ('repo', 'format'):
value = [v for v in value.split(',') if v]
elif option in ('disk_size',):
value = int(value)
opts[option] = value
section = 'ova-options'
if config.has_section(section):
ova = []
for k, v in config.items(section):
ova.append('{}={}'.format(k, v))
opts['ova_option'] = ova
section = 'factory-parameters'
if config.has_section(section):
factory = []
for option, value in config.items(section):
factory.append((option, value))
opts['factory_parameter'] = factory
return image_name, args, {'opts': opts}
def build_filesystem(self, image_build_conf):
# Image build conf file should be in the same folder as Dockerfile
image_build_conf = self.workflow.build_dir.any_platform.path / image_build_conf
if not os.path.exists(image_build_conf):
raise RuntimeError('Image build configuration file not found: {}'
.format(image_build_conf))
image_name, args, kwargs = self.parse_image_build_config(image_build_conf)
if self.scratch:
kwargs['opts']['scratch'] = True
task_id = self.session.buildImageOz(*args, **kwargs)
return task_id, image_name
def find_filesystem(self, task_id, filesystem_regex):
for f in self.session.listTaskOutput(task_id):
f = f.strip()
match = filesystem_regex.match(f)
if match:
return task_id, match.group(0)
# Not found in this task, search sub tasks
for sub_task in self.session.getTaskChildren(task_id):
found = self.find_filesystem(sub_task['id'], filesystem_regex)
if found:
return found
return None
def download_filesystem(self, task_id, filesystem_regex, build_dir: BuildDir):
found = self.find_filesystem(task_id, filesystem_regex)
if found is None:
raise RuntimeError('Filesystem not found as task output: {}'
.format(filesystem_regex.pattern))
task_id, file_name = found
self.log.info('Downloading filesystem: %s from task ID: %s',
file_name, task_id)
file_path = build_dir.path / file_name
if file_path.exists():
raise RuntimeError(f'Filesystem {file_name} already exists at {file_path}')
with open(file_path, 'wb') as f:
for chunk in stream_task_output(self.session, task_id, file_name, self.blocksize):
f.write(chunk)
return file_name
def run_image_task(self, image_build_conf):
task_id, image_name = self.build_filesystem(image_build_conf)
task = TaskWatcher(self.session, task_id, self.poll_interval)
try:
task.wait()
except BuildCanceledException:
self.log.info("Build was canceled, canceling task %s", task_id)
try:
self.session.cancelTask(task_id)
self.log.info('task %s canceled', task_id)
except Exception as exc:
self.log.info("Exception while canceling a task (ignored): %s",
util.exception_message(exc))
if task.failed():
try:
# Koji may re-raise the error that caused task to fail
task_result = self.session.getTaskResult(task_id)
except Exception as exc:
task_result = util.exception_message(exc)
raise RuntimeError('image task, {}, failed: {}'
.format(task_id, task_result))
return task_id, image_name
def get_image_build_conf(self):
image_build_conf = None
for parent in self.workflow.data.dockerfile_images:
if base_image_is_custom(parent.to_str()):
image_build_conf = parent.tag
break
if not image_build_conf or image_build_conf == 'latest':
image_build_conf = 'image-build.conf'
return image_build_conf
def update_repos_from_composes(self):
resolve_comp_result = self.workflow.data.prebuild_results.get(PLUGIN_RESOLVE_COMPOSES_KEY)
if not resolve_comp_result:
return
for compose_info in resolve_comp_result['composes']:
self.log.info('adding repo file from compose: %s', compose_info['result_repofile'])
self.repos.append(compose_info['result_repofile'])
def _add_filesystem_to_dockerfile(self, file_name, build_dir: BuildDir):
"""
Put an ADD instruction into the Dockerfile (to include the filesystem
into the container image to be built)
"""
content = 'ADD {0} /\n'.format(file_name)
lines = build_dir.dockerfile.lines
# as we insert elements we have to keep track of the increment for inserting
offset = 1
for item in build_dir.dockerfile.structure:
if item['instruction'] == 'FROM' and base_image_is_custom(item['value'].split()[0]):
lines.insert(item['endline']+offset, content)
offset += 1
build_dir.dockerfile.lines = lines
new_parents = []
for image in build_dir.dockerfile.parent_images:
if base_image_is_custom(image):
new_parents.append('scratch')
else:
new_parents.append(image)
build_dir.dockerfile.parent_images = new_parents
self.log.info('added "%s" as image filesystem', file_name)
def inject_filesystem(self, task_id, image_name, build_dir: BuildDir):
prefix = '{}.*{}'.format(image_name, build_dir.platform)
pattern = (r'{}.*(\.tar|\.tar\.gz|\.tar\.bz2|\.tar\.xz)$'
.format(prefix))
filesystem_regex = re.compile(pattern, re.IGNORECASE)
file_name = self.download_filesystem(task_id, filesystem_regex, build_dir)
self._add_filesystem_to_dockerfile(file_name, build_dir)
def run(self):
if not self.workflow.data.dockerfile_images.custom_parent_image:
self.log.info('Nothing to do for non-custom base images')
return
self.update_repos_from_composes()
image_build_conf = self.get_image_build_conf()
self.session = get_koji_session(self.workflow.conf)
task_id, image_name = self.run_image_task(image_build_conf)
inject_filesystem_call = functools.partial(self.inject_filesystem, task_id, image_name)
self.workflow.build_dir.for_each_platform(inject_filesystem_call)
return {
'filesystem-koji-task-id': task_id,
}
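# Editorial sketch (not part of the plugin): how an image-build.conf in the documented
# format is read with ConfigParser; the section values below are illustrative only.
def _example_image_build_conf():
    cfg = ConfigParser()
    cfg.read_string(dedent('''\
        [image-build]
        name = my-base
        version = 1.0
        arches = x86_64,aarch64
        target = my-koji-target
        install_tree = https://example.com/os/$arch/
        '''))
    # mirrors the 'arches' handling in parse_image_build_config() above
    arches = [arch for arch in cfg.get('image-build', 'arches').split(',') if arch]
    return cfg.get('image-build', 'name'), arches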
|
py | b40f603731cfb2252405da64f40d8771ed99454a | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from gcloud.conf import settings
def get_user_info(request):
client = settings.ESB_GET_CLIENT_BY_USER(request.user.username)
auth = getattr(client, settings.ESB_AUTH_COMPONENT_SYSTEM)
_get_user_info = getattr(auth, settings.ESB_AUTH_GET_USER_INFO)
user_info = _get_user_info({})
if 'data' in user_info:
user_info['data']['bk_supplier_account'] = 0
return user_info
|
py | b40f615c2ad8cc8c4b150432c81394b41871061b |
import pickle, base64, zlib, hashlib
def squeeze(item):
return base64.standard_b64encode(zlib.compress(pickle.dumps(item)))
def inflate(squeezed):
return pickle.loads(zlib.decompress(base64.standard_b64decode(squeezed)))
def phash(x):
return hashlib.sha256(pickle.dumps(x)).hexdigest()
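# Usage sketch (editorial addition): round-trips an object through squeeze/inflate and
# checks that phash is a stable content hash for equal objects built the same way.
if __name__ == '__main__':
    payload = {'alpha': [1, 2, 3], 'beta': 'value'}
    packed = squeeze(payload)               # pickled, zlib-compressed, base64-encoded bytes
    assert inflate(packed) == payload       # lossless round trip
    assert phash(payload) == phash({'alpha': [1, 2, 3], 'beta': 'value'})
    print('round trip OK, digest:', phash(payload))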
|
py | b40f61d4c899590474905f382162e00019efb777 | import uuid
from django.db import models
class Package(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
token = models.UUIDField(default=uuid.uuid4, editable=False)
core = models.BooleanField(default=False)
repository = models.CharField(max_length=255)
def __str__(self):
name = self.name if self.name else 'UNSYNCED'
return 'Package<name=%s, token=%s>' % (name, self.token)
class PackageBuild(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
package = models.ForeignKey('Package', on_delete=models.CASCADE, related_name='package_builds')
github_run_id = models.CharField(max_length=100)
version = models.CharField(max_length=255)
artifact_name = models.CharField(max_length=100)
def __str__(self):
return 'PackageBuild<github_run_id=%s, version=%s>' % (self.github_run_id, self.version)
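# Usage sketch (editorial addition): how a package and one of its builds relate. This only
# runs inside a configured Django project, so it is left as a comment; values are invented.
#
# pkg = Package.objects.create(name="analytics", repository="org/analytics")
# build = PackageBuild.objects.create(
#     package=pkg, github_run_id="123456", version="1.2.0",
#     artifact_name="analytics-1.2.0",
# )
# print(pkg, build, pkg.package_builds.count())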
|
py | b40f64320ce9342070b76b02318effc60820e015 | """ This file creates the flask app, initialises the api and adds resources to it """
from flask import Flask, request, render_template
from flask_restful import Api, Resource
API_URL = "/api/v1"
def create_app():
""" This function creates the flask app and adds the api resources to it"""
app = Flask(__name__)
app.config['SECRET_KEY'] = 'thisisatestsecretkey'
api = Api(app, prefix=API_URL)
@app.route('/', methods=['GET'])
def home():
""" Render HTML template with documentation reference """
return render_template("index.html")
from .resources import (DiaryResource, DiaryEditResource,
UserSignupResource, UserLoginResource,
NotificationsResource)
api.add_resource(UserSignupResource, "/auth/signup", "/auth/signup/")
api.add_resource(UserLoginResource, "/auth/login", "/auth/login/")
api.add_resource(DiaryResource, '/entries', '/entries/')
api.add_resource(DiaryEditResource, '/entries/<int:entryId>')
api.add_resource(NotificationsResource, '/notifications',
'/notifications/')
return app
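# Usage sketch (editorial addition): the factory is normally invoked by a WSGI runner or a
# small run script inside the package, e.g.
#
#   from app import create_app   # assumed package name, adjust to the real one
#   app = create_app()
#   app.run(debug=True)           # exposes e.g. POST /api/v1/auth/signup, GET /api/v1/entries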
|
py | b40f6583f53eb8675b7d144c1f3f482029aeb76c | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from itertools import chain
from logging import getLogger
import submitit
from codegen_sources.preprocessing.bpe_modes.bpe_mode import TMP_EXT
from codegen_sources.preprocessing.dataset_modes.dataset_mode import (
DATASET_SPLITS,
DatasetMode,
)
from codegen_sources.preprocessing.lang_processors.lang_processor import LangProcessor
from codegen_sources.preprocessing.obfuscation.utils_deobfuscation import (
REPLACE_DICT,
cleanup_obfuscated_function,
)
from codegen_sources.preprocessing.timeout import timeout
from submitit import Executor, LocalExecutor
OUTLIER_INDICES_THRESHOLDS = {"VAR_": 200, "FUNC_": 200, "CLASS_": 100}
FUNC_OBFUSCATION_SUFFIXES = ["obfuscated_func", "dictionary_func"]
logger = getLogger()
class ObfuscationFunctionsMode(DatasetMode):
"""
Callable where we track the repos processed so that we can checkpoint with submitit
"""
def __init__(
self,
folder,
languages,
bpe,
processed_lines: set = None,
nb_train_split: int = 8,
keep_comments: bool = False,
):
super().__init__(
suffixes=FUNC_OBFUSCATION_SUFFIXES,
folder=folder,
languages=languages,
bpe=bpe,
parallel_dataset=True,
processed_lines=processed_lines,
nb_train_split=nb_train_split,
keep_comments=keep_comments,
)
def checkpoint(
self, input_path: str, process_strings: bool
) -> submitit.helpers.DelayedSubmission:
return submitit.helpers.DelayedSubmission(
self.__class__(
self.folder, self.languages, self.bpe, self.processed_lines,
),
input_path,
process_strings,
)
@timeout(60)
def extract_data_for_line(
self,
line_id: int,
json_line: dict,
process_strings: bool,
lang_processor: LangProcessor,
):
default_return = line_id, None, None
if "content" not in json_line:
return default_return
content = json_line["content"]
for k, v in REPLACE_DICT.items():
content = content.replace(k, v)
try:
obfuscated, dico = lang_processor.obfuscate_code(content)
tokenized_obfuscated_file = " ".join(
lang_processor.tokenize_code(
obfuscated,
process_strings=process_strings,
keep_comments=self.keep_comments,
)
)
except NotImplementedError:
logger.error(
f"Obfuscate method is not implemented for {lang_processor.__class__.__name__}"
)
raise
except KeyboardInterrupt:
raise
except Exception as e:
logger.warning(f"Error obfuscating content {e} \n")
return default_return
obfuscated_functions = []
func_dicos = []
try:
f_standalone, f_class = lang_processor.extract_functions(
tokenized_obfuscated_file
)
functions = f_standalone + f_class
for func in functions:
func, func_dico = cleanup_obfuscated_function(func, dico)
obfuscated_functions.append(func)
func_dicos.append(func_dico)
assert len(obfuscated_functions) == len(func_dicos)
except KeyboardInterrupt:
raise
except Exception as e:
logger.warning(f"error {e} extracting functions\n")
return default_return
return (
line_id,
json_line["repo_name"],
{"obfuscated_func": obfuscated_functions, "dictionary_func": func_dicos},
)
def filter(self, tokenized_data):
assert all(s in tokenized_data for s in self.suffixes)
assert len(tokenized_data["dictionary_func"]) == len(
tokenized_data["obfuscated_func"]
)
for var_prefix, var_number in OUTLIER_INDICES_THRESHOLDS.items():
for dico in tokenized_data["dictionary_func"]:
if f"{var_prefix}{var_number}" in dico:
return True
return False
def _learn_bpe(self, ncodes: int, executor: Executor = None):
raise Exception(
"BPE codes should not be learnt from obfuscated data. Learn them on monolingual data."
"Please provide bpe codes or learn them."
"To do so, please run pipepline with monolingual mode until BPE learning."
)
def apply_bpe(self, executor: Executor = None, local_parallelism: int = None):
"""
Override the method: in the obfuscation mode, the BPE needs to be restored on the special tokens.
"""
logger.info("")
logger.info("")
logger.info("========== Apply BPE ===========")
if executor is None:
executor = LocalExecutor(folder=self.folder.joinpath("log"))
# apply BPE with tmp suffix
_bpe_ext = self.bpe.ext
self.bpe.ext += TMP_EXT
super().apply_bpe(executor)
self.bpe.ext = _bpe_ext
# restore BPE on obfuscation special tokens
jobs = []
to_restore = list(
chain(
*[
self.folder.glob(f"{lang}.{split}.*{self.bpe.ext}{TMP_EXT}")
for split in DATASET_SPLITS
for lang in self.languages
]
)
)
for f in to_restore:
job = executor.submit(
self.bpe.repair_bpe_for_obfuscation_file, f, f.with_suffix("")
)
jobs.append(job)
for job in jobs:
job.result()
for f in to_restore:
assert f.with_suffix("").is_file()
f.unlink()
def _get_vocab(self, executor: Executor = None):
raise Exception(
"Vocab should not be learnt from obfuscated data. Learn it on monolingual data."
"Please provide vocab or learn them."
"To do so, please run pipepline with monolingual mode until get_vocab."
)
|
py | b40f67526d9d625c0b22601cd0eec23f22f4de9e | import torch
from models.auxiliaries.physics_model_interface import PhysicsModel
from data.base_dataset import BaseDataset
import scipy.io as io
import numpy as np
from torch import from_numpy, empty
from util.util import normalize
class RegCycleGANDataset(BaseDataset):
def initialize(self, opt, phase):
self.phase = phase
self.opt = opt
self.roi = opt.roi
self.root = opt.dataroot
self.physics_model: PhysicsModel = opt.physics_model
self.empty_tensor = empty(0)
# Select relevant part of dataset
if opt.representation == 'real':
channel_index = slice(0,1)
elif opt.representation == 'imag':
channel_index = slice(1,2)
else:
channel_index = slice(None, None)
if phase == 'train':
self.selection = slice(0, opt.val_offset)
elif phase == 'val':
self.selection = slice(opt.val_offset, opt.test_offset)
else:
self.selection = slice(opt.test_offset, None)
# Load dataset from .mat file
all_data = io.loadmat(opt.dataroot)
self.dataset = np.array(all_data[opt.dataname]).astype(float)
self.innit_length(self.dataset.shape[-1])
self.dataset = self.dataset[self.selection, channel_index, self.roi]
if self.opt.representation == 'mag':
self.dataset = np.expand_dims(np.sqrt(self.dataset[:,0,:]**2 + self.dataset[:,1,:]**2), 1)  # keep the channel axis at dim 1 so len(self.dataset) remains the sample count
self.dataset = from_numpy(normalize(self.dataset))
self.A_size = len(self.dataset)
# Load labels from .mat file
self.labels = []
# if self.phase != 'test':
for label_name in self.physics_model.get_label_names():
if not label_name in all_data:
print('WARNING: ' + label_name + ' not found in dataroot!')
continue
self.labels.append(all_data[label_name])
self.num_labels = len(self.labels)
self.labels = from_numpy(np.transpose(np.concatenate(self.labels, 0)))
self.label_sampler = self.labels[self.selection]
# Either use random or fixed labels
# if self.opt.useAlabels:
# permutation = np.random.permutation(self.A_size)
# self.B_sampler = lambda ind: self.label_sampler[permutation[ind]]
# else:
self.B_sampler = self.generate_B_sample
def generate_B_sample(self, index = None):
param = torch.rand((1, self.num_labels))
return self.physics_model.param_to_quantity(param).squeeze(0)
def innit_length(self, full_length):
self.opt.full_data_length = full_length
self.opt.data_length = len(range(0, full_length)[self.roi])
def __getitem__(self, index):
sample: dict = {
'A': self.dataset[index % self.A_size],
'label_A': self.label_sampler[index % self.A_size]
}
# if self.phase != 'test':
# sample['label_A'] = self.label_sampler[index % self.A_size]
if self.phase == 'train':
sample['B'] = self.B_sampler(index)
return sample
def __len__(self):
return self.A_size
def name(self):
return 'Reg-CycleGAN-Dataset' |
py | b40f676a62974efaefcf91e08e7e76f6d2540f6c | """Simple helper library for generating problem detail documents.
As per http://datatracker.ietf.org/doc/draft-ietf-appsawg-http-problem/
"""
import json as j
import logging
from flask_babel import LazyString
from ..exceptions import BaseError
JSON_MEDIA_TYPE = "application/api-problem+json"
def json(type, status, title, detail=None, instance=None, debug_message=None):
d = dict(type=type, title=str(title), status=status)
if detail:
d["detail"] = str(detail)
if instance:
d["instance"] = instance
if debug_message:
d["debug_message"] = debug_message
return j.dumps(d)
class ProblemDetail(object):
"""A common type of problem."""
JSON_MEDIA_TYPE = JSON_MEDIA_TYPE
def __init__(
self,
uri,
status_code=None,
title=None,
detail=None,
instance=None,
debug_message=None,
):
self.uri = uri
self.title = title
self.status_code = status_code
self.detail = detail
self.instance = instance
self.debug_message = debug_message
@property
def response(self):
"""Create a Flask-style response."""
return (
json(
self.uri,
self.status_code,
self.title,
self.detail,
self.instance,
self.debug_message,
),
self.status_code or 400,
{"Content-Type": JSON_MEDIA_TYPE},
)
def detailed(
self, detail, status_code=None, title=None, instance=None, debug_message=None
):
"""Create a ProblemDetail for a more specific occurance of an existing
ProblemDetail.
The detailed error message will be shown to patrons.
"""
# Title and detail must be LazyStrings from Flask-Babel that are
# localized when they are first used as strings.
if title and not isinstance(title, LazyString):
logging.warning('"%s" has not been internationalized' % title)
if detail and not isinstance(detail, LazyString):
logging.warning('"%s" has not been internationalized' % detail)
return ProblemDetail(
self.uri,
status_code or self.status_code,
title or self.title,
detail,
instance,
debug_message,
)
def with_debug(
self, debug_message, detail=None, status_code=None, title=None, instance=None
):
"""Insert debugging information into a ProblemDetail.
The original ProblemDetail's error message will be shown to
patrons, but a more specific error message will be visible to
those who inspect the problem document.
"""
return ProblemDetail(
self.uri,
status_code or self.status_code,
title or self.title,
detail or self.detail,
instance or self.instance,
debug_message,
)
def __repr__(self):
return "<ProblemDetail(uri={0}, title={1}, status_code={2}, detail={3}, instance={4}, debug_message={5}".format(
self.uri,
self.title,
self.status_code,
self.detail,
self.instance,
self.debug_message,
)
class ProblemError(BaseError):
"""Exception class allowing to raise and catch ProblemDetail objects."""
def __init__(self, problem_detail):
"""Initialize a new instance of ProblemError class.
:param problem_detail: ProblemDetail object
:type problem_detail: ProblemDetail
"""
if not isinstance(problem_detail, ProblemDetail):
raise ValueError(
'Argument "problem_detail" must be an instance of ProblemDetail class'
)
self._problem_detail = problem_detail
@property
def problem_detail(self):
"""Return the ProblemDetail object associated with this exception.
:return: ProblemDetail object associated with this exception
:rtype: ProblemDetail
"""
return self._problem_detail
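# --- Added usage sketch (editor's example, not part of the original module) ---
# A minimal illustration of how ProblemDetail and ProblemError are meant to be
# combined. The URI, title and message below are invented for the example; a
# real application would register its own problem types and localized strings.
EXAMPLE_INVALID_INPUT = ProblemDetail(
    "http://example.org/problems/invalid-input",
    status_code=400,
    title="Invalid input",
)
def _example_reject_empty(value):
    """Raise a ProblemError carrying a more specific ProblemDetail."""
    if not value:
        # detailed() keeps the original URI and status but narrows the message.
        raise ProblemError(EXAMPLE_INVALID_INPUT.detailed("Value must not be empty"))
    return value
# A view would typically return EXAMPLE_INVALID_INPUT.response, i.e. a
# (json_body, status_code, headers) tuple, after catching the ProblemError.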
|
py | b40f67cdb65a40b7428387aae22b7e909d2ed5e3 | from setuptools import setup
microlib_name = 'gimmebio.stat_strains'
requirements = [
'scipy',
'numpy',
'pandas',
'click',
]
setup(
name=microlib_name,
version='0.4.5',
author='David Danko',
author_email='[email protected]',
license='MIT license',
classifiers=[
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
],
namespace_packages=['gimmebio'],
packages=[microlib_name],
install_requires=requirements,
)
|
py | b40f67ce9653f60b324649237467aaa79e796e99 | from rammbock import Rammbock
from .version import VERSION
MODBUS_PORT = 502
PROTOCOL_ID_VALUE = 1
PDU_OFFSET = 2
FC1_READ_COILS = 0x01
FC1_REQUEST_MSG = 'Read Coils (FC1)'
class Modbus(object):
"""Modbus TCP/IP is a protocol testing library for the Robot Framework
(generic test automation framework) which uses the test library Rammbock
(generic protocol library) to provide keywords for testing Modbus TCP/IP
v1.1b (NOT: Modbus over TCP/IP).
Have a look into https://en.wikipedia.org/wiki/Modbus#Frame_format for an
overview or into
http://www.modbus.org/docs/Modbus_Application_Protocol_V1_1b.pdf
for a deeper insight.
Implemented as hybrid Robot Framework API.
"""
ROBOT_LIBRARY_VERSION = VERSION
def start_modbus_server(self, ip, name=None, timeout=None):
"""Starts a new Modbus TCP/IP server.
Examples:
| Start Modbus Server | 10.10.10.2 |
| Start Modbus Server | 10.10.10.2 | name=Server1 | timeout=5 |
"""
Rammbock.start_tcp_server(ip=ip, port=MODBUS_PORT, name=name,
timeout=timeout, protocol='modbus', family='ipv4')
def switch_modbus_server(self, name):
"""Switches the current active Modbus server to the given server `name`.
Example:
| Switch modbus server | Server1 |
"""
Rammbock.switch_server(name=name)
def close_modbus_server(self, name=None):
"""Closes the Modbus server connection based on the server `name`.
If no `name` is provided it will close the current active connection.
You have to explicitly `Switch Modbus Server` after close when sending
or receiving any message without explicitly passing the server name.
Example:
| Close modbus server |
| Close modbus server | Server1 |
"""
Rammbock.close_server(name=name)
def start_modbus_client(self, ip, name=None, timeout=None):
"""Starts a new Modbus TCP/IP client.
Examples:
| Start Modbus Client | 10.10.10.2 |
| Start Modbus Client | 10.10.10.2 | name=Client1 | timeout=5 |
"""
Rammbock.start_tcp_client(ip, port=MODBUS_PORT, name=name,
timeout=timeout, protocol='modbus', family='ipv4')
def connect_modbus_client(self, server, name=None):
"""Connects a Modbus client to a Modbus server. If client `name` is not
given then the latest client is connected.
Examples:
| Connect modbus client | 10.10.10.2 |
| Connect modbus client | 10.10.10.2 | name=Client1 |
"""
Rammbock.connect(host=server, port=MODBUS_PORT, name=name)
def switch_modbus_client(self, name):
"""Switches the current active Modbus client to the given client `name`.
Example:
| Switch modbus client | Client1 |
"""
def close_modbus_client(self, name=None):
"""Closes the modbus client connection based on the client `name`.
If no name is provided it will close the current active connection.
You have to explicitly `Switch modbus client` after close when sending
or receiving any message without explicitly passing the client name.
Example:
| Close modbus client |
| Close modbus client | Client1 |
"""
Rammbock.close_client(name=name)
def reset_modbus(self):
"""Closes all Modbus connections, deletes all Modbus servers, clients
and the protocol.
TODO: restrict to single rammbock instance
"""
Rammbock.reset_rammbock()
def reset_modbus_message_streams(self):
"""Reset streams and sockets of incoming messages.
"""
Rammbock.clear_message_streams()
def _define_modbus_protocol(self):
"""Defines the Modbus TCP/IP protocol.
"""
Rammbock.new_protocol('modbus')
Rammbock.u16(name='transactionIdentifier', value=None, align=None)
Rammbock.u16(name='protocolIdentifier', value=PROTOCOL_ID_VALUE, align=None)
Rammbock.u16(name='lengthField', value=None, align=None)
Rammbock.u8(name='unitIdentifier', value=None, align=None)
Rammbock.u8(name='functionCode', value=None, align=None)
Rammbock.pdu(length='lengthField-PDU_OFFSET') # TODO syntax ok?
Rammbock.end_protocol()
def _define_valid_fc1_request(self):
"""Define the FC1 request message template.
Dynamic header content:
- transactionIdentifier
- unitIdentifier
Dynamic PDU content:
- starting address (2 bytes)
- quantity of coils (2 bytes)
"""
Rammbock.new_message(message_name=FC1_REQUEST_MSG, protocol='modbus') #, 'lengthField':'STARTING_ADDRESS_BYTES+QUANTITY_OF_COILS_BYTES', 'functionCode':'FC1_READ_COILS')
Rammbock.save_template(name=FC1_REQUEST_MSG, unlocked=False)
# def server_sends_read_coils_request(self, transaction_id, unit_id, starting_address, quantity_of_coils):
# """Server sends a new FC1 request message to given client `unit_id`.
#
# Example:
# | Server sends fc1 request | 1 | |
# """
# Rammbock.load_copy_of_template(name=FC1_REQUEST_MSG) #, 'transactionIdentifier':transaction_id, 'unitIdentifier':unit_id)
# Rammbock.u16()
# Rammbock.u16()
def discrete_input(self, name):
"""Add a discrete input with given `name` to the template.
Example:
| Discrete input | DigitalInput |
"""
def coil(self, name, value):
"""Add a single coil with given name `name` and value `value`
to the template.
Example:
| Coil | DigitalInput |
| Coil | DigitialOutput | 0xFF00 |
"""
ON = 0xFF00
OFF = 0x0000
if value not in [ON, OFF]:
raise Exception("Invalid single coil value given")
Rammbock.u8(name=name, value=value, align=None)
def coils(self, name, value):
"""Add multiple coils with given name `name` and values `values`
to the template.
Examples:
| Coils | |
| | | |
"""
# TODO range check
#COIL_BITS=[0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01]
#if value not combination of COIL_BITS:
# raise Exception("Invalid multiple coil value given")
Rammbock.u8(name=name, value=value, align=None)
def input_register(self, name):
"""Add a input register with given
Example:
| Input register | |
"""
def holding_register(self, name, value=None):
"""Add a register (input/holding) with given name `name` and value
`value` to the template.
TODO: range check when value given and writing 0x0000-0xFFFF
Example:
| Holding register | AnalogIn1 |
| Holding register | AnalogOut1 | 65535 |
"""
Rammbock.u16(name=name, value=value, align=None)
|
py | b40f68f751d455e290a5a541c2196a073a70fc48 | '''
Given a Binary Tree of size N, find the size of the Largest Independent Set (LIS) in it. A subset of all tree nodes is an independent set if there is no edge between any two nodes of the subset. Your task is to complete the function LISS(), which finds the size of the Largest Independent Set.
For example:
Consider the following binary tree. The LIS is
LIS: [10, 40, 60, 70, 80]
Size: 5.
Example1:
Input:
10 20 30 40 50 N 60 N N 70 80
Output:
5
Explanation: LIS of the above
tree will be [10, 40, 60, 70, 80]
'''
def LISS(root):
if not root:
return 0
incl = 1
excl = LISS(root.left)+LISS(root.right)
if root.left:
incl += LISS(root.left.left)+LISS(root.left.right)
if root.right:
incl += LISS(root.right.left)+LISS(root.right.right)
return max(incl, excl)
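# --- Added sketch (editor's example): memoized variant of the recursion above.
# The plain recursion revisits grandchildren repeatedly; caching the result per
# node keeps the traversal linear in the number of nodes. Nodes are assumed to
# be ordinary objects with .left/.right attributes, usable as dict keys.
def LISS_memo(root, cache=None):
    if cache is None:
        cache = {}
    if not root:
        return 0
    if root in cache:
        return cache[root]
    excl = LISS_memo(root.left, cache) + LISS_memo(root.right, cache)
    incl = 1
    if root.left:
        incl += LISS_memo(root.left.left, cache) + LISS_memo(root.left.right, cache)
    if root.right:
        incl += LISS_memo(root.right.left, cache) + LISS_memo(root.right.right, cache)
    cache[root] = max(incl, excl)
    return cache[root]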
|
py | b40f692aee2adaa9f3946de9798b32d867156c0e | import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from ..utils import ResLayer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottleneck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
groups=1,
base_width=4,
base_channels=64,
**kwargs):
"""Bottleneck block for ResNeXt.
If style is "pytorch", the stride-two layer is the 3x3 conv layer,
if it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
if groups == 1:
width = self.planes
else:
width = math.floor(self.planes *
(base_width / base_channels)) * groups
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
self.norm_cfg, width, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
self.with_modulated_dcn = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
self.conv_cfg,
width,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
@BACKBONES.register_module()
class ResNeXt(ResNet):
"""ResNeXt backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Resnet stages. Default: 4.
groups (int): Group of resnext.
base_width (int): Base width of resnext.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
norm_cfg (dict): dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): whether to use zero init for last norm layer
in resblocks to let them behave as identity.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self, groups=1, base_width=4, **kwargs):
self.groups = groups
self.base_width = base_width
super(ResNeXt, self).__init__(**kwargs)
def make_res_layer(self, **kwargs):
return ResLayer(
groups=self.groups,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
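# --- Added usage sketch (editor's example, not part of the original module) ---
# A typical mmdetection-style backbone config for the ResNeXt-50 (32x4d)
# variant defined above. The values follow the common 32x4d recipe; treat the
# dict as an illustration rather than a prescribed configuration.
EXAMPLE_RESNEXT50_32X4D_CFG = dict(
    type='ResNeXt',
    depth=50,
    groups=32,
    base_width=4,
    num_stages=4,
    out_indices=(0, 1, 2, 3),
    frozen_stages=1,
    norm_cfg=dict(type='BN', requires_grad=True),
    style='pytorch',
)
# A detector config would hand this dict to the project's backbone builder,
# which resolves type='ResNeXt' through the BACKBONES registry declared above.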
|
py | b40f6a964163c05acb10a89360d57a3a89ef5243 | from bigpipe_response.bigpipe import Bigpipe
from bigpipe_response.bigpipe_render_options import BigpipeRenderOptions
from bigpipe_response.helpers import to_include
from bigpipe_response.bigpipe_response import BigpipeResponse
from bigpipe_response.pagelet import Pagelet
from bigpipe_response_example.bigpipe_processors.VueDOMBind import VueDOMBind
from data.app_instance import AppInstance
demo_dao = AppInstance.get()
js_dependencies = to_include(['React=react', 'ReactDOM=react-dom', 'createReactClass=create-react-class', 'Vue=vue.default'],
is_link=True,
processor_name=Bigpipe.get().config.processors.js_modules.params.processor_name)
def demo(request):
pagelets = [
Pagelet(request, 'vue-container', vue_view, {}),
Pagelet(request, 'markdown-container', markdown_view, {}),
]
return BigpipeResponse(request,
render_type=BigpipeResponse.RenderType.TEMPLATE,
render_source='demoMultipleFrameworks.html',
pagelets=pagelets,
js_dependencies=js_dependencies,
scss_dependencies=['@demo_main'])
def vue_view(request):
return BigpipeResponse(request,
render_type=BigpipeResponse.RenderType.JAVASCRIPT,
render_source='DynamicList',
render_options=BigpipeRenderOptions(js_processor_name='vue', js_dom_bind=VueDOMBind()))
def markdown_view(request):
return BigpipeResponse(request,
render_type=BigpipeResponse.RenderType.JAVASCRIPT_RENDER,
render_source='markdown_demo',
render_options=BigpipeRenderOptions(js_processor_name='markdown'))
|
py | b40f6a9ec2910932a53134fa725b352a36ae6062 | import os
import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.options import options
from tornado.options import define
# define host and port
define("host", default='127.0.0.1', help="run on the given host", type=str)
define("port", default=3214, help="run on the given port", type=int)
tornado.options.parse_command_line()
def main():
from tnb import app
port = os.getenv('PORT', options.port)
server = tornado.httpserver.HTTPServer(app.make_app())
server.listen(port)
message = "Listening server at http://{0}:{1}"
print(message.format(options.host, port))
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print("\nStopping server.")
if __name__ == '__main__':
main()
|
py | b40f6e1e8e2a0c969381c4b08ea092fa67b4b0f3 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UI for obtaining dynamic configuration settings from the user.
The chooser takes an option file in yaml format which specifies options to
be offered to the user. The UI is populated dynamically with the options and
various types of form inputs.
The UI automatically times out to prevent it from blocking the build. If the
user interacts with the UI before the timer expires, the countdown stops and
the user must click to resume the build. Fields are assigned default values
at startup, and these defaults are also the final values if the UI times out.
When the UI exits, the final state of all the forms is written to the answer
file, to be consumed by the caller.
"""
import logging
from glazier.chooser import fields
from glazier.lib import resources
import Tkinter as tk
class Chooser(object):
"""Dynamic UI for user configuration."""
def __init__(self, options, preload=True):
self.fields = {}
self.responses = {}
self.root = tk.Tk()
self.row = 0
if preload:
self._GuiHeader()
self._LoadOptions(options)
self._GuiFooter()
def _AddExpander(self):
"""Adds an empty Frame which expands vertically in the UI."""
expander = tk.Frame(self.root)
expander.grid(column=0, row=self.row)
self.root.rowconfigure(self.row, weight=1)
self.row += 1
def _AddSeparator(self):
"""Adds a Separator visual element (UI decoration)."""
sep = fields.Separator(self.root)
sep.grid(column=0, row=self.row, sticky='EW')
self.root.rowconfigure(self.row, weight=0)
self.row += 1
def Display(self):
"""Displays the UI on screen."""
if self.fields:
w, h = self.root.winfo_screenwidth(), self.root.winfo_screenheight()
self.root.geometry('%dx%d+0+0' % (w, h))
self.root.focus_set()
self.timer.Countdown()
self.root.mainloop()
self._Quit()
def _GuiFooter(self):
"""Creates all UI elements below the input fields."""
self._AddExpander()
self.timer = fields.Timer(self.root)
self.timer.grid(column=0, row=self.row)
self.root.bind('<Key>', self.timer.Pause)
self.root.bind('<Button-1>', self.timer.Pause)
self.row += 1
self._AddExpander()
self._GuiLogo()
def _GuiHeader(self):
"""Creates all UI elements above the input fields."""
self.root.columnconfigure(0, weight=1)
self.root.overrideredirect(1)
top = self.root.winfo_toplevel()
top.rowconfigure(0, weight=1)
top.columnconfigure(0, weight=1)
def _GuiLogo(self):
"""Creates the UI graphical logo."""
self.logo_frame = tk.Frame(self.root)
self.logo_frame.columnconfigure(0, weight=1)
r = resources.Resources()
path = r.GetResourceFileName('logo.gif')
self.logo_img = tk.PhotoImage(file=path)
self.logo = tk.Label(self.logo_frame, image=self.logo_img, text='logo here')
self.logo.grid(column=0, row=0, sticky='SE')
self.logo_frame.grid(column=0, row=self.row, sticky='EW')
self.row += 1
def _LoadOptions(self, options):
"""Load all options from the options file input.
UI elements are created for each option
Args:
options: a list of all options pending for the user
"""
for option in options:
if 'type' not in option:
logging.error('Untyped option: %s.', option)
continue
if option['type'] == 'radio_menu':
self.fields[option['name']] = fields.RadioMenu(self.root, option)
self.fields[option['name']].grid(column=0, row=self.row, pady=5)
elif option['type'] == 'toggle':
self.fields[option['name']] = fields.Toggle(self.root, option)
self.fields[option['name']].grid(column=0, row=self.row, pady=5)
else:
logging.error('Unknown option type: %s.', option['type'])
continue
self.root.rowconfigure(self.row, weight=0)
self.row += 1
self._AddSeparator()
def _Quit(self):
"""Save all responses and exit the UI."""
for field in self.fields:
self.responses[field] = self.fields[field].Value()
self.root.destroy()
def Responses(self):
return self.responses
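# --- Added usage sketch (editor's example, not part of the original module) ---
# _LoadOptions() only requires each option to carry a 'type' ('radio_menu' or
# 'toggle') and a 'name'; any further keys (labels, choices, defaults) are
# interpreted by glazier.chooser.fields, which is not shown here, so treat the
# dicts below as schematic rather than complete.
EXAMPLE_OPTIONS = [
    {'type': 'toggle', 'name': 'encrypt_disk'},
    {'type': 'radio_menu', 'name': 'locale'},
]
# Typical flow (needs a display for Tkinter):
#   chooser = Chooser(options=EXAMPLE_OPTIONS)
#   chooser.Display()
#   answers = chooser.Responses()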
|
py | b40f6e54583908a797d8bfec88fcbbd365be68a9 | from typing import Optional
from rotkehlchen.assets.asset import (
WORLD_TO_BINANCE,
WORLD_TO_BITTREX,
WORLD_TO_KRAKEN,
WORLD_TO_POLONIEX,
Asset,
)
from rotkehlchen.constants.assets import A_DAI, A_SAI
from rotkehlchen.db.upgrades.v7_v8 import COINBASE_DAI_UPGRADE_END_TS
from rotkehlchen.errors import DeserializationError, UnsupportedAsset
from rotkehlchen.typing import Timestamp
from rotkehlchen.utils.misc import ts_now
UNSUPPORTED_POLONIEX_ASSETS = (
# This was a super shortlived coin.
# Only info is here: https://bitcointalk.org/index.php?topic=632818.0
# No price info in cryptocompare or paprika. So we don't support it.
'AXIS',
'APH',
# This was yet another shortlived coin whose announcement is here:
# https://bitcointalk.org/index.php?topic=843495 and coinmarketcap:
# https://coinmarketcap.com/currencies/snowballs/.
# No price info in cryptocompare or paprika. So we don't support it.
'BALLS',
# There are two coins with the name BankCoin, neither of which seems to
    # be this. This market seems to have been added in May 2014
# https://twitter.com/poloniex/status/468070096913432576
# but both other bank coins are in 2017 and 2018 respectively
# https://coinmarketcap.com/currencies/bankcoin/
# https://coinmarketcap.com/currencies/bank-coin/
# So this is an unknown coin
'BANK',
# BitBlock seems to be this: https://coinmarketcap.com/currencies/bitblock/
# and seems to have lived for less than a month. It does not seem to be the
# same as BBK, the BitBlocks project (https://www.cryptocompare.com/coins/bbk/overview)
# No price info in cryptocompare or paprika. So we don't support it.
'BBL',
# Black Dragon Coin. Seems like a very short lived scam from Russia.
# Only info that I found is here: https://bitcointalk.org/index.php?topic=597006.0
# No price info in cryptocompare or paprika. So we don't support it.
'BDC',
# Badgercoin. A very shortlived coin. Only info found is here:
# https://coinmarketcap.com/currencies/badgercoin/
    # Same symbol is used for an active coin called "BitDegree"
# https://coinmarketcap.com/currencies/bitdegree/
# No price info in cryptocompare or paprika. So we don't support it.
'BDG',
# Bonuscoin. A shortlived coin. Only info found is here:
# https://coinmarketcap.com/currencies/bonuscoin/
# No price info in cryptocompare or paprika. So we don't support it.
'BNS',
# Bonescoin. A shortlived coin. Only info found is here:
# https://coinmarketcap.com/currencies/bones/
# No price info in cryptocompare or paprika. So we don't support it.
'BONES',
    # Burnercoin. A shortlived coin. Only info is here:
# https://coinmarketcap.com/currencies/burnercoin/
# No price info in cryptocompare or paprika. So we don't support it.
'BURN',
# Colbertcoin. Shortlived coin. Only info is here:
# https://coinmarketcap.com/currencies/colbertcoin/
# No price info in cryptocompare or paprika. So we don't support it.
'CC',
# Chancecoin.
# https://coinmarketcap.com/currencies/chancecoin/
'CHA',
# C-note. No data found anywhere. Only this:
# https://bitcointalk.org/index.php?topic=397916.0
'CNOTE',
# Coino. Shortlived coin with only data found here
# https://coinmarketcap.com/currencies/coino/
# A similar named token, coin(o) with symbol CNO has data
# both in cmc and paprika, but CON doesn't so we don't support it
'CON',
# CorgiCoin. No data found except from here:
# https://coinmarketcap.com/currencies/corgicoin/
'CORG',
# Neodice. No data found except from here:
# https://coinmarketcap.com/currencies/neodice/
# A lot more tokens with the DICE symbol exist so we don't support this
'DICE',
# Distrocoin. No data found except from here:
# https://coinmarketcap.com/currencies/distrocoin/
'DIS',
# Bitshares DNS. No data found except from here:
# https://coin.market/crypto/dns
'DNS',
# DvoraKoin. No data found except from here:
# https://bitcointalk.org/index.php?topic=613854.0
'DVK',
# EBTcoin. No data found except from here:
# https://coinmarketcap.com/currencies/ebtcoin/
'EBT',
# EmotiCoin. No data found except from here:
# https://coinmarketcap.com/currencies/emoticoin/
'EMO',
# EntropyCoin. No data found except from here:
# https://coinmarketcap.com/currencies/entropycoin/
'ENC',
# eToken. No data found except from here:
# https://coinmarketcap.com/currencies/etoken/
'eTOK',
# ETHBNT. No data found outside of poloniex:
# https://poloniex.com/exchange#btc_ethbnt
'ETHBNT',
# FoxCoin. No data found except from here:
# https://coinmarketcap.com/currencies/foxcoin/
'FOX',
# FairQuark. No data found except from here:
# https://coinmarketcap.com/currencies/fairquark/
'FRQ',
# FVZCoin. No data found except from here:
# https://coin.market/crypto/fvz
'FVZ',
# Frozen. No data found except from here:
# https://coinmarketcap.com/currencies/frozen/
'FZ',
# Fuzon. No data found except from here:
# https://coinmarketcap.com/currencies/fuzon/
'FZN',
# Global Denomination. No data found except from here:
# https://coinmarketcap.com/currencies/global-denomination/
'GDN',
# Giarcoin. No data found except from here:
# https://bitcointalk.org/index.php?topic=545529.0
'GIAR',
# Globe. No data found except from here:
# https://coinmarketcap.com/currencies/globe/
'GLB',
# GenesisCoin. No data found except from here:
# https://bitcointalk.org/index.php?topic=518258.0
'GNS',
# GoldEagles. No data found.
'GOLD',
# GroupCoin. No data found except from here:
# https://coinmarketcap.com/currencies/groupcoin/
'GPC',
# Gridcoin X. Not sure what this is. Perhaps a fork of Gridcoin
# https://coinmarketcap.com/currencies/gridcoin-classic/#charts
# In any case only poloniex lists it for a bit so ignoring it
'GRCX',
# H2Ocoin. No data found except from here:
# https://coinmarketcap.com/currencies/h2ocoin/
'H2O',
# Hirocoin. No data found except from here:
# https://coinmarketcap.com/currencies/hirocoin/
'HIRO',
# Hotcoin. Super shortlived. No data found except from here:
# https://coinmarketcap.com/currencies/hotcoin/
# Note there are 2 more coins with this symbol.
# https://coinmarketcap.com/currencies/hydro-protocol/
# https://coinmarketcap.com/currencies/holo/
'HOT',
# CoinoIndex. No data found except from here:
# https://coinmarketcap.com/currencies/coinoindex/
'INDEX',
# InformationCoin. No data found except from here:
# https://coinmarketcap.com/currencies/informationcoin/
'ITC',
# jl777hodl. No data found except from here:
# https://coinmarketcap.com/currencies/jl777hodl/
'JLH',
# Jackpotcoin. No data found except from here:
# https://coinmarketcap.com/currencies/jackpotcoin/
'JPC',
# Juggalocoin. No data found except from here:
# https://bitcointalk.org/index.php?topic=555896.0
'JUG',
# KTON - Darwinia commitment token. No data found
'KTON',
# Limecoin. No data found except from here:
# https://coinmarketcap.com/currencies/limecoin/
'LC',
# LimecoinLite. No data found except from here:
# https://coinmarketcap.com/currencies/limecoinlite/
'LCL',
# LogiCoin. No data found except from here:
# https://coinmarketcap.com/currencies/logicoin/
'LGC',
# LeagueCoin. No data found except from here:
# https://coinmarketcap.com/currencies/leaguecoin/
'LOL',
# LoveCoin. No data found except from here:
# https://coinmarketcap.com/currencies/lovecoin/
'LOVE',
# Mastiffcoin. No data found except from here:
# https://coinmarketcap.com/currencies/mastiffcoin/
'MAST',
# CryptoMETH. No data found except from here:
# https://coinmarketcap.com/currencies/cryptometh/
'METH',
# Millenium coin. No data found except from here:
# https://coinmarketcap.com/currencies/millenniumcoin/
'MIL',
# Moneta. No data found except from here:
# https://coinmarketcap.com/currencies/moneta/
# There are other moneta coins like this:
# https://www.cryptocompare.com/coins/moneta/overview/BTC
    # but they don't seem to be the same
'MNTA',
# Monocle. No data found except from here:
# https://coinmarketcap.com/currencies/monocle/
'MON',
# MicroCoin. No data found except from here:
# https://coinmarketcap.com/currencies/microcoin/
'MRC',
# Metiscoin. No data found except from here:
# https://coinmarketcap.com/currencies/metiscoin/
'MTS',
# Muniti. No data found except from here:
# https://coinmarketcap.com/currencies/muniti/
'MUN',
# N5coin. No data found except from here:
# https://coinmarketcap.com/currencies/n5coin/
'N5X',
# NAS. No data found except from here:
# https://coinmarketcap.com/currencies/nas/
# Note: This is not the Nebulas NAS token
'NAS',
# Nanolite. No data found except from here:
# https://www.reddit.com/r/CryptoCurrency/comments/26neqz/nanolite_a_new_x11_cryptocurrency_which_launched/
'NL',
# NobleNXT. No data found except from here:
# https://coinmarketcap.com/currencies/noblenxt/
'NOXT',
# NTX. No data found except from here:
# https://coinmarketcap.com/currencies/ntx/
'NTX',
# (PAND)a coin. No data found except here:
# https://coinmarketcap.com/currencies/pandacoin-panda/
# Note: This is not the PND Panda coin
'PAND',
# Pawncoin. No data found except from here:
# https://coinmarketcap.com/currencies/pawncoin/
'PAWN',
# Parallaxcoin. No data found except from here:
# https://coinmarketcap.com/currencies/parallaxcoin/
# Note: This is not PLEX coin
'PLX',
# Premine. No data found except from here:
# https://coinmarketcap.com/currencies/premine/
'PMC',
# Particle. No data found except from here:
# https://coinmarketcap.com/currencies/particle/
'PRT',
# Bitshares PTS. No data found except from here:
# https://coinmarketcap.com/currencies/bitshares-pts/
'PTS',
# ShibeCoin. No data found except from here:
# https://coinmarketcap.com/currencies/shibecoin/
'SHIBE',
# ShopX. No data found except from here:
# https://coinmarketcap.com/currencies/shopx/
'SHOPX',
# SocialCoin. No data found except from here:
# https://coinmarketcap.com/currencies/socialcoin/
# Note this is not The SOCC Social coin
# https://coinmarketcap.com/currencies/socialcoin-socc/
'SOC',
# SourceCoin. No data found except from here:
# https://bitcointalk.org/index.php?topic=688494.160
'SRCC',
# SurgeCoin. No data found except from here:
# https://coinmarketcap.com/currencies/surgecoin/
'SRG',
# SummerCoin. No data found except from here:
# https://coinmarketcap.com/currencies/summercoin/
'SUM',
# SunCoin. No data found except from here:
# https://coinmarketcap.com/currencies/suncoin/
'SUN',
# TalkCoin. No data found except from here:
# https://coinmarketcap.com/currencies/talkcoin/
'TAC',
# Twecoin. No data found except from here:
# https://bitcointalk.org/index.php?topic=553593.0
'TWE',
# UniversityCoin. No data found except from here:
# https://coinmarketcap.com/currencies/universitycoin/
'UVC',
# Voxels. No data found except from here:
# https://coincodex.com/crypto/voxels/
'VOX',
# X13 coin. No data found. Except from maybe this:
# https://bitcointalk.org/index.php?topic=635382.200;wap2
'X13',
# ApiCoin. No data found except from here:
# https://coinmarketcap.com/currencies/apicoin/
'XAP',
# Xcurrency. No data found except from here:
# https://coinmarketcap.com/currencies/xcurrency/
'XC',
# ClearingHouse. No data found except from here:
# https://coinmarketcap.com/currencies/clearinghouse/
'XCH',
# Filecoin IOU. No data found for this except from in poloniex.
# As of 22/07/2020
'XFIL',
# HonorCoin. No data found except from here:
# https://bitcointalk.org/index.php?topic=639043.0
'XHC',
# SilliconValleyCoin. No data found except from here:
# https://coinmarketcap.com/currencies/siliconvalleycoin-old/
'XSV',
# CoinoUSD. No data found except from here:
# https://coinmarketcap.com/currencies/coinousd/
'XUSD',
# Creds. No data found except from here:
# https://bitcointalk.org/index.php?topic=513483.0
'XXC',
# YangCoin. No data found except from here:
# https://coinmarketcap.com/currencies/yangcoin/
'YANG',
# YellowCoin. No data found except from here:
# https://coinmarketcap.com/currencies/yellowcoin/
'YC',
# YinCoin. No data found except from here:
# https://coinmarketcap.com/currencies/yincoin/
'YIN',
# LINK, BEAR/BULL. No data found yet but should probably revisit
'LINKBULL',
'LINKBEAR',
# Bitcoin and Volatility and Inverse volatility token.
# No data found yet but should probably revisit. They are
# in cryptocompare but they have no price
'BVOL',
'IBVOL',
'XDOT', # old polkadot before the split
'BCC', # neither in coingecko nor cryptocompare
'BTCTRON', # neither in coingecko nor cryptocompare
'FCT2', # neither in coingecko nor cryptocompare
)
UNSUPPORTED_BITTREX_ASSETS = (
# 4ART, As of 22/07/2020 no data found outside of Bittrex
'4ART',
# APIX, As of 19/12/2019 no data found outside of Bittrex
# https://medium.com/apisplatform/apix-trading-open-on-bittrex-global-61653fa346fa
'APIX',
# APM Coin. As of 16/11/2019 no data found outside of Bittrex for this token
# https://global.bittrex.com/Market/Index?MarketName=BTC-APM
'APM',
# Tether CNH. As of 30/09/2019 no data found outside of Bittrex for this token
# https://medium.com/bittrex/new-bittrex-international-listing-tether-cnh-cnht-c9ad966ac303
'CNHT',
# Credit coin. As of 29/01/2020 no data found outside of Bittrex for this token
# https://global.bittrex.com/Market/Index?MarketName=BTC-CTC
'CTC',
# Foresting. As of 22/03/2019 no data found.
# Only exists in bittrex. Perhaps it will soon be added to other APIs.
# https://international.bittrex.com/Market/Index?MarketName=BTC-PTON
'PTON',
# VDX IEO. As of 16/05/2019 no data found.
# Only exists in bittrex. Perhaps it will soon be added to other APIs.
# https://international.bittrex.com/Market/Index?MarketName=BTC-VDX
'VDX',
# Origo. As of 02/06/2019 no data found outside of bittrex
# https://international.bittrex.com/Market/Index?MarketName=BTC-OGO
'OGO',
# STPT. As of 06/06/2019 no data found outside of bittrex
# https://twitter.com/BittrexIntl/status/1136045052164227079
'STPT',
# PHNX. As of 07/06/2020 no data found outside of bittrex for PhoenixDAO
# https://www.coingecko.com/en/coins/phoenixdao
'PHNX',
# PROM. As of 28/06/2019 no data found outside of bittrex for Prometheus
# https://twitter.com/BittrexIntl/status/1144290718325858305
'PROM',
# URAC. As of 12/07/2019 no data found outside of bittrex for Uranus
# https://twitter.com/BittrexIntl/status/1149370485735591936
'URAC',
# BRZ. As of 16/06/2019 no data found outside of Bittrex for this token
# https://twitter.com/BittrexIntl/status/1150870819758907393
'BRZ',
# HINT. As of 28/07/2019 no data found outside of Bittrex for this token
# https://twitter.com/BittrexIntl/status/1154445165257474051
'HINT',
# TUDA. As of 02/08/2019 no data found outside of Bittrex for this token
# https://mobile.twitter.com/BittrexIntl/status/1156974900986490880
'TUDA',
# TwelveShips. As of 23/08/2019 no data found outside of Bittrex for this token
# https://twitter.com/BittrexIntl/status/1164689364997353472
'TSHP',
# BlockTV. As of 29/11/2019 no data found outside of Bittrex for this token
# https://global.bittrex.com/Market/Index?MarketName=BTC-BLTV
'BLTV',
# Forkspot. As for 01/03/2020 no data found outside of Bittrex for this token
# https://global.bittrex.com/Market/Index?MarketName=BTC-FRSP
'FRSP',
# Universal Protocol Token. As of 19/03/2020 no data found outside of Bittrex for this token.
# https://global.bittrex.com/Market/Index?MarketName=BTC-UPT
'UPT',
# Universal USD and EUR. As of 19/03/2020 no data found outside of Bittrex for this token.
# https://global.bittrex.com/Market/Index?MarketName=BTC-UPUSD
'UPEUR',
'UPUSD',
# Vanywhere. As of 19/03/2020 no data found outside of Bittrex for this token.
# https://global.bittrex.com/Market/Index?MarketName=BTC-VANY
'VANY',
# Ecochain. As of 22/07/2020 no data found outside of Bittrex for this token.
# All ECOC data refer to a different coin called EcoCoin
'ECOC',
# As of 28/08/2020 the following assets don't have prices listed anywhere
'FME',
'INX',
'MFA',
'FCT2', # neither in coingecko nor cryptocompare
)
UNSUPPORTED_BINANCE_ASSETS = (
'ETF', # ETF is a dead coin given to all ETH holders. Just ignore
# BTCB, USDSB, BGBP are not yet supported anywhere else
'BTCB', # https://www.binance.com/en/support/articles/360029288972
'USDSB', # https://www.binance.com/en/support/articles/360029522132
'BGBP', # https://www.binance.com/en/support/articles/360030827252
'TUSDB', # https://www.binance.com/en/support/articles/360032154071
'NGN', # https://www.binance.com/en/support/articles/360035511611
'123', # https://twitter.com/rotkiapp/status/1161977327078838272
'456', # https://twitter.com/rotkiapp/status/1161977327078838272
'WRX', # https://info.binance.com/en/currencies/WRX - not listed anywhere else
'SCRT', # no cryptocompare data
'SPARTA', # no cryptocompare data
'UNIDOWN', # no cryptocompare/coingecko data
'UNIUP', # no cryptocompare/coingecko data
)
POLONIEX_TO_WORLD = {v: k for k, v in WORLD_TO_POLONIEX.items()}
BITTREX_TO_WORLD = {v: k for k, v in WORLD_TO_BITTREX.items()}
BINANCE_TO_WORLD = {v: k for k, v in WORLD_TO_BINANCE.items()}
KRAKEN_TO_WORLD = {v: k for k, v in WORLD_TO_KRAKEN.items()}
RENAMED_BINANCE_ASSETS = {
# The old BCC in binance forked into BCHABC and BCHSV
# but for old trades the canonical chain is ABC (BCH in rotkehlchen)
'BCC': 'BCH',
# HCash (HSR) got swapped for Hyperchash (HC)
# https://support.binance.com/hc/en-us/articles/360012489731-Binance-Supports-Hcash-HSR-Mainnet-Swap-to-HyperCash-HC-
'HSR': 'HC',
# Red pulse got swapped for Phoenix
# https://support.binance.com/hc/en-us/articles/360012507711-Binance-Supports-Red-Pulse-RPX-Token-Swap-to-PHOENIX-PHX-
'RPX': 'PHX',
}
ETH_TOKENS_MOVED_TO_OWN_CHAIN = {
'NET': 'NIM',
'EOS': 'EOS',
'META': 'META',
}
def asset_from_kraken(kraken_name: str) -> Asset:
if not isinstance(kraken_name, str):
raise DeserializationError(f'Got non-string type {type(kraken_name)} for kraken asset')
if kraken_name.endswith('.S') or kraken_name.endswith('.M'):
# this is a staked coin. For now since we don't show staked coins
# consider it as the normal version. In the future we may perhaps
# differentiate between them in the balances
kraken_name = kraken_name[:-2]
# Some names are not in the map since kraken can have multiple representations
# depending on the pair for the same asset. For example XXBT and XBT, XETH and ETH,
# ZUSD and USD
if kraken_name == 'XBT':
name = 'BTC'
elif kraken_name == 'XDG':
name = 'DOGE'
elif kraken_name in ('ETH', 'EUR', 'USD', 'GBP', 'CAD', 'JPY', 'KRW', 'CHF', 'AUD'):
name = kraken_name
else:
name = KRAKEN_TO_WORLD.get(kraken_name, kraken_name)
return Asset(name)
def asset_from_poloniex(poloniex_name: str) -> Asset:
if not isinstance(poloniex_name, str):
raise DeserializationError(f'Got non-string type {type(poloniex_name)} for poloniex asset')
if poloniex_name in UNSUPPORTED_POLONIEX_ASSETS:
raise UnsupportedAsset(poloniex_name)
our_name = POLONIEX_TO_WORLD.get(poloniex_name, poloniex_name)
return Asset(our_name)
def asset_from_bittrex(bittrex_name: str) -> Asset:
if not isinstance(bittrex_name, str):
raise DeserializationError(f'Got non-string type {type(bittrex_name)} for bittrex asset')
if bittrex_name in UNSUPPORTED_BITTREX_ASSETS:
raise UnsupportedAsset(bittrex_name)
name = BITTREX_TO_WORLD.get(bittrex_name, bittrex_name)
return Asset(name)
def asset_from_binance(binance_name: str) -> Asset:
if not isinstance(binance_name, str):
raise DeserializationError(f'Got non-string type {type(binance_name)} for binance asset')
if len(binance_name) >= 5 and binance_name.startswith('LD'):
# this is a lending/savings coin. For now since we don't show lending/savings coins
# consider it as the normal version. In the future we may perhaps
# differentiate between them in the balances
binance_name = binance_name[2:]
if binance_name in UNSUPPORTED_BINANCE_ASSETS:
raise UnsupportedAsset(binance_name)
if binance_name in RENAMED_BINANCE_ASSETS:
return Asset(RENAMED_BINANCE_ASSETS[binance_name])
name = BINANCE_TO_WORLD.get(binance_name, binance_name)
return Asset(name)
def asset_from_coinbase(cb_name: str, time: Optional[Timestamp] = None) -> Asset:
# During the transition from DAI(SAI) to MCDAI(DAI) coinbase introduced an MCDAI
# wallet for the new DAI during the transition period. We should be able to handle this
# https://support.coinbase.com/customer/portal/articles/2982947
if cb_name == 'MCDAI':
return A_DAI
elif cb_name == 'DAI':
# If it's dai and it's queried from the exchange before the end of the upgrade
if not time:
time = ts_now()
if time < COINBASE_DAI_UPGRADE_END_TS:
# Then it should be the single collateral version
return A_SAI
else:
return A_DAI
# else
return Asset(cb_name)
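# --- Added usage sketch (editor's example, not part of the original module) ---
# Illustrates the intent of the Binance name translation above. Resolving an
# Asset requires rotkehlchen's asset data to be initialised, so the checks are
# wrapped in a helper instead of being executed at import time.
def _example_binance_translation() -> None:
    # 'BCC' was Binance's old Bitcoin Cash ticker; it maps to our 'BCH'.
    assert asset_from_binance('BCC') == Asset('BCH')
    # Lending/savings wallets are prefixed with 'LD'; the prefix is stripped.
    assert asset_from_binance('LDBTC') == Asset('BTC')
    # Coins with no external price data raise UnsupportedAsset.
    try:
        asset_from_binance('ETF')
    except UnsupportedAsset:
        pass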
|
py | b40f7012e07b20b1f113c971ea0ad8af92e1c4e9 | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements robustness verifications for decision-tree-based models.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Dict, List, Optional, Tuple, Union, TYPE_CHECKING
import numpy as np
from tqdm import trange
if TYPE_CHECKING:
from art.estimators.classification.classifier import ClassifierDecisionTree
logger = logging.getLogger(__name__)
class Interval:
"""
    Representation of the bounds of an interval.
"""
def __init__(self, lower_bound: float, upper_bound: float) -> None:
"""
An interval of a feature.
:param lower_bound: The lower boundary of the feature.
:param upper_bound: The upper boundary of the feature.
"""
self.lower_bound = lower_bound
self.upper_bound = upper_bound
class Box:
"""
    Representation of a box of interval bounds.
"""
def __init__(self, intervals: Optional[Dict[int, Interval]] = None) -> None:
"""
A box of intervals.
:param intervals: A dictionary of intervals with features as keys.
"""
if intervals is None:
self.intervals = dict()
else:
self.intervals = intervals
def intersect_with_box(self, box: "Box") -> None:
"""
Get the intersection of two interval boxes. This function modifies this box instance.
:param box: Interval box to intersect with this box.
"""
for key, value in box.intervals.items():
if key not in self.intervals:
self.intervals[key] = value
else:
lower_bound = max(self.intervals[key].lower_bound, value.lower_bound)
upper_bound = min(self.intervals[key].upper_bound, value.upper_bound)
if lower_bound >= upper_bound:
self.intervals.clear()
break
self.intervals[key] = Interval(lower_bound, upper_bound)
def get_intersection(self, box: "Box") -> "Box":
"""
Get the intersection of two interval boxes. This function creates a new box instance.
:param box: Interval box to intersect with this box.
"""
box_new = Box(intervals=self.intervals.copy())
for key, value in box.intervals.items():
if key not in box_new.intervals:
box_new.intervals[key] = value
else:
lower_bound = max(box_new.intervals[key].lower_bound, value.lower_bound)
upper_bound = min(box_new.intervals[key].upper_bound, value.upper_bound)
if lower_bound >= upper_bound:
box_new.intervals.clear()
return box_new
box_new.intervals[key] = Interval(lower_bound, upper_bound)
return box_new
def __repr__(self):
return self.__class__.__name__ + "({})".format(self.intervals)
class LeafNode:
"""
Representation of a leaf node of a decision tree.
"""
def __init__(
self, tree_id: Optional[int], class_label: int, node_id: Optional[int], box: Box, value: float,
) -> None:
"""
Create a leaf node representation.
:param tree_id: ID of the decision tree.
:param class_label: ID of class to which this leaf node is contributing.
:param box: A box representing the n_feature-dimensional bounding intervals that reach this leaf node.
:param value: Prediction value at this leaf node.
"""
self.tree_id = tree_id
self.class_label = class_label
self.node_id = node_id
self.box = box
self.value = value
def __repr__(self):
return self.__class__.__name__ + "({}, {}, {}, {}, {})".format(
self.tree_id, self.class_label, self.node_id, self.box, self.value
)
class Tree:
"""
Representation of a decision tree.
"""
def __init__(self, class_id: Optional[int], leaf_nodes: List[LeafNode]) -> None:
"""
Create a decision tree representation.
:param class_id: ID of the class to which this decision tree contributes.
:param leaf_nodes: A list of leaf nodes of this decision tree.
"""
self.class_id = class_id
self.leaf_nodes = leaf_nodes
class RobustnessVerificationTreeModelsCliqueMethod:
"""
Robustness verification for decision-tree-based models.
Following the implementation in https://github.com/chenhongge/treeVerification (MIT License, 9 August 2019)
| Paper link: https://arxiv.org/abs/1906.03849
"""
def __init__(self, classifier: "ClassifierDecisionTree") -> None:
"""
Create robustness verification for a decision-tree-based classifier.
:param classifier: A trained decision-tree-based classifier.
"""
self._classifier = classifier
self._trees = self._classifier.get_trees()
def verify(
self,
x: np.ndarray,
y: np.ndarray,
eps_init: float,
norm: int = np.inf,
nb_search_steps: int = 10,
max_clique: int = 2,
max_level: int = 2,
) -> Tuple[float, float]:
"""
Verify the robustness of the classifier on the dataset `(x, y)`.
:param x: Feature data of shape `(nb_samples, nb_features)`.
:param y: Labels, one-vs-rest encoding of shape `(nb_samples, nb_classes)`.
:param eps_init: Attack budget for the first search step.
:param norm: The norm to apply epsilon.
:param nb_search_steps: The number of search steps.
:param max_clique: The maximum number of nodes in a clique.
:param max_level: The maximum number of clique search levels.
:return: A tuple of the average robustness bound and the verification error at `eps`.
"""
self.x: np.ndarray = x
self.y: np.ndarray = np.argmax(y, axis=1)
self.max_clique: int = max_clique
self.max_level: int = max_level
average_bound: float = 0.0
num_initial_successes: int = 0
num_samples: int = x.shape[0]
# pylint: disable=R1702
pbar = trange(num_samples, desc="Decision tree verification")
for i_sample in pbar:
eps: float = eps_init
robust_log: List[bool] = list()
i_robust = None
i_not_robust = None
eps_robust: float = 0.0
eps_not_robust: float = 0.0
best_score: Optional[float]
for i_step in range(nb_search_steps):
logger.info("Search step {0:d}: eps = {1:.4g}".format(i_step, eps))
is_robust = True
if self._classifier.nb_classes <= 2:
best_score = self._get_best_score(i_sample, eps, norm, target_label=None)
is_robust = (self.y[i_sample] < 0.5 and best_score < 0) or (
self.y[i_sample] > 0.5 and best_score > 0.0
)
else:
for i_class in range(self._classifier.nb_classes):
if i_class != self.y[i_sample]:
best_score = self._get_best_score(i_sample, eps, norm, target_label=i_class)
is_robust = is_robust and (best_score > 0.0)
if not is_robust:
break
robust_log.append(is_robust)
if is_robust:
if i_step == 0:
num_initial_successes += 1
logger.info("Model is robust at eps = {:.4g}".format(eps))
i_robust = i_step
eps_robust = eps
else:
logger.info("Model is not robust at eps = {:.4g}".format(eps))
i_not_robust = i_step
eps_not_robust = eps
if i_robust is None:
eps /= 2.0
else:
if i_not_robust is None:
if eps >= 1.0:
logger.info("Abort binary search because eps increased above 1.0")
break
eps = min(eps * 2.0, 1.0)
else:
eps = (eps_robust + eps_not_robust) / 2.0
if i_robust is not None:
clique_bound = eps_robust
average_bound += clique_bound
else:
logger.info(
"point %s: WARNING! no robust eps found, verification bound is set as 0 !", i_sample,
)
verified_error = 1.0 - num_initial_successes / num_samples
average_bound = average_bound / num_samples
logger.info("The average interval bound is: {:.4g}".format(average_bound))
logger.info("The verified error at eps = {0:.4g} is: {1:.4g}".format(eps_init, verified_error))
return average_bound, verified_error
def _get_k_partite_clique(
self, accessible_leaves: List[List[LeafNode]], label: int, target_label: Optional[int],
) -> Tuple[float, List]:
"""
Find the K partite cliques among the accessible leaf nodes.
:param accessible_leaves: List of lists of accessible leaf nodes.
:param label: The try label of the current sample.
:param target_label: The target label.
:return: The best score and a list of new cliques.
"""
new_nodes_list = list()
best_scores_sum = 0.0
# pylint: disable=R1702
for start_tree in range(0, len(accessible_leaves), self.max_clique):
cliques_old: List[Dict[str, Union[Box, float]]] = list()
cliques_new: List[Dict[str, Union[Box, float]]] = list()
# Start searching for cliques
for accessible_leaf in accessible_leaves[start_tree]:
if (
self._classifier.nb_classes > 2
and target_label is not None
and target_label == accessible_leaf.class_label
):
new_leaf_value = -accessible_leaf.value
else:
new_leaf_value = accessible_leaf.value
cliques_old.append({"box": accessible_leaf.box, "value": new_leaf_value})
            # Loop over the remaining trees in this clique window
for i_tree in range(start_tree + 1, min(len(accessible_leaves), start_tree + self.max_clique),):
cliques_new.clear()
# Loop over all existing cliques
for clique in cliques_old:
# Loop over leaf nodes in tree
for accessible_leaf in accessible_leaves[i_tree]:
leaf_box = accessible_leaf.box.get_intersection(clique["box"]) # type: ignore
if leaf_box.intervals:
if (
self._classifier.nb_classes > 2
and target_label is not None
and target_label == accessible_leaf.class_label
):
new_leaf_value = -accessible_leaf.value
else:
new_leaf_value = accessible_leaf.value
cliques_new.append(
{
"box": leaf_box,
"value": new_leaf_value + clique["value"], # type: ignore
}
)
cliques_old = cliques_new.copy()
new_nodes = list()
best_score = 0.0
for i, clique in enumerate(cliques_old):
# Create a new node without tree_id and node_id to represent clique
new_nodes.append(
LeafNode(
tree_id=None,
class_label=label,
node_id=None,
box=clique["box"], # type: ignore
value=clique["value"], # type: ignore
)
)
if i == 0:
best_score = clique["value"] # type: ignore
else:
if label < 0.5 and self._classifier.nb_classes <= 2:
best_score = max(best_score, clique["value"]) # type: ignore
else:
best_score = min(best_score, clique["value"]) # type: ignore
new_nodes_list.append(new_nodes)
best_scores_sum += best_score
return best_scores_sum, new_nodes_list
def _get_best_score(self, i_sample: int, eps: float, norm: int, target_label: Optional[int]) -> float:
"""
Get the list of best scores.
:param i_sample: Index of training sample in `x`.
:param eps: Attack budget epsilon.
:param norm: The norm to apply epsilon.
:param target_label: The target label.
:return: The best scores.
"""
nodes = self._get_accessible_leaves(i_sample, eps, norm, target_label)
best_score: float = 0.0
for i_level in range(self.max_level):
if self._classifier.nb_classes > 2 and i_level > 0:
target_label = None
best_score, nodes = self._get_k_partite_clique(nodes, label=self.y[i_sample], target_label=target_label)
# Stop if the root node has been reached
if len(nodes) <= 1:
break
return best_score
def _get_distance(self, box: Box, i_sample: int, norm: int) -> float:
"""
Determine the distance between sample and interval box.
:param box: Interval box.
:param i_sample: Index of training sample in `x`.
:param norm: The norm to apply epsilon.
:return: The distance.
"""
resulting_distance = 0.0
for feature, interval in box.intervals.items():
feature_value = self.x[i_sample, feature]
if interval.lower_bound < feature_value < interval.upper_bound:
distance = 0.0
else:
difference = max(feature_value - interval.upper_bound, interval.lower_bound - feature_value,)
if norm == 0:
distance = 1.0
elif norm == np.inf:
distance = difference
else:
distance = pow(difference, norm)
if norm == np.inf:
resulting_distance = max(resulting_distance, distance)
else:
resulting_distance += distance
if norm not in [0, np.inf]:
resulting_distance = pow(resulting_distance, 1.0 / norm)
return resulting_distance
def _get_accessible_leaves(
self, i_sample: int, eps: float, norm: int, target_label: Optional[int]
) -> List[List[LeafNode]]:
"""
Determine the leaf nodes accessible within the attack budget.
:param i_sample: Index of training sample in `x`.
:param eps: Attack budget epsilon.
:param norm: The norm to apply epsilon.
:param target_label: The target label.
:return: A list of lists of leaf nodes.
"""
accessible_leaves = list()
for tree in self._trees:
if (
self._classifier.nb_classes <= 2
or target_label is None
or tree.class_id in [self.y[i_sample], target_label]
):
leaves = list()
for leaf_node in tree.leaf_nodes:
distance = self._get_distance(leaf_node.box, i_sample, norm)
if leaf_node.box and distance <= eps:
leaves.append(leaf_node)
if not leaves:
raise ValueError("No accessible leaves found.")
accessible_leaves.append(leaves)
return accessible_leaves
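# --- Added usage sketch (editor's example, not part of the original module) ---
# Shows how the verifier above is typically driven once a decision-tree-based
# ART classifier (any wrapper implementing get_trees(), e.g. a wrapped gradient
# boosting model) is available. Wrapping the model is omitted because it
# depends on the concrete tree library; the parameter values are illustrative.
def _example_verify(classifier: "ClassifierDecisionTree", x_test: np.ndarray, y_test: np.ndarray):
    verifier = RobustnessVerificationTreeModelsCliqueMethod(classifier=classifier)
    average_bound, verified_error = verifier.verify(
        x=x_test,
        y=y_test,  # one-vs-rest / one-hot encoded labels, shape (n, nb_classes)
        eps_init=0.3,  # starting attack budget for the binary search
        norm=np.inf,
        nb_search_steps=10,
        max_clique=2,
        max_level=2,
    )
    return average_bound, verified_error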
|
py | b40f704c8390b6cb53dab4cfed5df21578374cab | name = 'mari'
a = "Setting %20r must be uppercase." % name
print(a)
i = 10
b = f'_{i:03X}'
print(b)
coord = (1, 2)
c = f'{coord[1]:f} {coord[0]:f}'
print(c)
d = '%i' % i
e = f'{i:.03f}'
print(d,e)
print(f"{a} hello") |
py | b40f70604c4228f5c5c796b518e720810e46cac0 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper functions and test harnesses for source implementations.
This module contains helper functions and test harnesses for checking
correctness of source (a subclass of ``iobase.BoundedSource``) and range
tracker (a subclass of``iobase.RangeTracker``) implementations.
Contains a few lightweight utilities (e.g. reading items from a source such as
``readFromSource()``), as well as heavyweight property testing and stress
testing harnesses that help to get a large amount of test coverage with little
code.
Most notable ones are:
* ``assertSourcesEqualReferenceSource()`` helps testing that the data read by
the union of sources produced by ``BoundedSource.split()`` is the same as data
read by the original source.
* If your source implements dynamic work rebalancing, use the
``assertSplitAtFraction()`` family of functions - they test behavior of
``RangeTracker.try_split()``, in particular, that various consistency
properties are respected and the total set of data read by the source is
preserved when splits happen. Use ``assertSplitAtFractionBehavior()`` to test
individual cases of ``RangeTracker.try_split()`` and use
``assertSplitAtFractionExhaustive()`` as a heavy-weight stress test including
concurrency. We strongly recommend to use both.
For example usages, see the unit tests of modules such as
* apache_beam.io.source_test_utils_test.py
* apache_beam.io.avroio_test.py
"""
# pytype: skip-file
import logging
import threading
import weakref
from collections import namedtuple
from multiprocessing.pool import ThreadPool
from apache_beam.io import iobase
from apache_beam.testing.util import equal_to
__all__ = [
'read_from_source',
'assert_sources_equal_reference_source',
'assert_reentrant_reads_succeed',
'assert_split_at_fraction_behavior',
'assert_split_at_fraction_binary',
'assert_split_at_fraction_exhaustive',
'assert_split_at_fraction_fails',
'assert_split_at_fraction_succeeds_and_consistent'
]
_LOGGER = logging.getLogger(__name__)
class ExpectedSplitOutcome(object):
MUST_SUCCEED_AND_BE_CONSISTENT = 1
MUST_FAIL = 2
MUST_BE_CONSISTENT_IF_SUCCEEDS = 3
SplitAtFractionResult = namedtuple(
'SplitAtFractionResult', 'num_primary_items num_residual_items')
SplitFractionStatistics = namedtuple(
'SplitFractionStatistics', 'successful_fractions non_trivial_fractions')
def read_from_source(source, start_position=None, stop_position=None):
"""Reads elements from the given ```BoundedSource```.
Only reads elements within the given position range.
Args:
source (~apache_beam.io.iobase.BoundedSource):
:class:`~apache_beam.io.iobase.BoundedSource` implementation.
start_position (int): start position for reading.
stop_position (int): stop position for reading.
Returns:
List[str]: the set of values read from the sources.
"""
values = []
range_tracker = source.get_range_tracker(start_position, stop_position)
assert isinstance(range_tracker, iobase.RangeTracker)
reader = source.read(range_tracker)
for value in reader:
values.append(value)
return values
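# --- Added usage sketch (editor's example, not part of the original module) ---
# A tiny in-memory BoundedSource, just detailed enough to exercise
# read_from_source() above. A real source would implement a meaningful split()
# and estimate_size(); both are kept deliberately trivial here.
from apache_beam.io import range_trackers as _example_range_trackers
class _ExampleListSource(iobase.BoundedSource):
  def __init__(self, values):
    self._values = list(values)
  def estimate_size(self):
    return len(self._values)
  def get_range_tracker(self, start_position, stop_position):
    start = 0 if start_position is None else start_position
    stop = len(self._values) if stop_position is None else stop_position
    return _example_range_trackers.OffsetRangeTracker(start, stop)
  def read(self, range_tracker):
    for index in range(range_tracker.start_position(),
                       range_tracker.stop_position()):
      if not range_tracker.try_claim(index):
        return
      yield self._values[index]
  def split(self, desired_bundle_size, start_position=None, stop_position=None):
    start = 0 if start_position is None else start_position
    stop = len(self._values) if stop_position is None else stop_position
    yield iobase.SourceBundle(stop - start, self, start, stop)
# read_from_source(_ExampleListSource(['a', 'b', 'c'])) returns ['a', 'b', 'c'].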
def _ThreadPool(threads):
# ThreadPool crashes in old versions of Python (< 2.7.5) if created from a
# child thread. (http://bugs.python.org/issue10015)
if not hasattr(threading.current_thread(), '_children'):
threading.current_thread()._children = weakref.WeakKeyDictionary()
return ThreadPool(threads)
def assert_sources_equal_reference_source(reference_source_info, sources_info):
"""Tests if a reference source is equal to a given set of sources.
Given a reference source (a :class:`~apache_beam.io.iobase.BoundedSource`
and a position range) and a list of sources, assert that the union of the
records read from the list of sources is equal to the records read from the
reference source.
Args:
reference_source_info\
(Tuple[~apache_beam.io.iobase.BoundedSource, int, int]):
a three-tuple that gives the reference
:class:`~apache_beam.io.iobase.BoundedSource`, position to start
reading at, and position to stop reading at.
sources_info\
(Iterable[Tuple[~apache_beam.io.iobase.BoundedSource, int, int]]):
a set of sources. Each source is a three-tuple that is of the same
format described above.
Raises:
ValueError: if the set of data produced by the reference source
and the given set of sources are not equivalent.
"""
if not (isinstance(reference_source_info, tuple) and
len(reference_source_info) == 3 and
isinstance(reference_source_info[0], iobase.BoundedSource)):
raise ValueError(
        'reference_source_info must be a three-tuple where the first '
        'item of the tuple gives an '
        'iobase.BoundedSource. Received: %r' % reference_source_info)
reference_records = read_from_source(*reference_source_info)
source_records = []
for source_info in sources_info:
assert isinstance(source_info, tuple)
assert len(source_info) == 3
if not (isinstance(source_info, tuple) and len(source_info) == 3 and
isinstance(source_info[0], iobase.BoundedSource)):
raise ValueError(
          'source_info must be a three-tuple where the first '
          'item of the tuple gives an '
          'iobase.BoundedSource. Received: %r' % source_info)
if (type(reference_source_info[0].default_output_coder()) != type(
source_info[0].default_output_coder())):
raise ValueError(
'Reference source %r and the source %r must use the same coder. '
'They are using %r and %r respectively instead.' % (
reference_source_info[0],
source_info[0],
type(reference_source_info[0].default_output_coder()),
type(source_info[0].default_output_coder())))
source_records.extend(read_from_source(*source_info))
if len(reference_records) != len(source_records):
raise ValueError(
'Reference source must produce the same number of records as the '
'list of sources. Number of records were %d and %d instead.' %
(len(reference_records), len(source_records)))
if equal_to(reference_records)(source_records):
raise ValueError(
'Reference source and provided list of sources must produce the '
'same set of records.')
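# --- Illustrative sketch, not part of the original Apache Beam module ---
# One common way to drive the assertion above from a unit test: split a source
# into bundles with BoundedSource.split() and check that the bundles together
# read the same records as the unsplit source. 'full_source' is a hypothetical
# BoundedSource implementation supplied by the caller.
def _example_assert_split_bundles_equal(full_source, desired_bundle_size):
  reference_source_info = (full_source, None, None)
  sources_info = [(bundle.source, bundle.start_position, bundle.stop_position)
                  for bundle in full_source.split(desired_bundle_size)]
  assert_sources_equal_reference_source(reference_source_info, sources_info)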
def assert_reentrant_reads_succeed(source_info):
"""Tests if a given source can be read in a reentrant manner.
  Assume that the given source produces the set of values ``{v1, v2, v3, ... vn}``.
For ``i`` in range ``[1, n-1]`` this method performs a reentrant read after
reading ``i`` elements and verifies that both the original and reentrant read
produce the expected set of values.
Args:
source_info (Tuple[~apache_beam.io.iobase.BoundedSource, int, int]):
a three-tuple that gives the reference
:class:`~apache_beam.io.iobase.BoundedSource`, position to start reading
at, and a position to stop reading at.
Raises:
    ValueError: if the source is too trivial or a reentrant read results
      in an incorrect read.
"""
source, start_position, stop_position = source_info
assert isinstance(source, iobase.BoundedSource)
expected_values = [
val for val in source.read(
source.get_range_tracker(start_position, stop_position))
]
if len(expected_values) < 2:
raise ValueError(
'Source is too trivial since it produces only %d '
'values. Please give a source that reads at least 2 '
'values.' % len(expected_values))
for i in range(1, len(expected_values) - 1):
read_iter = source.read(
source.get_range_tracker(start_position, stop_position))
original_read = []
for _ in range(i):
original_read.append(next(read_iter))
# Reentrant read
reentrant_read = [
val for val in source.read(
source.get_range_tracker(start_position, stop_position))
]
# Continuing original read.
for val in read_iter:
original_read.append(val)
if equal_to(original_read)(expected_values):
raise ValueError(
'Source did not produce expected values when '
'performing a reentrant read after reading %d values. '
'Expected %r received %r.' % (i, expected_values, original_read))
if equal_to(reentrant_read)(expected_values):
raise ValueError(
'A reentrant read of source after reading %d values '
'did not produce expected values. Expected %r '
'received %r.' % (i, expected_values, reentrant_read))
def assert_split_at_fraction_behavior(
source, num_items_to_read_before_split, split_fraction, expected_outcome):
"""Verifies the behaviour of splitting a source at a given fraction.
Asserts that splitting a :class:`~apache_beam.io.iobase.BoundedSource` either
fails after reading **num_items_to_read_before_split** items, or succeeds in
a way that is consistent according to
:func:`assert_split_at_fraction_succeeds_and_consistent()`.
Args:
source (~apache_beam.io.iobase.BoundedSource): the source to perform
dynamic splitting on.
num_items_to_read_before_split (int): number of items to read before
splitting.
split_fraction (float): fraction to split at.
expected_outcome (int): a value from
:class:`~apache_beam.io.source_test_utils.ExpectedSplitOutcome`.
Returns:
Tuple[int, int]: a tuple that gives the number of items produced by reading
the two ranges produced after dynamic splitting. If splitting did not
occur, the first value of the tuple will represent the full set of records
read by the source while the second value of the tuple will be ``-1``.
"""
assert isinstance(source, iobase.BoundedSource)
expected_items = read_from_source(source, None, None)
return _assert_split_at_fraction_behavior(
source,
expected_items,
num_items_to_read_before_split,
split_fraction,
expected_outcome)
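# Illustrative note, not part of the original module: how to read the tuple
# returned by assert_split_at_fraction_behavior above, for a source that
# produces, say, 100 records in total:
#
#   (60, 40)  -> the split succeeded; the primary range produced 60 records
#                and the residual range produced 40.
#   (100, -1) -> the split did not occur; the full set of 100 records was read
#                and the second element is -1.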
def _assert_split_at_fraction_behavior(
source,
expected_items,
num_items_to_read_before_split,
split_fraction,
expected_outcome,
start_position=None,
stop_position=None):
range_tracker = source.get_range_tracker(start_position, stop_position)
assert isinstance(range_tracker, iobase.RangeTracker)
current_items = []
reader = source.read(range_tracker)
# Reading 'num_items_to_read_before_split' items.
reader_iter = iter(reader)
for _ in range(num_items_to_read_before_split):
current_items.append(next(reader_iter))
suggested_split_position = range_tracker.position_at_fraction(split_fraction)
stop_position_before_split = range_tracker.stop_position()
split_result = range_tracker.try_split(suggested_split_position)
if split_result is not None:
if len(split_result) != 2:
raise ValueError(
'Split result must be a tuple that contains split '
'position and split fraction. Received: %r' % (split_result, ))
if range_tracker.stop_position() != split_result[0]:
raise ValueError(
'After a successful split, the stop position of the '
'RangeTracker must be the same as the returned split '
          'position. Observed %r and %r which are different.' %
          (range_tracker.stop_position(), split_result[0]))
    if split_result[1] < 0 or split_result[1] > 1:
      raise ValueError(
          'Split fraction returned by try_split() must be within the range '
          '[0, 1]. Observed split fraction was %r.' % (split_result[1], ))
stop_position_after_split = range_tracker.stop_position()
if split_result and stop_position_after_split == stop_position_before_split:
raise ValueError(
'Stop position %r did not change after a successful '
'split of source %r at fraction %r.' %
(stop_position_before_split, source, split_fraction))
if expected_outcome == ExpectedSplitOutcome.MUST_SUCCEED_AND_BE_CONSISTENT:
if not split_result:
raise ValueError(
'Expected split of source %r at fraction %r to be '
'successful after reading %d elements. But '
'the split failed.' %
(source, split_fraction, num_items_to_read_before_split))
elif expected_outcome == ExpectedSplitOutcome.MUST_FAIL:
if split_result:
raise ValueError(
'Expected split of source %r at fraction %r after '
'reading %d elements to fail. But splitting '
'succeeded with result %r.' % (
source,
split_fraction,
num_items_to_read_before_split,
split_result))
elif (
expected_outcome != ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS):
raise ValueError('Unknown type of expected outcome: %r' % expected_outcome)
current_items.extend([value for value in reader_iter])
residual_range = (
split_result[0], stop_position_before_split) if split_result else None
return _verify_single_split_fraction_result(
source,
expected_items,
current_items,
split_result,
(range_tracker.start_position(), range_tracker.stop_position()),
residual_range,
split_fraction)
def _range_to_str(start, stop):
return '[' + (str(start) + ',' + str(stop) + ')')
def _verify_single_split_fraction_result(
source,
expected_items,
current_items,
split_successful,
primary_range,
residual_range,
split_fraction):
assert primary_range
primary_items = read_from_source(source, *primary_range)
if not split_successful:
# For unsuccessful splits, residual_range should be None.
assert not residual_range
residual_items = (
read_from_source(source, *residual_range) if split_successful else [])
total_items = primary_items + residual_items
if current_items != primary_items:
raise ValueError(
'Current source %r and a source created using the '
'range of the primary source %r determined '
'by performing dynamic work rebalancing at fraction '
'%r produced different values. Expected '
'these sources to produce the same list of values.' %
(source, _range_to_str(*primary_range), split_fraction))
if expected_items != total_items:
raise ValueError(
'Items obtained by reading the source %r for primary '
'and residual ranges %s and %s did not produce the '
'expected list of values.' %
(source, _range_to_str(*primary_range), _range_to_str(*residual_range)))
result = (len(primary_items), len(residual_items) if split_successful else -1)
return result
def assert_split_at_fraction_succeeds_and_consistent(
source, num_items_to_read_before_split, split_fraction):
"""Verifies some consistency properties of dynamic work rebalancing.
Equivalent to the following pseudocode:::
original_range_tracker = source.getRangeTracker(None, None)
original_reader = source.read(original_range_tracker)
items_before_split = read N items from original_reader
    suggested_split_position = original_range_tracker.position_at_fraction(
        split_fraction)
    original_stop_position = original_range_tracker.stop_position()
    split_result = original_range_tracker.try_split(suggested_split_position)
split_position, split_fraction = split_result
primary_range_tracker = source.get_range_tracker(
original_range_tracker.start_position(), split_position)
residual_range_tracker = source.get_range_tracker(split_position,
original_stop_position)
assert that: items when reading source.read(primary_range_tracker) ==
items_before_split + items from continuing to read 'original_reader'
assert that: items when reading source.read(original_range_tracker) =
items when reading source.read(primary_range_tracker) + items when reading
source.read(residual_range_tracker)
Args:
source: source to perform dynamic work rebalancing on.
num_items_to_read_before_split: number of items to read before splitting.
split_fraction: fraction to split at.
"""
assert_split_at_fraction_behavior(
source,
num_items_to_read_before_split,
split_fraction,
ExpectedSplitOutcome.MUST_SUCCEED_AND_BE_CONSISTENT)
def assert_split_at_fraction_fails(
source, num_items_to_read_before_split, split_fraction):
"""Asserts that dynamic work rebalancing at a given fraction fails.
Asserts that trying to perform dynamic splitting after reading
'num_items_to_read_before_split' items from the source fails.
Args:
source: source to perform dynamic splitting on.
num_items_to_read_before_split: number of items to read before splitting.
split_fraction: fraction to split at.
"""
assert_split_at_fraction_behavior(
source,
num_items_to_read_before_split,
split_fraction,
ExpectedSplitOutcome.MUST_FAIL)
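# Illustrative sketch, not part of the original module: direct use of the two
# helpers above with hypothetical item counts and fractions, where 'my_source'
# is any BoundedSource implementation under test.
#
#   assert_split_at_fraction_succeeds_and_consistent(my_source, 10, 0.5)
#   assert_split_at_fraction_fails(my_source, 10, 0.0)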
def assert_split_at_fraction_binary(
source,
expected_items,
num_items_to_read_before_split,
left_fraction,
left_result,
right_fraction,
right_result,
stats,
start_position=None,
stop_position=None):
"""Performs dynamic work rebalancing for fractions within a given range.
Asserts that given a start position, a source can be split at every
interesting fraction (halfway between two fractions that differ by at
least one item) and the results are consistent if a split succeeds.
Args:
source: source to perform dynamic splitting on.
expected_items: total set of items expected when reading the source.
num_items_to_read_before_split: number of items to read before splitting.
left_fraction: left fraction for binary splitting.
left_result: result received by splitting at left fraction.
right_fraction: right fraction for binary splitting.
right_result: result received by splitting at right fraction.
stats: a ``SplitFractionStatistics`` for storing results.
"""
assert right_fraction > left_fraction
if right_fraction - left_fraction < 0.001:
# This prevents infinite recursion.
return
middle_fraction = (left_fraction + right_fraction) / 2
if left_result is None:
left_result = _assert_split_at_fraction_behavior(
source,
expected_items,
num_items_to_read_before_split,
left_fraction,
ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
if right_result is None:
right_result = _assert_split_at_fraction_behavior(
source,
expected_items,
num_items_to_read_before_split,
right_fraction,
ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
middle_result = _assert_split_at_fraction_behavior(
source,
expected_items,
num_items_to_read_before_split,
middle_fraction,
ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
if middle_result[1] != -1:
stats.successful_fractions.append(middle_fraction)
if middle_result[1] > 0:
stats.non_trivial_fractions.append(middle_fraction)
# Two split results are equivalent if primary and residual ranges of them
  # produce the same number of records (simply checking the size of the primary
  # is enough since the total number of records is constant).
if left_result[0] != middle_result[0]:
assert_split_at_fraction_binary(
source,
expected_items,
num_items_to_read_before_split,
left_fraction,
left_result,
middle_fraction,
middle_result,
stats)
# We special case right_fraction=1.0 since that could fail due to being out
# of range. (even if a dynamic split fails at 'middle_fraction' and at
# fraction 1.0, there might be fractions in range ('middle_fraction', 1.0)
# where dynamic splitting succeeds).
if right_fraction == 1.0 or middle_result[0] != right_result[0]:
assert_split_at_fraction_binary(
source,
expected_items,
num_items_to_read_before_split,
middle_fraction,
middle_result,
right_fraction,
right_result,
stats)
MAX_CONCURRENT_SPLITTING_TRIALS_PER_ITEM = 100
MAX_CONCURRENT_SPLITTING_TRIALS_TOTAL = 1000
def assert_split_at_fraction_exhaustive(
source,
start_position=None,
stop_position=None,
perform_multi_threaded_test=True):
"""Performs and tests dynamic work rebalancing exhaustively.
Asserts that for each possible start position, a source can be split at
every interesting fraction (halfway between two fractions that differ by at
least one item) and the results are consistent if a split succeeds.
Verifies multi threaded splitting as well.
Args:
source (~apache_beam.io.iobase.BoundedSource): the source to perform
dynamic splitting on.
perform_multi_threaded_test (bool): if :data:`True` performs a
multi-threaded test, otherwise this test is skipped.
Raises:
ValueError: if the exhaustive splitting test fails.
"""
expected_items = read_from_source(source, start_position, stop_position)
if not expected_items:
raise ValueError('Source %r is empty.' % source)
if len(expected_items) == 1:
raise ValueError('Source %r only reads a single item.' % source)
all_non_trivial_fractions = []
any_successful_fractions = False
any_non_trivial_fractions = False
for i in range(len(expected_items)):
stats = SplitFractionStatistics([], [])
assert_split_at_fraction_binary(
source, expected_items, i, 0.0, None, 1.0, None, stats)
if stats.successful_fractions:
any_successful_fractions = True
if stats.non_trivial_fractions:
any_non_trivial_fractions = True
all_non_trivial_fractions.append(stats.non_trivial_fractions)
if not any_successful_fractions:
raise ValueError(
'SplitAtFraction test completed vacuously: no '
'successful split fractions found')
if not any_non_trivial_fractions:
raise ValueError(
'SplitAtFraction test completed vacuously: no non-trivial split '
'fractions found')
if not perform_multi_threaded_test:
return
num_total_trials = 0
for i in range(len(expected_items)):
non_trivial_fractions = [2.0] # 2.0 is larger than any valid fraction.
non_trivial_fractions.extend(all_non_trivial_fractions[i])
min_non_trivial_fraction = min(non_trivial_fractions)
if min_non_trivial_fraction == 2.0:
      # No non-trivial fractions were found for this item. This cannot happen
      # for every item, otherwise the previous test would have failed as
      # vacuous, so just skip this item.
continue
num_trials = 0
have_success = False
have_failure = False
thread_pool = _ThreadPool(2)
try:
while True:
num_trials += 1
if num_trials > MAX_CONCURRENT_SPLITTING_TRIALS_PER_ITEM:
_LOGGER.warning(
'After %d concurrent splitting trials at item #%d, observed '
'only %s, giving up on this item',
num_trials,
i,
'success' if have_success else 'failure')
break
if _assert_split_at_fraction_concurrent(source,
expected_items,
i,
min_non_trivial_fraction,
thread_pool):
have_success = True
else:
have_failure = True
if have_success and have_failure:
_LOGGER.info(
'%d trials to observe both success and failure of '
'concurrent splitting at item #%d',
num_trials,
i)
break
finally:
thread_pool.close()
num_total_trials += num_trials
if num_total_trials > MAX_CONCURRENT_SPLITTING_TRIALS_TOTAL:
_LOGGER.warning(
'After %d total concurrent splitting trials, considered '
'only %d items, giving up.',
num_total_trials,
i)
break
_LOGGER.info(
'%d total concurrent splitting trials for %d items',
num_total_trials,
len(expected_items))
def _assert_split_at_fraction_concurrent(
source,
expected_items,
num_items_to_read_before_splitting,
split_fraction,
thread_pool=None):
range_tracker = source.get_range_tracker(None, None)
stop_position_before_split = range_tracker.stop_position()
reader = source.read(range_tracker)
reader_iter = iter(reader)
current_items = []
for _ in range(num_items_to_read_before_splitting):
current_items.append(next(reader_iter))
def read_or_split(test_params):
if test_params[0]:
return [val for val in test_params[1]]
else:
position = test_params[1].position_at_fraction(test_params[2])
result = test_params[1].try_split(position)
return result
inputs = []
pool = thread_pool if thread_pool else _ThreadPool(2)
try:
inputs.append([True, reader_iter])
inputs.append([False, range_tracker, split_fraction])
results = pool.map(read_or_split, inputs)
finally:
if not thread_pool:
pool.close()
current_items.extend(results[0])
primary_range = (
range_tracker.start_position(), range_tracker.stop_position())
split_result = results[1]
residual_range = (
split_result[0], stop_position_before_split) if split_result else None
res = _verify_single_split_fraction_result(
source,
expected_items,
current_items,
split_result,
primary_range,
residual_range,
split_fraction)
return res[1] > 0
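# --- Illustrative sketch, not part of the original Apache Beam module ---
# A minimal end-to-end exercise of the helpers in this module, assuming
# 'source' is some BoundedSource implementation provided by the caller.
def _example_exercise_source(source):
  # Read everything once; this also sanity-checks get_range_tracker()/read().
  expected_values = read_from_source(source, None, None)
  # The source must tolerate reentrant reads.
  assert_reentrant_reads_succeed((source, None, None))
  # Exhaustively test dynamic work rebalancing (single and multi threaded).
  assert_split_at_fraction_exhaustive(source)
  return expected_values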
|
py | b40f7088f2d3f0c9544007d73da44275c7a9a4f0 | #coding=utf-8
import json
from django import forms
from django.contrib import admin
from django.core.exceptions import FieldError, ObjectDoesNotExist
from django.http import (HttpResponse, HttpResponseRedirect,
HttpResponseBadRequest, HttpResponseForbidden)
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.html import escapejs
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, ungettext
from django.views.decorators.csrf import csrf_exempt
from feincms.module.medialibrary.models import Category, MediaFile
from feincms.templatetags import feincms_thumbnail
from models import Gallery, GalleryMediaFile
class MediaFileWidget(forms.TextInput):
"""
TextInput widget, shows a link to the current value if there is one.
"""
def render(self, name, value, attrs=None):
inputfield = super(MediaFileWidget, self).render(name, value, attrs)
if value:
try:
mf = MediaFile.objects.get(pk=value)
except MediaFile.DoesNotExist:
return inputfield
try:
caption = mf.translation.caption
except (ObjectDoesNotExist, AttributeError):
caption = _('(no caption)')
if mf.type == 'image':
image = feincms_thumbnail.thumbnail(mf.file.name, '188x142')
image = u'background: url(%(url)s) center center no-repeat;' % {'url': image}
else:
image = u''
return mark_safe(u"""
<div style="%(image)s" class="admin-gallery-image-bg absolute">
<p class="admin-gallery-image-caption absolute">%(caption)s</p>
%(inputfield)s</div>""" % {
'image': image,
'caption': caption,
'inputfield': inputfield})
return inputfield
class ThumbnailForm(forms.Form):
id = forms.ModelChoiceField(
queryset=MediaFile.objects.filter(type='image')
)
width = forms.IntegerField(min_value=0)
height = forms.IntegerField(min_value=0)
@csrf_exempt
def admin_thumbnail(request):
content = u''
if request.method == 'POST' and request.is_ajax():
form = ThumbnailForm(request.POST)
if not form.is_valid():
return HttpResponseBadRequest(form.errors)
data = form.cleaned_data
obj = data['id']
dimensions = '%sx%s' % (data['width'], data['height'])
if obj.type == 'image':
image = None
try:
image = feincms_thumbnail.thumbnail(obj.file.name, dimensions)
            except Exception:
                # Thumbnail generation can fail (e.g. a missing or corrupt
                # file); fall through and return an empty response body.
                pass
if image:
content = json.dumps({
'url': image.url,
'name': escapejs(obj.translation.caption)
})
return HttpResponse(content, mimetype='application/json')
else:
return HttpResponseForbidden()
admin_thumbnail.short_description = _('Image')
admin_thumbnail.allow_tags = True
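# Illustrative sketch, not part of the original module: the request/response
# contract of the admin_thumbnail view above. The URL is hypothetical and
# depends on how the view is wired into your admin URLs.
#
#   from django.test import Client
#   response = Client().post(
#       '/admin/gallery/thumbnail/', # hypothetical URL for the view
#       {'id': mediafile.pk, 'width': 100, 'height': 100},
#       HTTP_X_REQUESTED_WITH='XMLHttpRequest') # so request.is_ajax() is True
#   # On success the body is JSON of the form {"url": ..., "name": ...}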
class MediaFileAdminForm(forms.ModelForm):
mediafile = forms.ModelChoiceField(queryset=MediaFile.objects.filter(type='image'),
widget=MediaFileWidget(attrs={'class': 'image-fk'}), label=_('media file'))
class Meta:
model = GalleryMediaFile
fields = ['mediafile']
class GalleryMediaFileAdmin(admin.ModelAdmin):
form = MediaFileAdminForm
model = GalleryMediaFile
list_display = ['__unicode__', admin_thumbnail]
classes = ['sortable']
class GalleryMediaFileInline(admin.StackedInline):
model = GalleryMediaFile
raw_id_fields = ('mediafile',)
extra = 0
form = MediaFileAdminForm
classes = ['sortable']
ordering = ['ordering']
template = 'admin/gallery/gallery/stacked.html'
class GalleryAdmin(admin.ModelAdmin):
inlines = (GalleryMediaFileInline,)
list_display = ['title', 'verbose_images']
class AddCategoryForm(forms.Form):
_selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
category = forms.ModelChoiceField(Category.objects)
def assign_category(self, request, queryset):
form = None
if 'apply' in request.POST:
form = self.AddCategoryForm(request.POST)
if form.is_valid():
category = form.cleaned_data['category']
count = 0
mediafiles = MediaFile.objects.filter(categories=category)
for gallery in queryset:
for mediafile in mediafiles:
try:
GalleryMediaFile.objects.create(gallery = gallery, mediafile=mediafile)
except FieldError:
pass
count += 1
message = ungettext('Successfully added %(count)d mediafiles in %(category)s Category.',
'Successfully added %(count)d mediafiles in %(category)s Categories.', count) % {
'count':count, 'category':category }
self.message_user(request, message)
return HttpResponseRedirect(request.get_full_path())
if not form:
form = self.AddCategoryForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render_to_response('admin/gallery/add_category.html', {'mediafiles': queryset,
'category_form': form,
}, context_instance=RequestContext(request))
assign_category.short_description = _('Assign Images from a Category to this Gallery')
actions = [assign_category]
admin.site.register(Gallery, GalleryAdmin)
|
py | b40f70997bc4d7ca0bac03825f093211bb7ec0bf | """
A simple Point class.
NOTE: This is NOT rosegraphics -- it is your OWN Point class.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Alex Gipson
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import math
def main():
""" Calls the TEST functions in this module. """
run_test_init()
run_test_repr()
run_test_clone()
run_test_move_to()
run_test_move_by()
run_test_get_number_of_moves_made()
run_test_get_distance_from()
run_test_get_distance_from_start()
run_test_get_distance_traveled()
run_test_closer_to()
run_test_halfway_to()
########################################################################
# IMPORTANT:
# Your instructor will help you get started on this exercise.
########################################################################
# ----------------------------------------------------------------------
# DONE: 2. With your instructor, READ THE INSTRUCTIONS
# in file m0_INSTRUCTIONS.txt, asking questions as needed.
#
# Then implement a class called Point that has NO METHODS yet,
# just the lines that start the definition of any class:
#
# class NAME_OF_CLASS(object):
# """ Brief description of what objects of the class 'are'."""
#
# Run the program and correct any syntax (notational) errors.
# ----------------------------------------------------------------------
########################################################################
# NOTE: For ALL of the methods that you implement, the method is allowed
# to have additional side effects as needed by it and/or other methods.
########################################################################
class Point(object):
    """ A point (x, y) in the plane that remembers its start and its moves. """
    def __init__(self, x, y):
        # Starting position (used by get_distance_from_start).
        self.a = x
        self.b = y
        # Current position.
        self.x = x
        self.y = y
        # Bookkeeping for the number of moves and the total distance traveled.
        self.num_move_called = 0
        self.distance_traveled = 0
    def __repr__(self):
        return "Point({}, {})".format(self.x, self.y)
    def clone(self):
        # A new, independent Point at the same position as this one.
        return Point(self.x, self.y)
    def move_to(self, c, d):
        # Remember the previous position so the length of this move is known.
        self.e = self.x
        self.f = self.y
        self.x = c
        self.y = d
        self.num_move_called = self.num_move_called + 1
        self.distance_traveled = self.distance_traveled + math.sqrt(
            (c - self.e) ** 2 + (d - self.f) ** 2)
    def move_by(self, dx, dy):
        self.x = self.x + dx
        self.y = self.y + dy
        self.num_move_called = self.num_move_called + 1
        self.distance_traveled = self.distance_traveled + math.sqrt(
            dx ** 2 + dy ** 2)
    def get_number_of_moves_made(self):
        return self.num_move_called
    def get_distance_from(self, point):
        distance = math.sqrt((self.x - point.x) ** 2 + (self.y - point.y) ** 2)
        return distance
    def get_distance_from_start(self):
        distance_from_start = math.sqrt(
            (self.x - self.a) ** 2 + (self.y - self.b) ** 2)
        return distance_from_start
    def get_distance_traveled(self):
        return self.distance_traveled
    def closer_to(self, pa, pb):
        distance1 = math.sqrt((self.x - pa.x) ** 2 + (self.y - pa.y) ** 2)
        distance2 = math.sqrt((self.x - pb.x) ** 2 + (self.y - pb.y) ** 2)
        # Return pa in the case of a tie, per the specification.
        if distance2 < distance1:
            return pb
        else:
            return pa
    def halfway_to(self, p):
        point = Point((self.x + p.x) / 2, (self.y + p.y) / 2)
        return point
def run_test_init():
"""
Tests the __init__ method of the Point class.
-- IMPORTANT: There are TWO underscores on each side.
-- Note: the __init__ method runs when one constructs
a Point. See examples below.
Here is the specification for the __init__ method:
What comes in:
-- self
-- an integer x
-- an integer y
where (x, y) is to be the initial position of this Point.
What goes out: Nothing (i.e., None).
Side effects: Sets two instance variables named:
x
y
to the given coordinate (i.e., to the given x and y).
Other methods should modify the instance variables
x
y
as needed so that they always indicate the CURRENT position
of the Point.
EXAMPLE: The following shows __init__ in action.
You may also use this example to test this method.
p1 = Point(30, 18)
print()
print('Expected for p1: 30 18')
print('Actual for p1: ', p1.x, p1.y)
p2 = Point(100, -40)
print()
print('Expected for p2: 100 -40')
print('Actual for p2: ', p2.x, p2.y)
print('Expected for p1: 30 18')
print('Actual for p1: ', p1.x, p1.y)
p1.y = 999
print()
print('Expected for p1: 30 999')
print('Actual for p1: ', p1.x, p1.y)
print('Expected for p2: 100 -40')
print('Actual for p2: ', p2.x, p2.y)
"""
# ------------------------------------------------------------------
# DONE: 3.
# a. Read the above specification of the __init__ method.
# Do NOT proceed until you understand WHAT it should do
# (but not necessarily HOW it will do it).
# NO CODE YET. Ask questions as needed.
#
# b. Examine the EXAMPLE code in the doc-string above.
# Make sure that you see how that code works, and how it
# TESTS the __init__ method. ** ASK QUESTIONS AS NEEDED. **
#
# c. Select the code in the EXAMPLE in the doc-string above.
# Copy-and-paste it into this RUN_TEST_INIT function, putting
# the copied code just below the PRINT statements below.
#
# Use the Tab and Shift-Tab keystrokes as needed to fix the
# indentation of the pasted code.
#
# You cannot RUN the copy-pasted tests because you
# have not (yet) implemented the __init__ method.
#
# d. Implement and test the __init__ method.
# Make sure that you UNDERSTAND your code and are not just
# "pattern matching" from examples.
# ASK QUESIONS AS NEEDED. COMMIT YOUR WORK.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the __init__ method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(30, 18)
print()
print('Expected for p1: 30 18')
print('Actual for p1: ', p1.x, p1.y)
p2 = Point(100, -40)
print()
print('Expected for p2: 100 -40')
print('Actual for p2: ', p2.x, p2.y)
print('Expected for p1: 30 18')
print('Actual for p1: ', p1.x, p1.y)
p1.y = 999
print()
print('Expected for p1: 30 999')
print('Actual for p1: ', p1.x, p1.y)
print('Expected for p2: 100 -40')
print('Actual for p2: ', p2.x, p2.y)
def run_test_repr():
"""
Tests the __repr__ method of the Point class.
-- IMPORTANT: There are TWO underscores on each side.
-- Note: the __repr__ method is called by the PRINT
function and other functions that DISPLAY a Point object.
See examples below.
Here is the specification for the __repr__ method:
What comes in:
-- self
What goes out:
Returns a string that represents a Point like this:
'Point(x, y)'
where x and y are replaced by this Point's
x and y coordinates.
Side effects: None.
EXAMPLE: The following shows __repr__ in action.
You may also use this example to test this method.
p1 = Point(30, 18)
print()
print('Expected for p1: Point(30, 18)')
print('Actual for p1: ', p1)
p2 = Point(100, -40)
print()
print('Expected for p2: Point(100, -40)')
print('Actual for p2: ', p2)
print('Expected for p1: Point(30, 18)')
print('Actual for p1: ', p1)
p1.y = 999
print()
print('Expected for p1: Point(30, 999)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(100, -40)')
print('Actual for p2: ', p2)
"""
# ------------------------------------------------------------------
# DONE: 4. Follow the same instructions as in TO-DO 3 above,
# but for the __repr__ method specified above.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the __repr__ method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(30, 18)
print()
print('Expected for p1: Point(30, 18)')
print('Actual for p1: ', p1)
p2 = Point(100, -40)
print()
print('Expected for p2: Point(100, -40)')
print('Actual for p2: ', p2)
print('Expected for p1: Point(30, 18)')
print('Actual for p1: ', p1)
p1.y = 999
print()
print('Expected for p1: Point(30, 999)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(100, -40)')
print('Actual for p2: ', p2)
def run_test_clone():
"""
Tests the clone method of the Point class.
Here is the specification for the clone method:
What comes in:
-- self
What goes out:
Returns a new Point whose x and y coordinates are the same
as the x and y coordinates of this Point.
Side effects: None.
EXAMPLE: The following shows clone in action.
You may also use this example to test this method.
p1 = Point(10, 8)
print()
print('Expected for p1: Point(10, 8)')
print('Actual for p1: ', p1)
p2 = p1.clone()
p3 = p2.clone()
print()
print('Expected for p1: Point(10, 8)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(10, 8)')
print('Actual for p2: ', p2)
print('Expected for p3: Point(10, 8)')
print('Actual for p3: ', p3)
p1.x = 999
print()
print('Expected for p1: Point(999, 8)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(10, 8)')
print('Actual for p2: ', p2)
print('Expected for p3: Point(10, 8)')
print('Actual for p3: ', p3)
p1.y = 333
p2 = Point(11, 22)
p3.x = 777
p3.y = 555
print()
print('Expected for p1: Point(999. 333)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(11, 22)')
print('Actual for p2: ', p2)
print('Expected for p3: Point(777, 555)')
print('Actual for p3: ', p3)
"""
# ------------------------------------------------------------------
# DONE: 5. Follow the same instructions as in TO-DO 3 above,
# but for the clone method specified above.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the clone method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(10, 8)
print()
print('Expected for p1: Point(10, 8)')
print('Actual for p1: ', p1)
p2 = p1.clone()
p3 = p2.clone()
print()
print('Expected for p1: Point(10, 8)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(10, 8)')
print('Actual for p2: ', p2)
print('Expected for p3: Point(10, 8)')
print('Actual for p3: ', p3)
p1.x = 999
print()
print('Expected for p1: Point(999, 8)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(10, 8)')
print('Actual for p2: ', p2)
print('Expected for p3: Point(10, 8)')
print('Actual for p3: ', p3)
p1.y = 333
p2 = Point(11, 22)
p3.x = 777
p3.y = 555
print()
print('Expected for p1: Point(999. 333)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(11, 22)')
print('Actual for p2: ', p2)
print('Expected for p3: Point(777, 555)')
print('Actual for p3: ', p3)
def run_test_move_to():
"""
Tests the move_to method of the Point class.
Here is the specification for the move_to method:
What comes in:
-- self
-- an integer x
-- an integer y
What goes out: Nothing (i.e., None).
Side effects: Changes the instance variables
x
y
that store the position of this Point to the given x and y.
This has the effect of "moving" this Point TO the given (x, y).
EXAMPLE: The following shows move_to in action.
You may also use this example to test this method.
p1 = Point(10, 8)
p2 = Point(50, 20)
print()
print('Expected for p1: Point(10, 8)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(50, 20)')
print('Actual for p2: ', p2)
p1.move_to(5, -1)
p2.move_to(0, 0)
print()
print('Expected for p1: Point(5, -1)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(0, 0)')
print('Actual for p2: ', p2)
p2.y = 99
print()
print('Expected for p1: Point(5, -1)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(0, 99)')
print('Actual for p2: ', p2)
check_has_no_return = p2.move_to(0, 222)
print()
print('Expected for p1: Point(5, -1)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(0, 222)')
print('Actual for p2: ', p2)
if check_has_no_return is not None:
print('*** FAILED: This method should have had no value returned! ***')
"""
# ------------------------------------------------------------------
# DONE: 6. Follow the same instructions as in TO-DO 3 above,
# but for the move_to method specified above.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the move_to method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(10, 8)
p2 = Point(50, 20)
print()
print('Expected for p1: Point(10, 8)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(50, 20)')
print('Actual for p2: ', p2)
p1.move_to(5, -1)
p2.move_to(0, 0)
print()
print('Expected for p1: Point(5, -1)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(0, 0)')
print('Actual for p2: ', p2)
p2.y = 99
print()
print('Expected for p1: Point(5, -1)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(0, 99)')
print('Actual for p2: ', p2)
check_has_no_return = p2.move_to(0, 222)
print()
print('Expected for p1: Point(5, -1)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(0, 222)')
print('Actual for p2: ', p2)
if check_has_no_return is not None:
print('*** FAILED: This method should have had no value returned! ***')
def run_test_move_by():
"""
Tests the move_by method of the Point class.
Here is the specification for the move_by method:
What comes in:
-- self
-- an integer dx
-- an integer dy
What goes out: Nothing (i.e., None).
Side effects: Adds the given dx and dy
to the instance variables
x
y
that store the position of this Point.
This has the effect of "moving" this Point BY the given (dx, dy).
EXAMPLE: The following shows move_by in action.
You may also use this example to test this method.
p1 = Point(10, 8)
p2 = Point(50, 20)
print()
print('Expected for p1: Point(10, 8)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(50, 20)')
print('Actual for p2: ', p2)
p1.move_by(5, -1)
p2.move_by(0, 0)
print()
print('Expected for p1: Point(15, 7)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(50, 20)')
print('Actual for p2: ', p2)
p2.move_by(200, 0)
print()
print('Expected for p1: Point(15, 7)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(250, 20)')
print('Actual for p2: ', p2)
check_has_no_return = p2.move_by(-100, 300)
print()
print('Expected for p1: Point(15, 7)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(150, 320)')
print('Actual for p2: ', p2)
if check_has_no_return is not None:
print('*** FAILED: This method should have had no value returned! ***')
"""
# ------------------------------------------------------------------
# DONE: 7. Follow the same instructions as in TO-DO 3 above,
# but for the move_by method specified above.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the move_by method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(10, 8)
p2 = Point(50, 20)
print()
print('Expected for p1: Point(10, 8)')
print('Actual for p1: ', p1)
print('Expected for p2: Point(50, 20)')
print('Actual for p2: ', p2)
    p1.move_by(5, -1)
    p2.move_by(0, 0)
    print()
    print('Expected for p1: Point(15, 7)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(50, 20)')
    print('Actual for p2: ', p2)
    p2.move_by(200, 0)
    print()
    print('Expected for p1: Point(15, 7)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(250, 20)')
    print('Actual for p2: ', p2)
    check_has_no_return = p2.move_by(-100, 300)
    print()
    print('Expected for p1: Point(15, 7)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(150, 320)')
    print('Actual for p2: ', p2)
    if check_has_no_return is not None:
        print('*** FAILED: This method should have had no value returned! ***')
def run_test_get_number_of_moves_made():
"""
Tests the get_number_of_moves_made method of the Point class.
Here is the specification for the get_number_of_moves_made method:
What comes in:
-- self
What goes out: Returns an integer that is the number of times that
this Point has "moved" via calls to move_to and/or move_by.
Side effects:
** You figure out what side effect(s) MUST happen! **
EXAMPLE: The following shows get_number_of_moves_made in action.
You may also use this example to test this method.
p1 = Point(10, 8)
p2 = Point(50, 20)
print()
print('Expected for p1 moves made: 0')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 0')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
p1.move_by(5, -1)
p2.move_by(0, 0)
print()
print('Expected for p1 moves made: 1')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 1')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
p2.move_by(200, 0)
p2.move_by(-100, 300)
p2.move_to(-100, 300)
p1.move_to(3, 3)
print()
print('Expected for p1 moves made: 2')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 4')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
p1.move_by(200, 0)
p1.move_by(-100, 300)
p1.move_to(-100, 300)
p1.move_to(3, 3)
print()
print('Expected for p1 moves made: 6')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 4')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
p1.x = 400
print()
print('Expected for p1 moves made: 6')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 4')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
p1.move_to(3, 3)
p2.move_by(0, 0)
print()
print('Expected for p1 moves made: 7')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 5')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
"""
# ------------------------------------------------------------------
# DONE: 8. Follow the same instructions as in TO-DO 3 above,
# but for the get_number_of_moves_made method specified above.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the get_number_of_moves_made method.')
print('-----------------------------------------------------------')
p1 = Point(10, 8)
p2 = Point(50, 20)
print()
print('Expected for p1 moves made: 0')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 0')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
p1.move_by(5, -1)
p2.move_by(0, 0)
print()
print('Expected for p1 moves made: 1')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 1')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
p2.move_by(200, 0)
p2.move_by(-100, 300)
p2.move_to(-100, 300)
p1.move_to(3, 3)
print()
print('Expected for p1 moves made: 2')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 4')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
p1.move_by(200, 0)
p1.move_by(-100, 300)
p1.move_to(-100, 300)
p1.move_to(3, 3)
print()
print('Expected for p1 moves made: 6')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 4')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
p1.x = 400
print()
print('Expected for p1 moves made: 6')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 4')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
p1.move_to(3, 3)
p2.move_by(0, 0)
print()
print('Expected for p1 moves made: 7')
print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
print('Expected for p2 moves made: 5')
print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
def run_test_get_distance_from():
"""
Tests the get_distance_from method of the Point class.
Here is the specification for the get_distance_from method:
What comes in:
-- self
-- another Point object
What goes out:
Returns the distance from this Point to the given Point.
Side effects:
** You figure out WHETHER OR NOT side effect(s) MUST happen! **
EXAMPLE: The following shows get_distance_from in action.
You may also use this example to test this method.
p1 = Point(1, 5)
p2 = Point(10, 5)
p3 = Point(13, 9)
print()
print('Expected p1 to p2: 9.0')
print('Actual p1 to p2:', p1.get_distance_from(p2))
print()
print('Expected p2 to p3: 5.0')
print('Actual p2 to p3:', p2.get_distance_from(p3))
print('Expected p3 to p2: 5.0')
print('Actual p3 to p2:', p3.get_distance_from(p2))
print()
print('Expected p1 to p3: about 12.65')
print('Actual p1 to p3:', p1.get_distance_from(p3))
print('Expected p3 to p1: about 12.65')
print('Actual p3 to p1:', p3.get_distance_from(p1))
print()
print('Expected p1 to p1: 0.0')
print('Actual p1 to p1:', p1.get_distance_from(p1))
print('Expected p2 to p2: 0.0')
print('Actual p2 to p2:', p2.get_distance_from(p2))
print('Expected p3 to p3: 0.0')
print('Actual p3 to p3:', p3.get_distance_from(p3))
p4 = p1.clone()
print()
print('Expected p1 to p4: 0.0')
print('Actual p1 to p4:', p1.get_distance_from(p4))
print('Expected p4 to p1: 0.0')
print('Actual p4 to p1:', p4.get_distance_from(p1))
print('Expected p4 to p2: 9.0')
print('Actual p4 to p2:', p4.get_distance_from(p2))
print('Expected p2 to p4: 9.0')
print('Actual p2 to p4:', p2.get_distance_from(p4))
"""
# ------------------------------------------------------------------
# DONE: 9. Follow the same instructions as in TO-DO 3 above,
# but for the get_distance_from method specified above.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the get_distance_from method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(1, 5)
p2 = Point(10, 5)
p3 = Point(13, 9)
print()
print('Expected p1 to p2: 9.0')
print('Actual p1 to p2:', p1.get_distance_from(p2))
print()
print('Expected p2 to p3: 5.0')
print('Actual p2 to p3:', p2.get_distance_from(p3))
print('Expected p3 to p2: 5.0')
print('Actual p3 to p2:', p3.get_distance_from(p2))
print()
print('Expected p1 to p3: about 12.65')
print('Actual p1 to p3:', p1.get_distance_from(p3))
print('Expected p3 to p1: about 12.65')
print('Actual p3 to p1:', p3.get_distance_from(p1))
print()
print('Expected p1 to p1: 0.0')
print('Actual p1 to p1:', p1.get_distance_from(p1))
print('Expected p2 to p2: 0.0')
print('Actual p2 to p2:', p2.get_distance_from(p2))
print('Expected p3 to p3: 0.0')
print('Actual p3 to p3:', p3.get_distance_from(p3))
p4 = p1.clone()
print()
print('Expected p1 to p4: 0.0')
print('Actual p1 to p4:', p1.get_distance_from(p4))
print('Expected p4 to p1: 0.0')
print('Actual p4 to p1:', p4.get_distance_from(p1))
print('Expected p4 to p2: 9.0')
print('Actual p4 to p2:', p4.get_distance_from(p2))
print('Expected p2 to p4: 9.0')
print('Actual p2 to p4:', p2.get_distance_from(p4))
def run_test_get_distance_from_start():
"""
Tests the get_distance_from_START method of the Point class.
Here is the specification for the get_distance_from_start method:
What comes in:
-- self
What goes out:
Returns the distance from this Point's current position
to the position that the Point was at when it was constructed.
Side effects:
** You figure out WHETHER OR NOT side effect(s) MUST happen! **
EXAMPLE: The following shows get_distance_from_START in action.
You may also use this example to test this method.
p1 = Point(20, 30)
p1.move_to(111, 222)
p1.move_by(10, 20)
p1.move_to(0, 0)
p1.move_to(21, 31)
print()
print('p1 from start to (21, 31), should be about 1.414')
print('Actually is:', p1.get_distance_from_start())
p1.move_by(29, 39)
print()
print('p1 from start to (50, 70), should be about 50.0')
print('Actually is:', p1.get_distance_from_start())
p2 = Point(1, 1)
print()
print('p2 from start to (1, 1), should be about 0.0')
print('Actually is:', p2.get_distance_from_start())
p2.move_to(11, 1)
print()
print('p2 from start to (11, 1), should be about 10.0')
print('Actually is:', p2.get_distance_from_start())
p2.move_to(999, 999)
p2.move_to(1, 1)
print()
print('p2 from start to (1, 1), should be about 0.0')
print('Actually is:', p2.get_distance_from_start())
"""
# ------------------------------------------------------------------
# DONE: 10. Follow the same instructions as in TO-DO 3 above,
# but for the get_distance_from_START method specified above.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the get_distance_from_START method')
print('of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(20, 30)
p1.move_to(111, 222)
p1.move_by(10, 20)
p1.move_to(0, 0)
p1.move_to(21, 31)
print()
print('p1 from start to (21, 31), should be about 1.414')
print('Actually is:', p1.get_distance_from_start())
p1.move_by(29, 39)
print()
print('p1 from start to (50, 70), should be about 50.0')
print('Actually is:', p1.get_distance_from_start())
p2 = Point(1, 1)
print()
print('p2 from start to (1, 1), should be about 0.0')
print('Actually is:', p2.get_distance_from_start())
p2.move_to(11, 1)
print()
print('p2 from start to (11, 1), should be about 10.0')
print('Actually is:', p2.get_distance_from_start())
p2.move_to(999, 999)
p2.move_to(1, 1)
print()
print('p2 from start to (1, 1), should be about 0.0')
print('Actually is:', p2.get_distance_from_start())
def run_test_get_distance_traveled():
"""
Tests the get_distance_traveled method of the Point class.
Here is the specification for the get_distance_traveled method:
What comes in:
-- self
What goes out: Returns the sum of all the distances that
this Point has "moved" via calls to move_to and/or move_by.
Side effects:
** You figure out WHETHER OR NOT side effect(s) MUST happen! **
EXAMPLE: The following shows get_distance_traveled in action.
You may also use this example to test this method.
p1 = Point(20, 30)
p1.move_to(21, 30)
p1.move_to(21, 38)
print()
print('Expected p1 has traveled 9.0')
print('Actual:', p1.get_distance_traveled())
p1.move_by(1, 1)
print()
print('Expected p1 has now traveled about 10.414')
print('Actual:', p1.get_distance_traveled())
p2 = Point(0, 0)
p3 = Point(100, 22)
p4 = Point(0, 555)
for k in range(100):
p2.move_by(0, k + 1)
p3.move_by(k + 1, 0)
p4.move_to(k + 1, 555)
print()
print('Expected p2 has now traveled', 101 * 50.0)
print('Actual:', p2.get_distance_traveled())
print('Expected p3 has now traveled', 101 * 50.0)
print('Actual:', p3.get_distance_traveled())
print('Expected p4 has now traveled 100.0')
print('Actual:', p4.get_distance_traveled())
"""
# ------------------------------------------------------------------
# DONE: 11. Follow the same instructions as in TO-DO 3 above,
# but for the get_distance_traveled method specified above.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the get_distance_traveled method')
print('of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(20, 30)
p1.move_to(21, 30)
p1.move_to(21, 38)
print()
print('Expected p1 has traveled 9.0')
print('Actual:', p1.get_distance_traveled())
p1.move_by(1, 1)
print()
print('Expected p1 has now traveled about 10.414')
print('Actual:', p1.get_distance_traveled())
p2 = Point(0, 0)
p3 = Point(100, 22)
p4 = Point(0, 555)
for k in range(100):
p2.move_by(0, k + 1)
p3.move_by(k + 1, 0)
p4.move_to(k + 1, 555)
print()
print('Expected p2 has now traveled', 101 * 50.0)
print('Actual:', p2.get_distance_traveled())
print('Expected p3 has now traveled', 101 * 50.0)
print('Actual:', p3.get_distance_traveled())
print('Expected p4 has now traveled 100.0')
print('Actual:', p4.get_distance_traveled())
def run_test_closer_to():
"""
Tests the closer_to method of the Point class.
Here is the specification for the closer_to method:
What comes in:
-- self
-- a Point object p2
-- a Point object p3
What goes out:
Returns whichever of p2 and p3 this Point is closer to.
(Just to be specific, it should return p2 in the case of a tie.)
Side effects:
** You figure out WHETHER OR NOT side effect(s) MUST happen! **
EXAMPLE: The following shows closer_to in action.
You may also use this example to test this method.
p1 = Point(10, 20)
p2 = Point(15, 20)
p3 = Point(14, 24)
print()
print('Expected:', p2)
print('Actual: ', p1.closer_to(p2, p3))
print('Expected:', p2)
print('Actual: ', p1.closer_to(p3, p2))
print()
print('Expected:', p1)
print('Actual: ', p1.closer_to(p1, p3))
print('Expected:', p2)
print('Actual: ', p2.closer_to(p3, p2))
print('Expected:', p3)
print('Actual: ', p3.closer_to(p3, p3))
print()
p4 = p1.clone()
p5 = p1.clone()
print('Expected:', p4)
print('Actual: ', p1.closer_to(p4, p5))
print('Expected: True')
print('Actual: ', p1.closer_to(p4, p5) is p4)
print('Expected: False')
print('Actual: ', p1.closer_to(p4, p5) is p5)
"""
# ------------------------------------------------------------------
# DONE: 12. Follow the same instructions as in TO-DO 3 above,
# but for the closer_to method specified above.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the closer_to method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(10, 20)
p2 = Point(15, 20)
p3 = Point(14, 24)
print()
print('Expected:', p2)
print('Actual: ', p1.closer_to(p2, p3))
print('Expected:', p2)
print('Actual: ', p1.closer_to(p3, p2))
print()
print('Expected:', p1)
print('Actual: ', p1.closer_to(p1, p3))
print('Expected:', p2)
print('Actual: ', p2.closer_to(p3, p2))
print('Expected:', p3)
print('Actual: ', p3.closer_to(p3, p3))
print()
p4 = p1.clone()
p5 = p1.clone()
print('Expected:', p4)
print('Actual: ', p1.closer_to(p4, p5))
print('Expected: True')
print('Actual: ', p1.closer_to(p4, p5) is p4)
print('Expected: False')
print('Actual: ', p1.closer_to(p4, p5) is p5)
def run_test_halfway_to():
"""
Tests the halfway_to method of the Point class.
Here is the specification for the halfway_to method:
What comes in:
-- self
-- a Point object p2
What goes out:
Returns a new Point that is halfway between this Point and p2.
That is, the x coordinate of the new Point is the average
of the x coordinate of this Point and the x coordinate of p2,
and likewise for the new Point's y coordinate.
Side effects:
** You figure out WHETHER OR NOT side effect(s) MUST happen! **
EXAMPLE: The following shows halfway_to in action.
You may also use this example to test this method.
p1 = Point(10, 20)
p2 = Point(30, 100)
print()
print('Should be: Point(20.0, 60.0)')
print('Actual is:', p1.halfway_to(p2))
print('Should be: Point(20.0, 60.0)')
print('Actual is:', p2.halfway_to(p1))
print()
print('Should be: Point(10.0, 20.0)')
print('Actual is:', p1.halfway_to(p1))
p3 = Point(-10, 20)
p4 = Point(30, -100)
print()
print('Should be: Point(10.0, -40.0)')
print('Actual is:', p3.halfway_to(p4))
print('Should be: Point(10.0, -40.0)')
print('Actual is:', p3.halfway_to(p4))
print()
print('Should be: Point(-10.0, 20.0)')
print('Actual is:', p3.halfway_to(p3))
"""
# ------------------------------------------------------------------
    # DONE: 13. Follow the same instructions as in TO-DO 3 above,
# but for the halfway_to method specified above.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the halfway_to method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(10, 20)
p2 = Point(30, 100)
print()
print('Should be: Point(20.0, 60.0)')
print('Actual is:', p1.halfway_to(p2))
print('Should be: Point(20.0, 60.0)')
print('Actual is:', p2.halfway_to(p1))
print()
print('Should be: Point(10.0, 20.0)')
print('Actual is:', p1.halfway_to(p1))
p3 = Point(-10, 20)
p4 = Point(30, -100)
print()
print('Should be: Point(10.0, -40.0)')
print('Actual is:', p3.halfway_to(p4))
print('Should be: Point(10.0, -40.0)')
print('Actual is:', p3.halfway_to(p4))
print()
print('Should be: Point(-10.0, 20.0)')
print('Actual is:', p3.halfway_to(p3))
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
py | b40f718f8f1201b1256b14912ff24a8d842db947 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface to a SQLite DB file for SMU data.
Provides a simpler interface than SQL to create and access the SMU data in an
SQLite database.
The majority of the data is stored as a blob, with just the bond topology id and
smiles string pulled out as fields.
"""
import datetime
import os
import sqlite3
from absl import logging
from rdkit import Chem
from smu import dataset_pb2
from smu.parser import smu_utils_lib
import snappy
_CONFORMER_TABLE_NAME = 'conformer'
_BTID_TABLE_NAME = 'btid'
_SMILES_TABLE_NAME = 'smiles'
class ReadOnlyError(Exception):
pass
class SMUSQLite:
"""Provides an interface for SMU data to a SQLite DB file.
The class hides away all the SQL fun with just Conformer protobuf visible in
the interface.
Internal details about the tables:
There are 3 separate tables
* conformer: Is the primary table which has columns
* cid: integer conformer id (unique)
* conformer: blob wire format proto of a conformer proto
* btid: Used for lookups by bond topology id which has columns
* btid: integer bond topology id (not unique)
* cid: integer conformer id (not unique)
* smiles: Used to map smiles to bond topology ids with columns
* smiles: text canonical smiles string (unique)
* btid: integer bond topology id
Note that if multiple smiles strings are associated with the same bond
  topology id, the first one provided will be silently kept.
"""
def __init__(self, filename, mode='r'):
"""Creates SMUSQLite.
Args:
filename: database file, must be on local filesystem
mode: 'c' (create, deletes existing), 'w' (writable), 'r' (read only)
Raises:
FileNotFoundError: if 'r' and file does not exist
"""
if mode == 'c':
if os.path.exists(filename):
os.remove(filename)
self._read_only = False
self._conn = sqlite3.connect(filename)
self._maybe_init_db()
elif mode == 'w':
self._read_only = False
self._conn = sqlite3.connect(filename)
self._maybe_init_db()
elif mode == 'r':
if not os.path.exists(filename):
raise FileNotFoundError(filename)
self._conn = sqlite3.connect(filename)
self._read_only = True
else:
raise ValueError('Mode must be c, r, or w')
def _maybe_init_db(self):
"""Create the table and indices if they do not exist."""
make_table = (f'CREATE TABLE IF NOT EXISTS {_CONFORMER_TABLE_NAME} '
'(cid INTEGER PRIMARY KEY, '
'exp_stoich STRING, '
'conformer BLOB)')
self._conn.execute(make_table)
self._conn.execute(f'CREATE UNIQUE INDEX IF NOT EXISTS '
f'idx_cid ON {_CONFORMER_TABLE_NAME} (cid)')
self._conn.execute(f'CREATE INDEX IF NOT EXISTS '
f'idx_exp_stoich ON {_CONFORMER_TABLE_NAME} '
'(exp_stoich)')
self._conn.execute(f'CREATE TABLE IF NOT EXISTS {_BTID_TABLE_NAME} '
'(btid INTEGER, cid INTEGER)')
self._conn.execute(f'CREATE INDEX IF NOT EXISTS '
f'idx_btid ON {_BTID_TABLE_NAME} (btid)')
self._conn.execute(f'CREATE TABLE IF NOT EXISTS {_SMILES_TABLE_NAME} '
'(smiles TEXT, btid INTEGER)')
self._conn.execute(f'CREATE UNIQUE INDEX IF NOT EXISTS '
f'idx_smiles ON {_SMILES_TABLE_NAME} (smiles)')
self._conn.execute('PRAGMA synchronous = OFF')
self._conn.execute('PRAGMA journal_mode = MEMORY')
self._conn.commit()
def bulk_insert(self, encoded_conformers, batch_size=10000, limit=None):
"""Inserts conformers into the database.
Args:
encoded_conformers: iterable for encoded dataset_pb2.Conformer
batch_size: insert performance is greatly improved by putting multiple
inserts into one transaction. 10k was a reasonable default from some
early exploration.
limit: maximum number of records to insert
Raises:
ReadOnlyError: if mode is 'r'
ValueError: If encoded_conformers is empty.
"""
if self._read_only:
raise ReadOnlyError()
if not encoded_conformers:
raise ValueError()
insert_conformer = (f'INSERT INTO {_CONFORMER_TABLE_NAME} '
'VALUES (?, ?, ?)')
insert_btid = f'INSERT INTO {_BTID_TABLE_NAME} VALUES (?, ?)'
insert_smiles = (
f'INSERT OR IGNORE INTO {_SMILES_TABLE_NAME} VALUES (?, ?) ')
cur = self._conn.cursor()
start_time = datetime.datetime.now()
pending_conformer_args = []
pending_btid_args = []
pending_smiles_args = []
def commit_pending():
cur.executemany(insert_conformer, pending_conformer_args)
cur.executemany(insert_btid, pending_btid_args)
cur.executemany(insert_smiles, pending_smiles_args)
pending_conformer_args.clear()
pending_btid_args.clear()
pending_smiles_args.clear()
self._conn.commit()
idx = None
for idx, encoded_conformer in enumerate(encoded_conformers, 1):
conformer = dataset_pb2.Conformer.FromString(encoded_conformer)
# A small efficiency hack: the expanded stoich is only intended for use
# with topology_detection, so we only put a real value for those so that
# we don't even have to return the entries we don't want.
if smu_utils_lib.conformer_eligible_for_topology_detection(conformer):
expanded_stoich = (
smu_utils_lib.expanded_stoichiometry_from_topology(
conformer.bond_topologies[0]))
else:
expanded_stoich = ''
pending_conformer_args.append((conformer.conformer_id, expanded_stoich,
snappy.compress(encoded_conformer)))
for bond_topology in conformer.bond_topologies:
pending_btid_args.append(
(bond_topology.bond_topology_id, conformer.conformer_id))
pending_smiles_args.append(
(bond_topology.smiles, bond_topology.bond_topology_id))
if batch_size and idx % batch_size == 0:
commit_pending()
elapsed = datetime.datetime.now() - start_time
logging.info(
'bulk_insert: committed at index %d, %f s total, %.6f s/record',
idx, elapsed.total_seconds(),
elapsed.total_seconds() / idx)
if limit and idx >= limit:
break
# Commit a final time
commit_pending()
elapsed = datetime.datetime.now() - start_time
logging.info('bulk_insert: Total records %d, %f s, %.6f s/record', idx,
elapsed.total_seconds(),
elapsed.total_seconds() / idx)
def find_by_conformer_id(self, cid):
"""Finds the conformer associated with a conformer id.
Args:
cid: conformer id to look up.
Returns:
dataset_pb2.Conformer
Raises:
KeyError: if cid is not found
"""
cur = self._conn.cursor()
select = f'SELECT conformer FROM {_CONFORMER_TABLE_NAME} WHERE cid = ?'
cur.execute(select, (cid,))
result = cur.fetchall()
if not result:
raise KeyError(f'Conformer id {cid} not found')
# Since it's a unique index, there should only be one result and it's a
# tuple with one value.
assert len(result) == 1
assert len(result[0]) == 1
return dataset_pb2.Conformer().FromString(snappy.uncompress(result[0][0]))
def find_by_bond_topology_id(self, btid):
"""Finds all the conformer associated with a bond topology id.
Args:
btid: bond topology id to look up.
Returns:
iterable of dataset_pb2.Conformer
"""
cur = self._conn.cursor()
select = (f'SELECT cid, conformer '
f'FROM {_CONFORMER_TABLE_NAME} '
f'INNER JOIN {_BTID_TABLE_NAME} USING(cid) '
f'WHERE {_BTID_TABLE_NAME}.btid = ?')
cur.execute(select, (btid,))
return (dataset_pb2.Conformer().FromString(snappy.uncompress(result[1]))
for result in cur)
def find_by_smiles(self, smiles):
"""Finds all conformer associated with a given smiles string.
Args:
smiles: string
Returns:
iterable for dataset_pb2.Conformer
"""
canon_smiles = smu_utils_lib.compute_smiles_for_molecule(
Chem.MolFromSmiles(smiles, sanitize=False), include_hs=False)
cur = self._conn.cursor()
select = f'SELECT btid FROM {_SMILES_TABLE_NAME} WHERE smiles = ?'
cur.execute(select, (canon_smiles,))
result = cur.fetchall()
if not result:
return []
# Since it's a unique index, there should only be one result and it's a
# tuple with one value.
assert len(result) == 1
assert len(result[0]) == 1
return self.find_by_bond_topology_id(result[0][0])
def find_by_expanded_stoichiometry(self, exp_stoich):
"""Finds all of the conformers with a stoichiometry.
The expanded stoichiometry includes hydrogens as part of the atom type.
See smu_utils_lib.expanded_stoichiometry_from_topology for a
description.
Args:
exp_stoich: string
Returns:
iterable of dataset_pb2.Conformer
"""
cur = self._conn.cursor()
select = (f'SELECT conformer '
f'FROM {_CONFORMER_TABLE_NAME} '
f'WHERE exp_stoich = ?')
cur.execute(select, (exp_stoich,))
return (dataset_pb2.Conformer().FromString(snappy.uncompress(result[0]))
for result in cur)
def find_by_stoichiometry(self, stoich):
"""Finds all conformers with a given stoichiometry.
The stoichiometry is like "C6H12".
Internally, the stoichiometry is converted to a set of expanded stoichiometries
and the query is done to find all of those.
Notably, this means only records with status <= 512 are returned.
Args:
stoich: stoichiometry string like "C6H12", case doesn't matter
Returns:
Iterable of type dataset_pb2.Conformer.
"""
exp_stoichs = list(
smu_utils_lib.expanded_stoichiometries_from_stoichiometry(stoich))
cur = self._conn.cursor()
select = (f'SELECT conformer '
f'FROM {_CONFORMER_TABLE_NAME} '
f'WHERE exp_stoich IN (' + ','.join('?' for _ in exp_stoichs) +
')')
cur.execute(select, exp_stoichs)
return (dataset_pb2.Conformer().FromString(snappy.uncompress(result[0]))
for result in cur)
def __iter__(self):
"""Iterates through all dataset_pb2.Conformer in the DB."""
select = f'SELECT conformer FROM {_CONFORMER_TABLE_NAME} ORDER BY rowid'
cur = self._conn.cursor()
cur.execute(select)
return (dataset_pb2.Conformer().FromString(snappy.uncompress(result[0]))
for result in cur)
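# Illustrative usage sketch for SMUSQLite (not part of the original module); the
# database path, conformer id, stoichiometry, and `encoded_conformers` iterable
# below are hypothetical values supplied by the caller:
#
#   db = SMUSQLite('smu_data.sqlite', mode='r')
#   conf = db.find_by_conformer_id(57001)            # single Conformer proto
#   for conf in db.find_by_stoichiometry('C6H12'):   # lazily decoded generator
#     print(conf.conformer_id)
#
#   wdb = SMUSQLite('new.sqlite', mode='c')
#   wdb.bulk_insert(encoded_conformers, batch_size=10000)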
|
py | b40f734ef2b672926e6a0f6253ea333d799d0b03 | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import logging
import os
import pickle
import sys
import uuid
from abc import ABC, abstractmethod
from contextlib import contextmanager
from itertools import chain
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from uuid import UUID
import numpy as np
from .configuration_auto import AutoConfig
from .configuration_utils import PretrainedConfig
from .data import SquadExample, squad_convert_examples_to_features
from .file_utils import is_tf_available, is_torch_available
from .modelcard import ModelCard
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BasicTokenizer
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_utils_base import BatchEncoding, PaddingStrategy
if is_tf_available():
import tensorflow as tf
from .modeling_tf_auto import (
TFAutoModel,
TFAutoModelForSequenceClassification,
TFAutoModelForQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TF_MODEL_WITH_LM_HEAD_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TFAutoModelForCausalLM,
)
if is_torch_available():
import torch
from .modeling_auto import (
AutoModel,
AutoModelForSequenceClassification,
AutoModelForQuestionAnswering,
AutoModelForTokenClassification,
AutoModelForSeq2SeqLM,
AutoModelForCausalLM,
AutoModelForMaskedLM,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
)
if TYPE_CHECKING:
from .modeling_utils import PreTrainedModel
from .modeling_tf_utils import TFPreTrainedModel
logger = logging.getLogger(__name__)
def get_framework(model=None):
""" Select framework (TensorFlow/PyTorch) to use.
If both frameworks are installed and no specific model is provided, defaults to using PyTorch.
"""
if is_tf_available() and is_torch_available() and model is not None and not isinstance(model, str):
# Both frameworks are available but the user supplied a model class instance.
# Try to guess which framework to use from the model classname
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
elif not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
else:
# framework = 'tf' if is_tf_available() else 'pt'
framework = "pt" if is_torch_available() else "tf"
return framework
class PipelineException(Exception):
"""
Raised by pipelines when handling __call__
"""
def __init__(self, task: str, model: str, reason: str):
super().__init__(reason)
self.task = task
self.model = model
class ArgumentHandler(ABC):
"""
Base interface for handling varargs for each Pipeline
"""
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError()
class DefaultArgumentHandler(ArgumentHandler):
"""
Default varargs argument parser handling parameters for each Pipeline
"""
@staticmethod
def handle_kwargs(kwargs: Dict) -> List:
if len(kwargs) == 1:
output = list(kwargs.values())
else:
output = list(chain(kwargs.values()))
return DefaultArgumentHandler.handle_args(output)
@staticmethod
def handle_args(args: Sequence[Any]) -> List[str]:
# Only one argument, let's do case by case
if len(args) == 1:
if isinstance(args[0], str):
return [args[0]]
elif not isinstance(args[0], list):
return list(args)
else:
return args[0]
# Multiple arguments (x1, x2, ...)
elif len(args) > 1:
if all([isinstance(arg, str) for arg in args]):
return list(args)
# If not an instance of list, then it should be an instance of Iterable
elif isinstance(args, Iterable):
return list(chain.from_iterable(chain(args)))
else:
raise ValueError(
"Invalid input type {}. Pipeline supports Union[str, Iterable[str]]".format(type(args))
)
else:
return []
def __call__(self, *args, **kwargs):
if len(kwargs) > 0 and len(args) > 0:
raise ValueError("Pipeline cannot handle mixed args and kwargs")
if len(kwargs) > 0:
return DefaultArgumentHandler.handle_kwargs(kwargs)
else:
return DefaultArgumentHandler.handle_args(args)
class PipelineDataFormat:
"""
Base class for all the pipeline supported data format both for reading and writing.
Supported data formats currently includes:
- JSON
- CSV
- stdin/stdout (pipe)
PipelineDataFormat also includes some utilities to work with multi-column data, like mapping from dataset columns
to pipeline keyword arguments through the `dataset_kwarg_1=dataset_column_1` format.
"""
SUPPORTED_FORMATS = ["json", "csv", "pipe"]
def __init__(
self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
self.output_path = output_path
self.input_path = input_path
self.column = column.split(",") if column is not None else [""]
self.is_multi_columns = len(self.column) > 1
if self.is_multi_columns:
self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]
if output_path is not None and not overwrite:
if exists(abspath(self.output_path)):
raise OSError("{} already exists on disk".format(self.output_path))
if input_path is not None:
if not exists(abspath(self.input_path)):
raise OSError("{} doesnt exist on disk".format(self.input_path))
@abstractmethod
def __iter__(self):
raise NotImplementedError()
@abstractmethod
def save(self, data: dict):
"""
Save the provided data object with the representation for the current `DataFormat`.
:param data: data to store
:return:
"""
raise NotImplementedError()
def save_binary(self, data: Union[dict, List[dict]]) -> str:
"""
Save the provided data object as a pickle-formatted binary data on the disk.
:param data: data to store
:return: (str) Path where the data has been saved
"""
path, _ = os.path.splitext(self.output_path)
binary_path = os.path.extsep.join((path, "pickle"))
with open(binary_path, "wb+") as f_output:
pickle.dump(data, f_output)
return binary_path
@staticmethod
def from_str(
format: str, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
if format == "json":
return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "csv":
return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "pipe":
return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
else:
raise KeyError("Unknown reader {} (Available reader are json/csv/pipe)".format(format))
class CsvPipelineDataFormat(PipelineDataFormat):
def __init__(
self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
def __iter__(self):
with open(self.input_path, "r") as f:
reader = csv.DictReader(f)
for row in reader:
if self.is_multi_columns:
yield {k: row[c] for k, c in self.column}
else:
yield row[self.column[0]]
def save(self, data: List[dict]):
with open(self.output_path, "w") as f:
if len(data) > 0:
writer = csv.DictWriter(f, list(data[0].keys()))
writer.writeheader()
writer.writerows(data)
class JsonPipelineDataFormat(PipelineDataFormat):
def __init__(
self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
with open(input_path, "r") as f:
self._entries = json.load(f)
def __iter__(self):
for entry in self._entries:
if self.is_multi_columns:
yield {k: entry[c] for k, c in self.column}
else:
yield entry[self.column[0]]
def save(self, data: dict):
with open(self.output_path, "w") as f:
json.dump(data, f)
class PipedPipelineDataFormat(PipelineDataFormat):
"""
Read data from piped input to the python process.
For multi-column data, columns should be separated by \t
If columns are provided, then the output will be a dictionary with {column_x: value_x}
"""
def __iter__(self):
for line in sys.stdin:
# Split for multi-columns
if "\t" in line:
line = line.split("\t")
if self.column:
# Dictionary to map arguments
yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
else:
yield tuple(line)
# No dictionary to map arguments
else:
yield line
def save(self, data: dict):
print(data)
def save_binary(self, data: Union[dict, List[dict]]) -> str:
if self.output_path is None:
raise KeyError(
"When using piped input on pipeline outputting large object requires an output file path. "
"Please provide such output path through --output argument."
)
return super().save_binary(data)
class _ScikitCompat(ABC):
"""
Interface layer for the Scikit and Keras compatibility.
"""
@abstractmethod
def transform(self, X):
raise NotImplementedError()
@abstractmethod
def predict(self, X):
raise NotImplementedError()
class Pipeline(_ScikitCompat):
"""
The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
different pipelines.
Base class implementing pipelined operations.
Pipeline workflow is defined as a sequence of the following operations:
Input -> Tokenization -> Model Inference -> Post-Processing (Task dependent) -> Output
Pipeline supports running on CPU or GPU through the device argument. Users can specify the
device argument as an integer, -1 meaning "CPU", >= 0 referring to the CUDA device ordinal.
Some pipelines, like FeatureExtractionPipeline ('feature-extraction'), output large
tensor objects as nested lists. In order to avoid dumping such large structures as textual data we
provide the binary_output constructor argument. If set to True, the output will be stored in the
pickle format.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
binary_output (:obj:`bool`, `optional`, defaults to :obj:`False`):
Flag indicating if the output of the pipeline should happen in a binary format (i.e. pickle) or as raw text.
Return:
:obj:`List` or :obj:`Dict`:
Pipeline returns list or dictionary depending on:
- Whether the user supplied multiple samples
- Whether the pipeline exposes multiple fields in the output object
"""
default_input_names = None
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
task: str = "",
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
):
if framework is None:
framework = get_framework()
self.task = task
self.model = model
self.tokenizer = tokenizer
self.modelcard = modelcard
self.framework = framework
self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else "cuda:{}".format(device))
self.binary_output = binary_output
self._args_parser = args_parser or DefaultArgumentHandler()
# Special handling
if self.framework == "pt" and self.device.type == "cuda":
self.model = self.model.to(self.device)
# Update config with task specific parameters
task_specific_params = self.model.config.task_specific_params
if task_specific_params is not None and task in task_specific_params:
self.model.config.update(task_specific_params.get(task))
def save_pretrained(self, save_directory):
"""
Save the pipeline's model and tokenizer to the specified save_directory
"""
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
self.model.save_pretrained(save_directory)
self.tokenizer.save_pretrained(save_directory)
if self.modelcard is not None:
self.modelcard.save_pretrained(save_directory)
def transform(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
def predict(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
@contextmanager
def device_placement(self):
"""
Context manager allowing tensor allocation on the user-specified device in a framework agnostic way.
example:
# Explicitly ask for tensor allocation on CUDA device :0
nlp = pipeline(..., device=0)
with nlp.device_placement():
# Every framework specific tensor allocation will be done on the requested device
output = nlp(...)
Returns:
Context manager
"""
if self.framework == "tf":
with tf.device("/CPU:0" if self.device == -1 else "/device:GPU:{}".format(self.device)):
yield
else:
if self.device.type == "cuda":
torch.cuda.set_device(self.device)
yield
def ensure_tensor_on_device(self, **inputs):
"""
Ensure PyTorch tensors are on the specified device.
:param inputs:
:return:
"""
return {name: tensor.to(self.device) for name, tensor in inputs.items()}
def check_model_type(self, supported_models):
"""
Check if the model class is in the supported class list of the pipeline.
"""
if not isinstance(supported_models, list): # Create from a model mapping
supported_models = [item[1].__name__ for item in supported_models.items()]
if self.model.__class__.__name__ not in supported_models:
raise PipelineException(
self.task,
self.model.base_model_prefix,
f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are {supported_models}",
)
def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer(
inputs, add_special_tokens=add_special_tokens, return_tensors=self.framework, padding=padding,
)
return inputs
def __call__(self, *args, **kwargs):
inputs = self._parse_and_tokenize(*args, **kwargs)
return self._forward(inputs)
def _forward(self, inputs, return_tensors=False):
"""
Internal framework specific forward dispatching.
Args:
inputs: dict holding all the keyword arguments required by the model forward method.
return_tensors: Whether to return native framework (pt/tf) tensors rather than numpy array.
Returns:
Numpy array
"""
# Encode for forward
with self.device_placement():
if self.framework == "tf":
# TODO trace model
predictions = self.model(inputs.data, training=False)[0]
else:
with torch.no_grad():
inputs = self.ensure_tensor_on_device(**inputs)
predictions = self.model(**inputs)[0].cpu()
if return_tensors:
return predictions
else:
return predictions.numpy()
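# Minimal sketch of how a subclass plugs into the base workflow above (a hedged
# illustration, not part of the original file; the class name is hypothetical and
# task-specific post-processing is left out):
#
#   class MyPipeline(Pipeline):
#       def __call__(self, *args, **kwargs):
#           inputs = self._parse_and_tokenize(*args, **kwargs)  # Input -> Tokenization
#           return self._forward(inputs)                        # Model Inference -> numpy array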
class FeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline using Model head. This pipeline extracts the hidden states from the base transformer,
which can be used as features in downstream tasks.
This feature extraction pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "feature-extraction", for extracting features of a sequence.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
`huggingface.co/models <https://huggingface.co/models>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs).tolist()
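# Illustrative usage sketch (the checkpoint name is an assumption; any encoder
# checkpoint covered by the AutoModel mapping should work):
#
#   model = AutoModel.from_pretrained("bert-base-uncased")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   extractor = FeatureExtractionPipeline(model=model, tokenizer=tokenizer)
#   features = extractor("Hello world")   # nested list of hidden states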
class TextGenerationPipeline(Pipeline):
"""
Language generation pipeline using any ModelWithLMHead head. This pipeline predicts the words that will follow a specified text prompt.
This language generation pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "text-generation", for generating text from a specified prompt.
The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective,
which includes the uni-directional models in the library (e.g. gpt2).
See the list of available community models on
`huggingface.co/models <https://huggingface.co/models?search=&filter=lm-head>`__.
"""
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. """
ALLOWED_MODELS = [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"ReformerModelWithLMHead",
"GPT2LMHeadModel",
"OpenAIGPTLMHeadModel",
"CTRLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
"TFGPT2LMHeadModel",
"TFOpenAIGPTLMHeadModel",
"TFCTRLLMHeadModel",
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(self.ALLOWED_MODELS)
# overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
tokenizer_kwargs = {"add_space_before_punct_symbol": True}
else:
tokenizer_kwargs = {}
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer(
inputs,
add_special_tokens=add_special_tokens,
return_tensors=self.framework,
padding=padding,
**tokenizer_kwargs,
)
return inputs
def __call__(
self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
text_inputs = self._args_parser(*args)
results = []
for prompt_text in text_inputs:
# Manage correct placement of the tensors
with self.device_placement():
if self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
padding_text = self.PADDING_TEXT + self.tokenizer.eos_token
padding = self._parse_and_tokenize(padding_text, padding=False, add_special_tokens=False)
# This impacts the max_length and min_length arguments that need adjusting.
padding_length = padding["input_ids"].shape[-1]
if "max_length" in generate_kwargs and generate_kwargs["max_length"] is not None:
generate_kwargs["max_length"] += padding_length
if "min_length" in generate_kwargs and generate_kwargs["min_length"] is not None:
generate_kwargs["min_length"] += padding_length
inputs = self._parse_and_tokenize(
padding_text + prompt_text, padding=False, add_special_tokens=False
)
else:
inputs = self._parse_and_tokenize(prompt_text, padding=False, add_special_tokens=False)
# set input_ids to None to allow empty prompt
if inputs["input_ids"].shape[-1] == 0:
inputs["input_ids"] = None
inputs["attention_mask"] = None
if self.framework == "pt" and inputs["input_ids"] is not None:
inputs = self.ensure_tensor_on_device(**inputs)
input_ids = inputs["input_ids"]
# Ensure that batch size = 1 (batch generation not allowed for now)
assert (
input_ids is None or input_ids.shape[0] == 1
), "Batch generation is currently not supported. See https://github.com/huggingface/transformers/issues/3021 for more information."
output_sequences = self.model.generate(input_ids=input_ids, **generate_kwargs) # BS x SL
result = []
for generated_sequence in output_sequences:
if self.framework == "pt" and generated_sequence is not None:
generated_sequence = generated_sequence.cpu()
generated_sequence = generated_sequence.numpy().tolist()
record = {}
if return_tensors:
record["generated_token_ids"] = generated_sequence
if return_text:
# Decode text
text = self.tokenizer.decode(
generated_sequence,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
prompt_length = 0
else:
prompt_length = len(
self.tokenizer.decode(
input_ids[0],
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
)
record["generated_text"] = prompt_text + text[prompt_length:]
result.append(record)
results += [result]
if len(results) == 1:
return results[0]
return results
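# Illustrative usage sketch (the "gpt2" checkpoint and generation kwargs are
# assumptions; any causal LM listed in ALLOWED_MODELS should work):
#
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
#   generator("Once upon a time", max_length=50)   # -> [{'generated_text': ...}]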
class TextClassificationPipeline(Pipeline):
"""
Text classification pipeline using ModelForSequenceClassification head. See the
`sequence classification usage <../usage.html#sequence-classification>`__ examples for more information.
This text classification pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "sentiment-analysis", for classifying sequences according to positive or negative sentiments.
The models that this pipeline can use are models that have been fine-tuned on a sequence classification task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=text-classification>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(self, return_all_scores: bool = False, **kwargs):
super().__init__(**kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
)
self.return_all_scores = return_all_scores
def __call__(self, *args, **kwargs):
outputs = super().__call__(*args, **kwargs)
scores = np.exp(outputs) / np.exp(outputs).sum(-1, keepdims=True)
if self.return_all_scores:
return [
[{"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(item)]
for item in scores
]
else:
return [
{"label": self.model.config.id2label[item.argmax()], "score": item.max().item()} for item in scores
]
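# Illustrative usage sketch (the checkpoint name is hypothetical; any checkpoint
# fine-tuned for sequence classification should work):
#
#   model = AutoModelForSequenceClassification.from_pretrained("some-sentiment-checkpoint")
#   tokenizer = AutoTokenizer.from_pretrained("some-sentiment-checkpoint")
#   classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer, return_all_scores=True)
#   classifier("This movie was great!")   # -> [[{'label': ..., 'score': ...}, ...]]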
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
"""
Handles arguments for zero-shot for text classification by turning each possible label into an NLI
premise/hypothesis pair.
"""
def _parse_labels(self, labels):
if isinstance(labels, str):
labels = [label.strip() for label in labels.split(",")]
return labels
def __call__(self, sequences, labels, hypothesis_template):
if len(labels) == 0 or len(sequences) == 0:
raise ValueError("You must include at least one label and at least one sequence.")
if hypothesis_template.format(labels[0]) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(hypothesis_template)
)
if isinstance(sequences, str):
sequences = [sequences]
labels = self._parse_labels(labels)
sequence_pairs = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
return sequence_pairs
class ZeroShotClassificationPipeline(Pipeline):
"""
NLI-based zero-shot classification pipeline using a ModelForSequenceClassification head with models trained on
NLI tasks.
Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis
pair and passed to the pre-trained model. The logit for `entailment` is then taken as the logit for the
candidate label being valid. Any NLI model can be used as long as the first output logit corresponds to
`contradiction` and the last to `entailment`.
This pipeline can currently be loaded from the :func:`~transformers.pipeline` method using the following task
identifier(s):
- "zero-shot-classification"
The models that this pipeline can use are models that have been fine-tuned on a Natural Language Inference task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?search=nli>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
super().__init__(*args, args_parser=args_parser, **kwargs)
def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
"""
Parse arguments and tokenize only_first so that hypothesis (label) is not truncated
"""
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer(
inputs,
add_special_tokens=add_special_tokens,
return_tensors=self.framework,
padding=padding,
truncation="only_first",
)
return inputs
def __call__(self, sequences, candidate_labels, hypothesis_template="This example is {}.", multi_class=False):
"""
NLI-based zero-shot classification. Any combination of sequences and labels can be passed and each
combination will be posed as a premise/hypothesis pair and passed to the pre-trained model. The logit for
`entailment` is then taken as the logit for the candidate label being valid. Any NLI model can be used as
long as the first output logit corresponds to `contradiction` and the last to `entailment`.
Args:
sequences (:obj:`str` or obj:`List`):
The sequence or sequences to classify. Truncated if model input is too large.
candidate_labels (:obj:`str` or obj:`List`):
The set of possible class labels to classify each sequence into. Can be a single label, a string of
comma-separated labels, or a list of labels.
hypothesis_template (obj:`str`, defaults to "This example is {}."):
The template used to turn each label into an NLI-style hypothesis. This template must include a {}
or similar syntax for the candidate label to be inserted into the template. For example, the default
template is "This example is {}." With the candidate label "sports", this would be fed into the model
like `<cls> sequence to classify <sep> This example is sports . <sep>`. The default template works
well in many cases, but it may be worthwhile to experiment with different templates depending on the
task setting.
multi_class (obj:`bool`, defaults to False):
When False, it is assumed that only one candidate label can be true, and the scores are normalized
such that the sum of the label likelihoods for each sequence is 1. When True, the labels are
considered independent and probabilities are normalized for each candidate by doing a softmax of
the entailment score vs. the contradiction score.
"""
outputs = super().__call__(sequences, candidate_labels, hypothesis_template)
num_sequences = 1 if isinstance(sequences, str) else len(sequences)
candidate_labels = self._args_parser._parse_labels(candidate_labels)
reshaped_outputs = outputs.reshape((num_sequences, len(candidate_labels), -1))
if len(candidate_labels) == 1:
multi_class = True
if not multi_class:
# softmax the "entailment" logits over all candidate labels
entail_logits = reshaped_outputs[..., -1]
scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
else:
# softmax over the entailment vs. contradiction dim for each label independently
entail_contr_logits = reshaped_outputs[..., [0, -1]]
scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
scores = scores[..., 1]
result = []
for iseq in range(num_sequences):
top_inds = list(reversed(scores[iseq].argsort()))
result.append(
{
"sequence": sequences if isinstance(sequences, str) else sequences[iseq],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[iseq][top_inds].tolist(),
}
)
if len(result) == 1:
return result[0]
return result
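# Illustrative usage sketch (the NLI checkpoint name is hypothetical; any model
# fine-tuned on an NLI task with the expected logit ordering should work):
#
#   model = AutoModelForSequenceClassification.from_pretrained("some-nli-checkpoint")
#   tokenizer = AutoTokenizer.from_pretrained("some-nli-checkpoint")
#   zero_shot = ZeroShotClassificationPipeline(model=model, tokenizer=tokenizer)
#   zero_shot("Who are you voting for in 2020?", candidate_labels=["politics", "sports"])
#   # -> {'sequence': ..., 'labels': [...], 'scores': [...]}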
class FillMaskPipeline(Pipeline):
"""
Masked language modeling prediction pipeline using ModelWithLMHead head. See the
`masked language modeling usage <../usage.html#masked-language-modeling>`__ examples for more information.
This mask filling pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "fill-mask", for predicting masked tokens in a sequence.
The models that this pipeline can use are models that have been trained with a masked language modeling objective,
which includes the bi-directional models in the library.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=lm-head>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
topk=5,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
self.check_model_type(TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_MASKED_LM_MAPPING)
self.topk = topk
def ensure_exactly_one_mask_token(self, masked_index: np.ndarray):
numel = np.prod(masked_index.shape)
if numel > 1:
raise PipelineException(
"fill-mask",
self.model.base_model_prefix,
f"More than one mask_token ({self.tokenizer.mask_token}) is not supported",
)
elif numel < 1:
raise PipelineException(
"fill-mask",
self.model.base_model_prefix,
f"No mask_token ({self.tokenizer.mask_token}) found on the input",
)
def __call__(self, *args, **kwargs):
inputs = self._parse_and_tokenize(*args, **kwargs)
outputs = self._forward(inputs, return_tensors=True)
results = []
batch_size = outputs.shape[0] if self.framework == "tf" else outputs.size(0)
for i in range(batch_size):
input_ids = inputs["input_ids"][i]
result = []
if self.framework == "tf":
masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
# Fill mask pipeline supports only one ${mask_token} per sample
self.ensure_exactly_one_mask_token(masked_index)
logits = outputs[i, masked_index.item(), :]
probs = tf.nn.softmax(logits)
topk = tf.math.top_k(probs, k=self.topk)
values, predictions = topk.values.numpy(), topk.indices.numpy()
else:
masked_index = (input_ids == self.tokenizer.mask_token_id).nonzero()
# Fill mask pipeline supports only one ${mask_token} per sample
self.ensure_exactly_one_mask_token(masked_index.numpy())
logits = outputs[i, masked_index.item(), :]
probs = logits.softmax(dim=0)
values, predictions = probs.topk(self.topk)
for v, p in zip(values.tolist(), predictions.tolist()):
tokens = input_ids.numpy()
tokens[masked_index] = p
# Filter padding out:
tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
result.append(
{
"sequence": self.tokenizer.decode(tokens),
"score": v,
"token": p,
"token_str": self.tokenizer.convert_ids_to_tokens(p),
}
)
# Append
results += [result]
if len(results) == 1:
return results[0]
return results
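# Illustrative usage sketch (the checkpoint name is an assumption; any masked LM
# checkpoint works, and the mask token is taken from the tokenizer itself):
#
#   model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   fill_mask = FillMaskPipeline(model=model, tokenizer=tokenizer, topk=3)
#   fill_mask(f"Paris is the capital of {tokenizer.mask_token}.")
#   # -> [{'sequence': ..., 'score': ..., 'token': ..., 'token_str': ...}, ...]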
class TokenClassificationPipeline(Pipeline):
"""
Named Entity Recognition pipeline using ModelForTokenClassification head. See the
`named entity recognition usage <../usage.html#named-entity-recognition>`__ examples for more information.
This token recognition pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "ner", for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous.
The models that this pipeline can use are models that have been fine-tuned on a token classification task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=token-classification>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
default_input_names = "sequences"
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
ignore_labels=["O"],
task: str = "",
grouped_entities: bool = False,
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=binary_output,
task=task,
)
self.check_model_type(
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
)
self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
self.ignore_labels = ignore_labels
self.grouped_entities = grouped_entities
def __call__(self, *args, **kwargs):
inputs = self._args_parser(*args, **kwargs)
answers = []
for sentence in inputs:
# Manage correct placement of the tensors
with self.device_placement():
tokens = self.tokenizer(
sentence, return_attention_mask=False, return_tensors=self.framework, truncation=True,
)
# Forward
if self.framework == "tf":
entities = self.model(tokens.data)[0][0].numpy()
input_ids = tokens["input_ids"].numpy()[0]
else:
with torch.no_grad():
tokens = self.ensure_tensor_on_device(**tokens)
entities = self.model(**tokens)[0][0].cpu().numpy()
input_ids = tokens["input_ids"].cpu().numpy()[0]
score = np.exp(entities) / np.exp(entities).sum(-1, keepdims=True)
labels_idx = score.argmax(axis=-1)
entities = []
# Filter to labels not in `self.ignore_labels`
filtered_labels_idx = [
(idx, label_idx)
for idx, label_idx in enumerate(labels_idx)
if self.model.config.id2label[label_idx] not in self.ignore_labels
]
for idx, label_idx in filtered_labels_idx:
entity = {
"word": self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])),
"score": score[idx][label_idx].item(),
"entity": self.model.config.id2label[label_idx],
"index": idx,
}
entities += [entity]
# Append grouped entities
if self.grouped_entities:
answers += [self.group_entities(entities)]
# Append ungrouped entities
else:
answers += [entities]
if len(answers) == 1:
return answers[0]
return answers
def group_sub_entities(self, entities: List[dict]) -> dict:
"""
Returns grouped sub entities
"""
# Get the first entity in the entity group
entity = entities[0]["entity"]
scores = np.mean([entity["score"] for entity in entities])
tokens = [entity["word"] for entity in entities]
entity_group = {
"entity_group": entity,
"score": np.mean(scores),
"word": self.tokenizer.convert_tokens_to_string(tokens),
}
return entity_group
def group_entities(self, entities: List[dict]) -> List[dict]:
"""
Returns grouped entities
"""
entity_groups = []
entity_group_disagg = []
if entities:
last_idx = entities[-1]["index"]
for entity in entities:
is_last_idx = entity["index"] == last_idx
if not entity_group_disagg:
entity_group_disagg += [entity]
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
continue
# If the current entity is similar and adjacent to the previous entity, append it to the disaggregated entity group
# The split is meant to account for the "B" and "I" prefixes
if (
entity["entity"].split("-")[-1] == entity_group_disagg[-1]["entity"].split("-")[-1]
and entity["index"] == entity_group_disagg[-1]["index"] + 1
):
entity_group_disagg += [entity]
# Group the entities at the last entity
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
# If the current entity is different from the previous entity, aggregate the disaggregated entity group
else:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
entity_group_disagg = [entity]
# If it's the last entity, add it to the entity groups
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
return entity_groups
NerPipeline = TokenClassificationPipeline
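# Illustrative usage sketch (the checkpoint name is hypothetical; any token
# classification checkpoint works, NerPipeline being an alias of the class above):
#
#   model = AutoModelForTokenClassification.from_pretrained("some-ner-checkpoint")
#   tokenizer = AutoTokenizer.from_pretrained("some-ner-checkpoint")
#   ner = TokenClassificationPipeline(model=model, tokenizer=tokenizer, grouped_entities=True)
#   ner("My name is Wolfgang and I live in Berlin")
#   # -> [{'entity_group': ..., 'score': ..., 'word': ...}, ...]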
class QuestionAnsweringArgumentHandler(ArgumentHandler):
"""
QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped
to internal SquadExample / SquadFeature structures.
QuestionAnsweringArgumentHandler manages all the possible ways to create a SquadExample from the command-line
supplied arguments.
"""
def __call__(self, *args, **kwargs):
# Positional args: handling is essentially the same as X and data, so forward to avoid duplicating
if args is not None and len(args) > 0:
if len(args) == 1:
kwargs["X"] = args[0]
else:
kwargs["X"] = list(args)
# Generic compatibility with sklearn and Keras
# Batched data
if "X" in kwargs or "data" in kwargs:
inputs = kwargs["X"] if "X" in kwargs else kwargs["data"]
if isinstance(inputs, dict):
inputs = [inputs]
else:
# Copy to avoid overriding arguments
inputs = [i for i in inputs]
for i, item in enumerate(inputs):
if isinstance(item, dict):
if any(k not in item for k in ["question", "context"]):
raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
inputs[i] = QuestionAnsweringPipeline.create_sample(**item)
elif not isinstance(item, SquadExample):
raise ValueError(
"{} argument needs to be of type (list[SquadExample | dict], SquadExample, dict)".format(
"X" if "X" in kwargs else "data"
)
)
# Tabular input
elif "question" in kwargs and "context" in kwargs:
if isinstance(kwargs["question"], str):
kwargs["question"] = [kwargs["question"]]
if isinstance(kwargs["context"], str):
kwargs["context"] = [kwargs["context"]]
inputs = [
QuestionAnsweringPipeline.create_sample(q, c) for q, c in zip(kwargs["question"], kwargs["context"])
]
else:
raise ValueError("Unknown arguments {}".format(kwargs))
if not isinstance(inputs, list):
inputs = [inputs]
return inputs
class QuestionAnsweringPipeline(Pipeline):
"""
Question Answering pipeline using ModelForQuestionAnswering head. See the
`question answering usage <../usage.html#question-answering>`__ examples for more information.
This question answering can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "question-answering", for answering questions given a context.
The models that this pipeline can use are models that have been fine-tuned on a question answering task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=question-answering>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
default_input_names = "question,context"
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
device: int = -1,
task: str = "",
**kwargs
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=QuestionAnsweringArgumentHandler(),
device=device,
task=task,
**kwargs,
)
self.check_model_type(
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if self.framework == "tf" else MODEL_FOR_QUESTION_ANSWERING_MAPPING
)
@staticmethod
def create_sample(
question: Union[str, List[str]], context: Union[str, List[str]]
) -> Union[SquadExample, List[SquadExample]]:
"""
QuestionAnsweringPipeline leverages the SquadExample/SquadFeatures internally.
This helper method encapsulates all the logic for converting question(s) and context(s) to SquadExample(s).
We currently support extractive question answering.
Arguments:
question: (str, List[str]) The question(s) to ask for the associated context(s)
context: (str, List[str]) The context in which we will look for the answer.
Returns:
SquadExample initialized with the corresponding question and context.
"""
if isinstance(question, list):
return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
else:
return SquadExample(None, question, context, None, None, None)
def __call__(self, *args, **kwargs):
"""
Args:
We support multiple use-cases, the following are exclusive:
X: sequence of SquadExample
data: sequence of SquadExample
question: (str, List[str]), batch of question(s) to map along with context
context: (str, List[str]), batch of context(s) associated with the provided question keyword argument
Returns:
dict: {'answer': str, 'score': float, 'start': int, 'end': int}
answer: the textual answer in the initial context
score: the score the model assigned to the current answer
start: the character index in the original string corresponding to the beginning of the answer's span
end: the character index in the original string corresponding to the ending of the answer's span
"""
# Set defaults values
kwargs.setdefault("topk", 1)
kwargs.setdefault("doc_stride", 128)
kwargs.setdefault("max_answer_len", 15)
kwargs.setdefault("max_seq_len", 384)
kwargs.setdefault("max_question_len", 64)
kwargs.setdefault("handle_impossible_answer", False)
if kwargs["topk"] < 1:
raise ValueError("topk parameter should be >= 1 (got {})".format(kwargs["topk"]))
if kwargs["max_answer_len"] < 1:
raise ValueError("max_answer_len parameter should be >= 1 (got {})".format(kwargs["max_answer_len"]))
# Convert inputs to features
examples = self._args_parser(*args, **kwargs)
features_list = [
squad_convert_examples_to_features(
examples=[example],
tokenizer=self.tokenizer,
max_seq_length=kwargs["max_seq_len"],
doc_stride=kwargs["doc_stride"],
max_query_length=kwargs["max_question_len"],
padding_strategy=PaddingStrategy.DO_NOT_PAD.value,
is_training=False,
tqdm_enabled=False,
)
for example in examples
]
all_answers = []
for features, example in zip(features_list, examples):
model_input_names = self.tokenizer.model_input_names + ["input_ids"]
fw_args = {k: [feature.__dict__[k] for feature in features] for k in model_input_names}
# Manage tensor allocation on correct device
with self.device_placement():
if self.framework == "tf":
fw_args = {k: tf.constant(v) for (k, v) in fw_args.items()}
start, end = self.model(fw_args)[:2]
start, end = start.numpy(), end.numpy()
else:
with torch.no_grad():
# Retrieve the score for the context tokens only (removing question tokens)
fw_args = {k: torch.tensor(v, device=self.device) for (k, v) in fw_args.items()}
start, end = self.model(**fw_args)[:2]
start, end = start.cpu().numpy(), end.cpu().numpy()
min_null_score = 1000000 # large and positive
answers = []
for (feature, start_, end_) in zip(features, start, end):
# Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
undesired_tokens = np.abs(np.array(feature.p_mask) - 1) & feature.attention_mask
# Generate mask
undesired_tokens_mask = undesired_tokens == 0.0
# Make sure non-context indexes in the tensor cannot contribute to the softmax
start_ = np.where(undesired_tokens_mask, -10000.0, start_)
end_ = np.where(undesired_tokens_mask, -10000.0, end_)
# Normalize logits and spans to retrieve the answer
start_ = np.exp(start_ - np.log(np.sum(np.exp(start_), axis=-1, keepdims=True)))
end_ = np.exp(end_ - np.log(np.sum(np.exp(end_), axis=-1, keepdims=True)))
if kwargs["handle_impossible_answer"]:
min_null_score = min(min_null_score, (start_[0] * end_[0]).item())
# Mask CLS
start_[0] = end_[0] = 0.0
starts, ends, scores = self.decode(start_, end_, kwargs["topk"], kwargs["max_answer_len"])
char_to_word = np.array(example.char_to_word_offset)
# Convert the answer (tokens) back to the original text
answers += [
{
"score": score.item(),
"start": np.where(char_to_word == feature.token_to_orig_map[s])[0][0].item(),
"end": np.where(char_to_word == feature.token_to_orig_map[e])[0][-1].item(),
"answer": " ".join(
example.doc_tokens[feature.token_to_orig_map[s] : feature.token_to_orig_map[e] + 1]
),
}
for s, e, score in zip(starts, ends, scores)
]
if kwargs["handle_impossible_answer"]:
answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""})
answers = sorted(answers, key=lambda x: x["score"], reverse=True)[: kwargs["topk"]]
all_answers += answers
if len(all_answers) == 1:
return all_answers[0]
return all_answers
def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple:
"""
Take the output of any QuestionAnswering head and generate probabilities for each span to be
the actual answer.
In addition, it filters out some unwanted/impossible cases like answer len being greater than
max_answer_len or answer end position being before the starting position.
The method supports outputting the k-best answers through the topk argument.
Args:
start: numpy array, holding individual start probabilities for each token
end: numpy array, holding individual end probabilities for each token
topk: int, indicates how many possible answer span(s) to extract from the model's output
max_answer_len: int, maximum size of the answer to extract from the model's output
"""
# Ensure we have batch axis
if start.ndim == 1:
start = start[None]
if end.ndim == 1:
end = end[None]
# Compute the score of each tuple(start, end) to be the real answer
outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
# Remove candidate with end < start and end - start > max_answer_len
candidates = np.tril(np.triu(outer), max_answer_len - 1)
# Inspired by Chen et al. (https://github.com/facebookresearch/DrQA)
scores_flat = candidates.flatten()
if topk == 1:
idx_sort = [np.argmax(scores_flat)]
elif len(scores_flat) < topk:
idx_sort = np.argsort(-scores_flat)
else:
idx = np.argpartition(-scores_flat, topk)[0:topk]
idx_sort = idx[np.argsort(-scores_flat[idx])]
start, end = np.unravel_index(idx_sort, candidates.shape)[1:]
return start, end, candidates[0, start, end]
def span_to_answer(self, text: str, start: int, end: int):
"""
When decoding from token probabilities, this method maps token indexes to actual words in
the initial context.
Args:
text: str, the actual context to extract the answer from
start: int, starting answer token index
end: int, ending answer token index
Returns:
dict: {'answer': str, 'start': int, 'end': int}
"""
words = []
token_idx = char_start_idx = char_end_idx = chars_idx = 0
for i, word in enumerate(text.split(" ")):
token = self.tokenizer.tokenize(word)
# Append words if they are in the span
if start <= token_idx <= end:
if token_idx == start:
char_start_idx = chars_idx
if token_idx == end:
char_end_idx = chars_idx + len(word)
words += [word]
# Stop if we went over the end of the answer
if token_idx > end:
break
# Append the subtokenization length to the running index
token_idx += len(token)
chars_idx += len(word) + 1
# Join text with spaces
return {
"answer": " ".join(words),
"start": max(0, char_start_idx),
"end": min(len(text), char_end_idx),
}
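# Illustrative sketches only (not part of the original module): the first shows how the
# QuestionAnsweringPipeline defined above is typically driven through the `pipeline()` factory
# declared later in this file; the question/context strings are made-up examples and the checkpoint
# is whatever default the "question-answering" task resolves to.
def _example_question_answering_usage():
    qa = pipeline("question-answering")
    prediction = qa(
        question="Where does Sarah live?",
        context="My name is Sarah and I live in London.",
    )
    # For a single input the pipeline returns one dict:
    # {'answer': str, 'score': float, 'start': int, 'end': int}
    return prediction["answer"], prediction["score"]
# A second sketch of the span-scoring trick used in QuestionAnsweringPipeline.decode above:
# the outer product of start/end probabilities scores every (start, end) pair, and triu/tril
# masking drops spans that end before they start or exceed max_answer_len. The probability
# values are assumptions chosen only to make the arithmetic easy to follow.
def _example_span_scoring_sketch(max_answer_len=2):
    start = np.array([0.1, 0.6, 0.3])
    end = np.array([0.2, 0.3, 0.5])
    outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 0))
    candidates = np.tril(np.triu(outer), max_answer_len - 1)
    best_start, best_end = np.unravel_index(np.argmax(candidates), candidates.shape)
    return best_start, best_end, candidates[best_start, best_end]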
class SummarizationPipeline(Pipeline):
"""
Summarize news articles and other documents
Usage::
# use bart in pytorch
summarizer = pipeline("summarization")
summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
# use t5 in tf
summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
The models that this pipeline can use are models that have been fine-tuned on a summarization task,
which is currently, '`bart-large-cnn`', '`t5-small`', '`t5-base`', '`t5-large`', '`t5-3b`', '`t5-11b`'.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=summarization>`__.
Arguments:
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`, a string
checkpoint identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default of the pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default of the pipeline will be loaded.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(self, *args, **kwargs):
kwargs.update(task="summarization")
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
)
def __call__(
self, *documents, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Args:
*documents: (list of strings) articles to be summarized
return_text: (bool, default=True) whether to add a decoded "summary_text" to each result
return_tensors: (bool, default=False) whether to return the raw "summary_token_ids" to each result
clean_up_tokenization_spaces: (`optional`) bool whether to clean up extra spaces in the decoded output
**generate_kwargs: extra kwargs passed to `self.model.generate`_
Returns:
list of dicts with 'summary_text' and/or 'summary_token_ids' for each document_to_summarize
.. _`self.model.generate`:
https://huggingface.co/transformers/model_doc/bart.html#transformers.BartForConditionalGeneration.generate
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
assert len(documents) > 0, "Please provide a document to summarize"
if self.framework == "tf" and "BartForConditionalGeneration" in self.model.__class__.__name__:
raise NotImplementedError(
"Tensorflow is not yet supported for Bart. Please consider using T5, e.g. `t5-base`"
)
prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(documents[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
documents = ([prefix + document for document in documents[0]],)
padding = True
elif isinstance(documents[0], str):
documents = (prefix + documents[0],)
padding = False
else:
raise ValueError(
" `documents[0]`: {} have the wrong format. The should be either of type `str` or type `list`".format(
documents[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*documents, padding=padding)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
min_length = generate_kwargs.get("min_length", self.model.config.min_length)
if input_length < min_length // 2:
logger.warning(
"Your min_length is set to {}, but you input_length is only {}. You might consider decreasing min_length manually, e.g. summarizer('...', min_length=10)".format(
min_length, input_length
)
)
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if input_length < max_length:
logger.warning(
"Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)".format(
max_length, input_length
)
)
summaries = self.model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], **generate_kwargs,
)
results = []
for summary in summaries:
record = {}
if return_tensors:
record["summary_token_ids"] = summary
if return_text:
record["summary_text"] = self.tokenizer.decode(
summary, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
class TranslationPipeline(Pipeline):
"""
Translates from one language to another.
Usage::
en_fr_translator = pipeline("translation_en_to_fr")
en_fr_translator("How old are you?")
The models that this pipeline can use are models that have been fine-tuned on a translation task,
currently: "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=translation>`__.
Arguments:
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`, a string
checkpoint identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default of the pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default of the pipeline will be loaded.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
)
def __call__(
self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Args:
*args: (list of strings) texts to be translated
return_text: (bool, default=True) whether to add a decoded "translation_text" to each result
return_tensors: (bool, default=False) whether to return the raw "translation_token_ids" to each result
**generate_kwargs: extra kwargs passed to `self.model.generate`_
Returns:
list of dicts with 'translation_text' and/or 'translation_token_ids' for each text_to_translate
.. _`self.model.generate`:
https://huggingface.co/transformers/model_doc/bart.html#transformers.BartForConditionalGeneration.generate
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
args = ([prefix + text for text in args[0]],)
padding = True
elif isinstance(args[0], str):
args = (prefix + args[0],)
padding = False
else:
raise ValueError(
" `documents[0]`: {} have the wrong format. The should be either of type `str` or type `list`".format(
args[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*args, padding=padding)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if input_length > 0.9 * max_length:
logger.warning(
"Your input_length: {} is bigger than 0.9 * max_length: {}. You might consider increasing your max_length manually, e.g. translator('...', max_length=400)".format(
input_length, max_length
)
)
translations = self.model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], **generate_kwargs,
)
results = []
for translation in translations:
record = {}
if return_tensors:
record["translation_token_ids"] = translation
if return_text:
record["translation_text"] = self.tokenizer.decode(
translation,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
class Conversation:
"""
Utility class containing a conversation and its history. This class is meant to be used as an input to the
:obj:`~transformers.ConversationalPipeline`. The conversation contains a number of utility functions to manage the addition of new
user input and generated model responses. A conversation needs to contain an unprocessed user input before being
passed to the :obj:`~transformers.ConversationalPipeline`. This user input is either created when the class is instantiated, or by calling
`add_user_input("input")` after a conversation turn.
Usage::
conversation = Conversation("Going to the movies tonight - any suggestions?")
# Steps usually performed by the model when generating a response:
# 1. Mark the user input as processed (moved to the history)
conversation.mark_processed()
# 2. Append a model response
conversation.append_response("The Big Lebowski.")
conversation.add_user_input("Is it good?")
Arguments:
text (:obj:`str`, `optional`, defaults to :obj:`None`):
The initial user input to start the conversation.
If :obj:`None`, a user input needs to be provided manually using `add_user_input` before the conversation can begin.
conversation_id (:obj:`uuid.UUID`, `optional`, defaults to :obj:`None`):
Unique identifier for the conversation
If :obj:`None`, a random UUID4 id will be assigned to the conversation.
"""
def __init__(self, text: str = None, conversation_id: UUID = None):
if not conversation_id:
conversation_id = uuid.uuid4()
self.uuid: UUID = conversation_id
self.past_user_inputs: List[str] = []
self.generated_responses: List[str] = []
self.history: List[int] = []
self.new_user_input: Optional[str] = text
def add_user_input(self, text: str, overwrite: bool = False):
"""
Add a user input to the conversation for the next round. This populates the internal `new_user_input` field.
Args:
text: str, the user input for the next conversation round
overwrite: bool, flag indicating if existing and unprocessed user input should be overwritten when this function is called
"""
if self.new_user_input:
if overwrite:
logger.warning(
'User input added while unprocessed input was existing: "{}" was overwritten with: "{}".'.format(
self.new_user_input, text
)
)
self.new_user_input = text
else:
logger.warning(
'User input added while unprocessed input was existing: "{}" new input ignored: "{}". '
"Set `overwrite` to True to overwrite unprocessed user input".format(self.new_user_input, text)
)
else:
self.new_user_input = text
def mark_processed(self):
"""
Mark the conversation as processed (moves the content of `new_user_input` to `past_user_inputs`) and empties the
`new_user_input` field.
"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
self.new_user_input = None
def append_response(self, response: str):
"""
Append a response to the list of generated responses.
Args:
response: str, the model generated response
"""
self.generated_responses.append(response)
def set_history(self, history: List[int]):
"""
Updates the value of the history of the conversation. The history is represented by a list of `token_ids`. The
history is used by the model to generate responses based on the previous conversation turns.
Args:
history: (list of int), history of tokens provided and generated for this conversation
"""
self.history = history
def __repr__(self):
"""
Generates a string representation of the conversation.
Return:
:obj:`str` or :obj:`Dict`:
Example:
Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114
user >> Going to the movies tonight - any suggestions?
bot >> The Big Lebowski
"""
output = "Conversation id: {} \n".format(self.uuid)
for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
output += "user >> {} \n".format(user_input)
output += "bot >> {} \n".format(generated_response)
if self.new_user_input is not None:
output += "user >> {} \n".format(self.new_user_input)
return output
class ConversationalPipeline(Pipeline):
"""
Multi-turn conversational pipeline.
Usage::
conversational_pipeline = pipeline("conversational")
conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
conversation_2 = Conversation("What's the last book you have read?")
conversational_pipeline([conversation_1, conversation_2])
conversation_1.add_user_input("Is it an action movie?")
conversation_2.add_user_input("What is the genre of this book?")
conversational_pipeline([conversation_1, conversation_2])
The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task,
currently: "microsoft/DialoGPT-small", "microsoft/DialoGPT-medium", "microsoft/DialoGPT-large"
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=conversational>`__.
Arguments:
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`, a string
checkpoint identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default of the pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default of the pipeline will be loaded.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(self, min_length_for_response=32, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.tokenizer.eos_token_id is not None, "ConversationalPipeline tokenizer should have an EOS token set"
if self.tokenizer.pad_token_id is not None:
self.pad_token_id = self.tokenizer.pad_token_id
else:
self.pad_token_id = self.tokenizer.eos_token_id
self.min_length_for_response = min_length_for_response
def __call__(
self,
conversations: Union[Conversation, List[Conversation]],
clean_up_tokenization_spaces=True,
**generate_kwargs
):
r"""
Args:
conversations: (list of :class:`~transformers.pipelines.Conversation`) Conversations to generate responses for
**generate_kwargs: extra kwargs passed to `self.model.generate`_
Returns:
list of conversations with updated generated responses for those containing a new user input
"""
# Input validation
if isinstance(conversations, list):
for conversation in conversations:
assert isinstance(
conversation, Conversation
), "DialoguePipeline expects a Conversation or list of Conversations as an input"
if conversation.new_user_input is None:
raise ValueError(
"Conversation with UUID {} does not contain new user input to process. "
"Add user inputs with the conversation's `add_user_input` method".format(
conversation.uuid
)
)
assert (
self.tokenizer.pad_token_id is not None or self.tokenizer.eos_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id or eos_token_id when using a batch input"
elif isinstance(conversations, Conversation):
conversations = [conversations]
else:
raise ValueError("DialoguePipeline expects a Conversation or list of Conversations as an input")
with self.device_placement():
inputs = self._parse_and_tokenize([conversation.new_user_input for conversation in conversations])
histories = [conversation.history for conversation in conversations]
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
inputs = self._concat_inputs_history(inputs, histories, max_length)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
if input_length > 0.9 * max_length:
logger.warning(
"Longest conversation length: {} is bigger than 0.9 * max_length: {}. "
"You might consider trimming the early phase of the conversation".format(input_length, max_length)
)
generated_responses = self.model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], **generate_kwargs,
)
cleaned_history = self._clean_padding_history(generated_responses)
output = []
for conversation_index, conversation in enumerate(conversations):
conversation.mark_processed()
conversation.generated_responses.append(
self.tokenizer.decode(
cleaned_history[conversation_index][input_length:],
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
)
conversation.set_history(cleaned_history[conversation_index])
output.append(conversation)
if len(output) == 1:
return output[0]
else:
return output
def _parse_and_tokenize(self, *args, **kwargs):
"""
Parse arguments and tokenize, adding an EOS token at the end of the user input
"""
# Parse arguments
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer.batch_encode_plus(inputs, add_special_tokens=False, padding=False).get("input_ids", [])
for input in inputs:
input.append(self.tokenizer.eos_token_id)
return inputs
def _clean_padding_history(self, generated_tensor) -> List[List[int]]:
"""
Cleans the padding history. Padding may be generated in two places when multiple conversations are provided as
an input:
- at the end of the concatenated history and new user input, so that all inputs to the model have the same
length
- at the end of the generated response, as some responses will be longer than others
This method cleans up these padding tokens so that the history for each conversation is not impacted by the
batching process.
"""
outputs = []
for sequence in generated_tensor:
sequence_tokens = []
is_previous_pad = False
for token in sequence:
if token == self.pad_token_id:
if is_previous_pad:
continue
else:
is_previous_pad = True
else:
is_previous_pad = False
if self.framework == "pt":
sequence_tokens.append(token.item())
else:
sequence_tokens.append(int(token.numpy()))
outputs.append(sequence_tokens)
return outputs
def _concat_inputs_history(self, inputs: List[List[int]], histories: List[Optional[List[int]]], max_length: int):
"""
Builds an input prepended by the history for this conversation, allowing multi-turn conversation with context
"""
outputs = []
for new_input, history in zip(inputs, histories):
if history is not None:
new_input = history + new_input
if len(new_input) > max_length - self.min_length_for_response:
cutoff_eos_index = 0
while len(new_input) - cutoff_eos_index > max_length - self.min_length_for_response:
if cutoff_eos_index >= len(new_input):
break
cutoff_eos_index = new_input[cutoff_eos_index:].index(self.tokenizer.eos_token_id)
if cutoff_eos_index == 0 or cutoff_eos_index == len(new_input) - 1:
break
else:
new_input = new_input[cutoff_eos_index + 1 :]
outputs.append(new_input)
max_len = max([len(item) for item in outputs])
outputs = [output + [self.pad_token_id] * (max_len - len(output)) for output in outputs]
outputs = BatchEncoding(
{"input_ids": outputs, "attention_mask": [1] * len(outputs)}, tensor_type=self.framework
)
return outputs
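# Minimal sketch of the padding-cleanup idea implemented in ConversationalPipeline._clean_padding_history
# above, using plain ints instead of framework tensors. The pad_token_id value and the sample sequence
# are assumptions chosen only to make the collapsing behaviour visible.
def _example_clean_padding_sketch(sequence=(5, 7, 0, 0, 0, 9, 0), pad_token_id=0):
    cleaned = []
    is_previous_pad = False
    for token in sequence:
        if token == pad_token_id:
            if is_previous_pad:
                continue
            is_previous_pad = True
        else:
            is_previous_pad = False
        cleaned.append(token)
    # (5, 7, 0, 0, 0, 9, 0) -> [5, 7, 0, 9, 0]: runs of padding are collapsed to a single token
    return cleaned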
# Register all the supported tasks here
SUPPORTED_TASKS = {
"feature-extraction": {
"impl": FeatureExtractionPipeline,
"tf": TFAutoModel if is_tf_available() else None,
"pt": AutoModel if is_torch_available() else None,
"default": {"model": {"pt": "distilbert-base-cased", "tf": "distilbert-base-cased"}},
},
"sentiment-analysis": {
"impl": TextClassificationPipeline,
"tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
"pt": AutoModelForSequenceClassification if is_torch_available() else None,
"default": {
"model": {
"pt": "distilbert-base-uncased-finetuned-sst-2-english",
"tf": "distilbert-base-uncased-finetuned-sst-2-english",
},
},
},
"ner": {
"impl": TokenClassificationPipeline,
"tf": TFAutoModelForTokenClassification if is_tf_available() else None,
"pt": AutoModelForTokenClassification if is_torch_available() else None,
"default": {
"model": {
"pt": "dbmdz/bert-large-cased-finetuned-conll03-english",
"tf": "dbmdz/bert-large-cased-finetuned-conll03-english",
},
},
},
"question-answering": {
"impl": QuestionAnsweringPipeline,
"tf": TFAutoModelForQuestionAnswering if is_tf_available() else None,
"pt": AutoModelForQuestionAnswering if is_torch_available() else None,
"default": {
"model": {"pt": "distilbert-base-cased-distilled-squad", "tf": "distilbert-base-cased-distilled-squad"},
},
},
"fill-mask": {
"impl": FillMaskPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForMaskedLM if is_torch_available() else None,
"default": {"model": {"pt": "distilroberta-base", "tf": "distilroberta-base"}},
},
"summarization": {
"impl": SummarizationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "sshleifer/distilbart-cnn-12-6", "tf": "t5-small"}},
},
"translation_en_to_fr": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"translation_en_to_de": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"translation_en_to_ro": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"text-generation": {
"impl": TextGenerationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForCausalLM if is_torch_available() else None,
"default": {"model": {"pt": "gpt2", "tf": "gpt2"}},
},
"zero-shot-classification": {
"impl": ZeroShotClassificationPipeline,
"tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
"pt": AutoModelForSequenceClassification if is_torch_available() else None,
"default": {
"model": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
"config": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
"tokenizer": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
},
},
"conversational": {
"impl": ConversationalPipeline,
"tf": TFAutoModelForCausalLM if is_tf_available() else None,
"pt": AutoModelForCausalLM if is_torch_available() else None,
"default": {"model": {"pt": "microsoft/DialoGPT-medium", "tf": "microsoft/DialoGPT-medium"}},
},
}
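# Illustrative sketch (not part of the public API): shows how the SUPPORTED_TASKS registry above is
# consulted to resolve a task name into its pipeline class and default checkpoint for a framework.
# The task and framework values are examples; `pipeline()` below performs the same lookup with
# additional validation and model instantiation.
def _example_task_lookup(task="sentiment-analysis", framework="pt"):
    targeted_task = SUPPORTED_TASKS[task]
    impl = targeted_task["impl"]  # e.g. TextClassificationPipeline for "sentiment-analysis"
    default_checkpoint = targeted_task["default"]["model"][framework]
    return impl, default_checkpoint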
def pipeline(
task: str,
model: Optional = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
framework: Optional[str] = None,
**kwargs
) -> Pipeline:
"""
Utility factory method to build a pipeline.
Pipelines are made of:
- A Tokenizer instance in charge of mapping raw textual input to tokens
- A Model instance
- Some (optional) post processing for enhancing the model's output
Args:
task (:obj:`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- "feature-extraction": will return a :class:`~transformers.FeatureExtractionPipeline`
- "sentiment-analysis": will return a :class:`~transformers.TextClassificationPipeline`
- "ner": will return a :class:`~transformers.TokenClassificationPipeline`
- "question-answering": will return a :class:`~transformers.QuestionAnsweringPipeline`
- "fill-mask": will return a :class:`~transformers.FillMaskPipeline`
- "summarization": will return a :class:`~transformers.SummarizationPipeline`
- "translation_xx_to_yy": will return a :class:`~transformers.TranslationPipeline`
- "text-generation": will return a :class:`~transformers.TextGenerationPipeline`
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`,
a model identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default for this pipeline will be loaded.
config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`, defaults to :obj:`None`):
The configuration that will be used by the pipeline to instantiate the model. This can be :obj:`None`,
a model identifier or an actual pre-trained model configuration inheriting from
:class:`~transformers.PretrainedConfig`.
If :obj:`None`, the default for this pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a model identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default for this pipeline will be loaded.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
Returns:
:class:`~transformers.Pipeline`: Class inheriting from :class:`~transformers.Pipeline`, according to
the task.
Examples::
from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
# Sentiment analysis pipeline
pipeline('sentiment-analysis')
# Question answering pipeline, specifying the checkpoint identifier
pipeline('question-answering', model='distilbert-base-cased-distilled-squad', tokenizer='bert-base-cased')
# Named entity recognition pipeline, passing in a specific model and tokenizer
model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
pipeline('ner', model=model, tokenizer=tokenizer)
"""
# Retrieve the task
if task not in SUPPORTED_TASKS:
raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
framework = framework or get_framework(model)
targeted_task = SUPPORTED_TASKS[task]
task_class, model_class = targeted_task["impl"], targeted_task[framework]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
model = targeted_task["default"]["model"][framework]
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model, str):
tokenizer = model
elif isinstance(config, str):
tokenizer = config
else:
# Impossible to guess which tokenizer is the right one here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
modelcard = None
# Try to infer modelcard from model or config name (if provided as str)
if isinstance(model, str):
modelcard = model
elif isinstance(config, str):
modelcard = config
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
# Instantiate config if needed
if isinstance(config, str):
config = AutoConfig.from_pretrained(config)
# Instantiate modelcard if needed
if isinstance(modelcard, str):
modelcard = ModelCard.from_pretrained(modelcard)
# Instantiate model if needed
if isinstance(model, str):
# Handle transparent TF/PT model conversion
model_kwargs = {}
if framework == "pt" and model.endswith(".h5"):
model_kwargs["from_tf"] = True
logger.warning(
"Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
"Trying to load the model with PyTorch."
)
elif framework == "tf" and model.endswith(".bin"):
model_kwargs["from_pt"] = True
logger.warning(
"Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
"Trying to load the model with Tensorflow."
)
model = model_class.from_pretrained(model, config=config, **model_kwargs)
return task_class(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs)
|
py | b40f73b7ff33530b24c77426cce419663034de71 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import phyre.creator as creator_lib
@creator_lib.define_task
def build_task(C):
scene_width = C.scene.width
scene_height = C.scene.height
# Add two bars that are supposed to touch each other.
bar1 = C.add('dynamic bar', scale=0.25) \
.set_angle(90.) \
.set_bottom(0.) \
.set_left(.3 * scene_width)
bar2 = C.add('dynamic bar', scale=0.25) \
.set_angle(90.) \
.set_bottom(0.) \
.set_left(.7 * scene_width)
# Add obstacle.
C.add('static bar', scale=0.6) \
.set_center_x(0.5 * scene_width) \
.set_bottom(0.5 * scene_height)
# Create task.
C.update_task(body1=bar1,
body2=bar2,
relationships=[C.SpatialRelationship.TOUCHING])
|
py | b40f7518a9a213d7edc0e0121eaf50dbfc06b3bc | import io
import os
import pytest
from unittest import mock
from unittest.mock import sentinel
import trio
from trio import _core
from trio._file_io import AsyncIOWrapper, _FILE_SYNC_ATTRS, _FILE_ASYNC_METHODS
@pytest.fixture
def path(tmpdir):
return os.fspath(tmpdir.join("test"))
@pytest.fixture
def wrapped():
return mock.Mock(spec_set=io.StringIO)
@pytest.fixture
def async_file(wrapped):
return trio.wrap_file(wrapped)
def test_wrap_invalid():
with pytest.raises(TypeError):
trio.wrap_file(str())
def test_wrap_non_iobase():
class FakeFile:
def close(self): # pragma: no cover
pass
def write(self): # pragma: no cover
pass
wrapped = FakeFile()
assert not isinstance(wrapped, io.IOBase)
async_file = trio.wrap_file(wrapped)
assert isinstance(async_file, AsyncIOWrapper)
del FakeFile.write
with pytest.raises(TypeError):
trio.wrap_file(FakeFile())
def test_wrapped_property(async_file, wrapped):
assert async_file.wrapped is wrapped
def test_dir_matches_wrapped(async_file, wrapped):
attrs = _FILE_SYNC_ATTRS.union(_FILE_ASYNC_METHODS)
# all supported attrs in wrapped should be available in async_file
assert all(attr in dir(async_file) for attr in attrs if attr in dir(wrapped))
# all supported attrs not in wrapped should not be available in async_file
assert not any(
attr in dir(async_file) for attr in attrs if attr not in dir(wrapped)
)
def test_unsupported_not_forwarded():
class FakeFile(io.RawIOBase):
def unsupported_attr(self): # pragma: no cover
pass
async_file = trio.wrap_file(FakeFile())
assert hasattr(async_file.wrapped, "unsupported_attr")
with pytest.raises(AttributeError):
getattr(async_file, "unsupported_attr")
def test_sync_attrs_forwarded(async_file, wrapped):
for attr_name in _FILE_SYNC_ATTRS:
if attr_name not in dir(async_file):
continue
assert getattr(async_file, attr_name) is getattr(wrapped, attr_name)
def test_sync_attrs_match_wrapper(async_file, wrapped):
for attr_name in _FILE_SYNC_ATTRS:
if attr_name in dir(async_file):
continue
with pytest.raises(AttributeError):
getattr(async_file, attr_name)
with pytest.raises(AttributeError):
getattr(wrapped, attr_name)
def test_async_methods_generated_once(async_file):
for meth_name in _FILE_ASYNC_METHODS:
if meth_name not in dir(async_file):
continue
assert getattr(async_file, meth_name) is getattr(async_file, meth_name)
def test_async_methods_signature(async_file):
# use read as a representative of all async methods
assert async_file.read.__name__ == "read"
assert async_file.read.__qualname__ == "AsyncIOWrapper.read"
assert "io.StringIO.read" in async_file.read.__doc__
async def test_async_methods_wrap(async_file, wrapped):
for meth_name in _FILE_ASYNC_METHODS:
if meth_name not in dir(async_file):
continue
meth = getattr(async_file, meth_name)
wrapped_meth = getattr(wrapped, meth_name)
value = await meth(sentinel.argument, keyword=sentinel.keyword)
wrapped_meth.assert_called_once_with(
sentinel.argument, keyword=sentinel.keyword
)
assert value == wrapped_meth()
wrapped.reset_mock()
async def test_async_methods_match_wrapper(async_file, wrapped):
for meth_name in _FILE_ASYNC_METHODS:
if meth_name in dir(async_file):
continue
with pytest.raises(AttributeError):
getattr(async_file, meth_name)
with pytest.raises(AttributeError):
getattr(wrapped, meth_name)
async def test_open(path):
f = await trio.open_file(path, "w")
assert isinstance(f, AsyncIOWrapper)
await f.aclose()
async def test_open_context_manager(path):
async with await trio.open_file(path, "w") as f:
assert isinstance(f, AsyncIOWrapper)
assert not f.closed
assert f.closed
async def test_async_iter():
async_file = trio.wrap_file(io.StringIO("test\nfoo\nbar"))
expected = list(async_file.wrapped)
result = []
async_file.wrapped.seek(0)
async for line in async_file:
result.append(line)
assert result == expected
async def test_aclose_cancelled(path):
with _core.CancelScope() as cscope:
f = await trio.open_file(path, "w")
cscope.cancel()
with pytest.raises(_core.Cancelled):
await f.write("a")
with pytest.raises(_core.Cancelled):
await f.aclose()
assert f.closed
async def test_detach_rewraps_asynciobase():
raw = io.BytesIO()
buffered = io.BufferedReader(raw)
async_file = trio.wrap_file(buffered)
detached = await async_file.detach()
assert isinstance(detached, AsyncIOWrapper)
assert detached.wrapped is raw
|
py | b40f7665f70e2d42a9217253f373aa956a1bec7d | #!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
try:
import pymysql
except:
pass
import logging
import struct
from lib.core.common import getSafeExString
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
"""
Homepage: http://code.google.com/p/pymysql/
User guide: http://code.google.com/p/pymysql/
API: http://code.google.com/p/pymysql/
Debian package: <none>
License: MIT
Possible connectors: http://wiki.python.org/moin/MySQL
"""
def __init__(self):
GenericConnector.__init__(self)
def connect(self):
self.initConnection()
try:
self.connector = pymysql.connect(host=self.hostname, user=self.user, passwd=self.password, db=self.db, port=self.port, connect_timeout=conf.timeout, use_unicode=True)
except (pymysql.OperationalError, pymysql.InternalError, pymysql.ProgrammingError, struct.error) as ex:
raise SqlmapConnectionException(getSafeExString(ex))
self.initCursor()
self.printConnected()
def fetchall(self):
try:
return self.cursor.fetchall()
except pymysql.ProgrammingError as ex:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % getSafeExString(ex))
return None
def execute(self, query):
retVal = False
try:
self.cursor.execute(query)
retVal = True
except (pymysql.OperationalError, pymysql.ProgrammingError) as ex:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % getSafeExString(ex))
except pymysql.InternalError as ex:
raise SqlmapConnectionException(getSafeExString(ex))
self.connector.commit()
return retVal
def select(self, query):
retVal = None
if self.execute(query):
retVal = self.fetchall()
return retVal
|
py | b40f76e8a2cd3608f0db5ca05addb9477cfc2af9 | # Module 'dircache'
#
# Return a sorted list of the files in a POSIX directory, using a cache
# to avoid reading the directory more often than necessary.
# Also contains a subroutine to append slashes to directories.
import posix
import path
cache = {}
def listdir(path): # List directory contents, using cache
try:
cached_mtime, list = cache[path]
del cache[path]
except RuntimeError:
cached_mtime, list = -1, []
try:
mtime = posix.stat(path)[8]
except posix.error:
return []
if mtime <> cached_mtime:
try:
list = posix.listdir(path)
except posix.error:
return []
list.sort()
cache[path] = mtime, list
return list
opendir = listdir # XXX backward compatibility
def annotate(head, list): # Add '/' suffixes to directories
for i in range(len(list)):
if path.isdir(path.cat(head, list[i])):
list[i] = list[i] + '/'
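# Illustrative usage (comments only; '/usr/lib' is just an example path on a POSIX system):
#   names = listdir('/usr/lib')     # sorted listing, served from the mtime-keyed cache when possible
#   annotate('/usr/lib', names)     # directory entries now carry a trailing '/'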
|
py | b40f77257dbc68f54e1a91931b8f26e660071eda | # Python
import unittest
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Metaparset
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
# Parser
from genie.libs.parser.iosxe.show_service import ShowServiceGroupState, \
ShowServiceGroupStats, \
ShowServiceGroupTrafficStats
# ============================================
# Test for 'show service-group state'
# ============================================
class test_show_service_group_state(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'group': {
'1' : {
'state' : 'Up'
}
}
}
golden_output = {'execute.return_value': '''\
Load for five secs: 98%/0%; one minute: 98%; five minutes: 96%
Time source is NTP, 18:59:13.897 EST Web Nov 9 2016
Group State
1 Up
'''}
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowServiceGroupState(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowServiceGroupState(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
# ============================================
# Test for 'show service-group stats'
# ============================================
class test_show_service_group_stats(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output_1 = {
'service_group_statistics':{
'global': {
'num_of_groups' : 5,
'num_of_members' : 8005
},
'1' : {
'num_of_interfaces' : 1,
'num_of_members' : {
3000 : {
'service_instance': 3000
}
},
'members_joined': 13000,
'members_left': 10000
},
'2' : {
'num_of_interfaces' : 1,
'num_of_members' : {
2000 : {
'service_instance': 2000
}
},
'members_joined': 10000,
'members_left': 8000
},
'3' : {
'num_of_interfaces' : 1,
'num_of_members' : {
3000 : {
'service_instance': 3000
}
},
'members_joined': 9000,
'members_left': 6000
},
'10' : {
'num_of_interfaces' : 1,
'num_of_members' : {
3 : {
'service_instance': 3
}
},
'members_joined': 8003,
'members_left': 8000
},
'20' : {
'num_of_interfaces' : 1,
'num_of_members' : {
2 : {
'service_instance': 2
}
},
'members_joined': 8002,
'members_left': 8000
}
}
}
golden_parsed_output_2 = {
'service_group_statistics':{
'global': {
'num_of_groups' : 1,
'num_of_members' : 2
},
'1' : {
'num_of_interfaces' : 1,
'num_of_members' : {
2 : {
'sub_interface': 2
}
},
'members_joined': 103,
'members_left': 101
}
}
}
golden_output_1 = {'execute.return_value': '''\
Service Group global statistics:
Number of groups: 5
Number of members: 8005
Service Group 1 statistics:
Number of Interfaces: 1
Number of members: 3000
Service Instance 3000
Members joined: 13000
Members left: 10000
Service Group 2 statistics:
Number of Interfaces: 1
Number of members: 2000
Service Instance 2000
Members joined: 10000
Members left: 8000
Service Group 3 statistics:
Number of Interfaces: 1
Number of members: 3000
Service Instance 3000
Members joined: 9000
Members left: 6000
Service Group 10 statistics:
Number of Interfaces: 1
Number of members: 3
Service Instance 3
Members joined: 8003
Members left: 8000
Service Group 20 statistics:
Number of Interfaces: 1
Number of members: 2
Service Instance 2
Members joined: 8002
Members left: 8000
'''}
golden_output_2 = {'execute.return_value': '''\
Service Group global statistics:
Number of groups: 1
Number of members: 2
Service Group 1 statistics:
Number of Interfaces: 1
Number of members: 2
Sub-interface 2
Members joined: 103
Members left: 101
'''}
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowServiceGroupStats(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden_1(self):
self.device = Mock(**self.golden_output_1)
obj = ShowServiceGroupStats(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_1)
def test_golden_2(self):
self.device = Mock(**self.golden_output_2)
obj = ShowServiceGroupStats(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_2)
class test_show_traffic_stats(unittest.TestCase):
"""unit test for show service-group traffic-stats"""
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_output = {'execute.return_value': '''
Router# show service-group traffic-stats
Traffic Statistics of service groups:
Group Pks In Bytes In Pkts Out Bytes Out
1 1 22 3 62
2 0 0 0 0
3 0 0 0 0
10 0 0 0 0
20 0 0 0 0
'''}
golden_parsed_output = {
"group": {
1: {
"pkts_in": 1,
"bytes_in": 22,
"pkts_out": 3,
"bytes_out": 62
},
2: {
"pkts_in": 0,
"bytes_in": 0,
"pkts_out": 0,
"bytes_out": 0
},
3: {
"pkts_in": 0,
"bytes_in": 0,
"pkts_out": 0,
"bytes_out": 0
},
10: {
"pkts_in": 0,
"bytes_in": 0,
"pkts_out": 0,
"bytes_out": 0
},
20: {
"pkts_in": 0,
"bytes_in": 0,
"pkts_out": 0,
"bytes_out": 0
}
}
}
golden_output_group = {'execute.return_value': '''
Router# show service-group traffic-stats group 1
Traffic Statistics of service groups:
Group Pkts In Bytes In Pkts Out Bytes Out
1 78 10548 172 18606
'''}
golden_parsed_output_group = {
"group": {
1: {
"pkts_in": 78,
"bytes_in": 10548,
"pkts_out": 172,
"bytes_out": 18606
}
}
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowServiceGroupTrafficStats(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.device = Mock(**self.golden_output)
obj = ShowServiceGroupTrafficStats(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_show_group(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_group)
obj = ShowServiceGroupTrafficStats(device=self.device)
parsed_output = obj.parse(group="group 1")
self.assertEqual(parsed_output,self.golden_parsed_output_group)
if __name__ == '__main__':
unittest.main() |
py | b40f7a75dfc255be5bcb557c6b01a7a16a50d15f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.hdl.constants import WRITE, READ
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.amba.axi_comp.to_axiLite_test import Axi_to_AxiLite_TC
from hwtLib.avalon.mm import AvalonMmAgent
from hwtLib.examples.mem.avalonmm_ram import AvalonMmBRam
from hwtSimApi.constants import CLK_PERIOD
from pyMathBitPrecise.bit_utils import mask
class AvalonMmBram_TC(SimTestCase):
TRANSACTION_CNT = 32
MAX_LEN = 4
@classmethod
def setUpClass(cls):
cls.u = AvalonMmBRam()
cls.u.MAX_BURST = cls.MAX_LEN
cls.u.DATA_WIDTH = 64
cls.compileSim(cls.u)
def randomize_all(self):
pass
def test_nop(self):
self.randomize_all()
self.runSim(10 * CLK_PERIOD)
avmm = self.u.s._ag
self.assertEmpty(avmm.rDataAg.data)
self.assertEmpty(avmm.wRespAg.data)
def test_read(self):
N = 0
u = self.u
self.randomize_all()
expected_data = []
addr = 0
memory_init = []
avmm: AvalonMmAgent = u.s._ag
for _ in range(self.TRANSACTION_CNT):
len_ = 1 + Axi_to_AxiLite_TC.get_rand_in_range(self, self.MAX_LEN - 1)
N += len_ + 1 + 1
rand_data = [self._rand.getrandbits(u.DATA_WIDTH)
for _ in range(len_)]
# rand_data = [i + 1 for i in range(len_)]
memory_init.extend(rand_data)
# print(f"0x{addr:x}, {len_:d}", rand_data)
a_t = (READ, addr, len_, None, None)
avmm.addrAg.data.append(a_t)
expected_data.extend(rand_data)
addr += len(rand_data) * u.DATA_WIDTH // 8
mem = self.rtl_simulator.model.ram_inst.io.ram_memory
mem.val = mem.def_val = mem._dtype.from_py({i: v for i, v in enumerate(memory_init)})
self.runSim(N * 3 * CLK_PERIOD)
self.assertValSequenceEqual(avmm.rData, [(d, None) for d in expected_data])
def test_write(self):
N = self.TRANSACTION_CNT
u = self.u
avmm: AvalonMmAgent = u.s._ag
expected_data = []
addr = 0
m = mask(u.DATA_WIDTH // 8)
for _ in range(self.TRANSACTION_CNT):
len_ = 1 + Axi_to_AxiLite_TC.get_rand_in_range(self, self.MAX_LEN - 1)
N += len_ + 3
rand_data = [self._rand.getrandbits(u.DATA_WIDTH)
for _ in range(len_)]
rand_data = [i + 1 for i in range(len_)]
# print(f"0x{addr:x}, {len_:d}", rand_data)
word_i = addr // (u.DATA_WIDTH // 8)
for i, d in enumerate(rand_data):
a_t = (WRITE, addr, len_, d, m)
avmm.addrAg.data.append(a_t)
expected_data.append((word_i + i, d))
addr += len(rand_data) * u.DATA_WIDTH // 8
self.runSim(N * 3 * CLK_PERIOD)
for word_i, expected in expected_data:
d = self.rtl_simulator.model.ram_inst.io.ram_memory.val.val.get(word_i, None)
self.assertValEqual(d, expected, ("word ", word_i))
if __name__ == "__main__":
import unittest
suite = unittest.TestSuite()
# suite.addTest(AvalonMmBram_TC('test_write'))
suite.addTest(unittest.makeSuite(AvalonMmBram_TC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
|
py | b40f7b9faf79979592e582b40b13c89bcfcfb386 | import poe.utils as utils
import threading
import json
from nltk import bigrams
from cachetools import cached
from .cache import cache
with open('utils/items.json') as f:
items = json.load(f)
def calc_bigrams(name, item, matches):
count = 0
bi_item = [x for x in bigrams(item.lower())]
for n in name:
if n in bi_item:
count += 1
matches[item] = count
class POEClientException(Exception):
pass
@cached(cache)
def find_one(name: str, client, loop):
if 1:
item = client.find_items({'_pageName': name}, limit=1)
if not item:
item = client.find_passives({'name': name}, limit=1)
if not item:
matches = {}
processes = []
name_tri = [x for x in bigrams(name.lower())]
for item_name in items["names"]:
p = threading.Thread(target=calc_bigrams, args=(name_tri, item_name, matches,))
processes.append(p)
p.start()
for process in processes:
process.join()
# return {"matches": sorted(matches.items(), key=lambda it: it[1])[:3], "name": name.replace("%", "")}
return {"matches": sorted(matches.items(), key=lambda kv: kv[1], reverse=True)[:3], "name": name}
return item[0]
else:
        raise POEClientException  # signal the error instead of returning the exception class
@cached(cache)
def cache_pob_xml(xml, client):
stats = utils.parse_pob_xml(xml, client)
return stats
|
py | b40f7c4ae1e71893dd45749cc1273d4bf08eb6d0 | #!/usr/bin/env python3
##
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import struct
import argparse
import numpy
import warnings
delimiter = ','
def fixInput(val):
if val is None:
return 0
try:
retval = int(val)
except ValueError:
retval = ord(val)
return retval
if len(sys.argv) != 5 and len(sys.argv) != 6:
print("Usage:")
print("\tcsv_to_binary.py <input filename> <column choice> <datatype> <output filename> [delimiter]")
print()
print("The <column choice> should be an integer in the range [0, N-1], where N is the number of columns.")
print("The <datatype> option should be one of 'int', 'long', or 'float'.")
print("The [delimiter] is an optional argument, and defaults to '%s'" % delimiter)
print()
exit()
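# Example invocation (hypothetical file names):
#   ./csv_to_binary.py input.csv 2 float column2.bin ';'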
in_fname = sys.argv[1]
col_num = sys.argv[2]
datatype = sys.argv[3]
out_fname = sys.argv[4]
if len(sys.argv) == 6:
delimiter = sys.argv[5]
# Add more datatypes if needed
if datatype == "int":
dtype = "int32"
elif datatype == "long":
dtype = "int64"
elif datatype == "float":
dtype = "float32"
elif datatype == "string":
dtype = "str"
else:
print("Please select datatype int or long")
exit()
print("Reading column " + col_num + ", of type " + datatype + "...")
chunk_size = 10000000
iters=0
finished = False
offset = 0
with open(str(in_fname), "r") as inFile:
with open(str(out_fname), "wb") as newFile:
with warnings.catch_warnings():
while not finished:
in_data=numpy.genfromtxt(inFile, dtype=dtype,
max_rows=chunk_size, usecols=[int(col_num)], delimiter=delimiter, loose=False)
iters = iters+1
if offset == 0:
# don't warn about an empty file after we have read something
warnings.filterwarnings('ignore', r'genfromtxt: Empty input file:')
if in_data.size > 0:
in_data.tofile(newFile)
                    offset += in_data.size
else:
finished = True
print(iters)
|
py | b40f7d862e0291ee801ec848a6c834a56ca0fa68 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator, its enabled all the time, in the real car, that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle specific values (like vehicle_mass,
wheel_base) etc should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish it on the various publishers
that we have created in the `__init__` function.
'''
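# Implementation note (sketch, not part of the reference code): because a human
# driver can take over, the controller should be reset whenever dbw is disabled
# so the PID integral term does not wind up while the car is driven manually:
#
#     if not dbw_enabled:
#         self.throttle_controller.reset()
#         return 0.0, 0.0, 0.0
#
# The name `throttle_controller` is an assumption; the real internals live in
# twist_controller.py.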
class DBWNode(object):
def __init__(self):
rospy.init_node('dbw_node')
vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
brake_deadband = rospy.get_param('~brake_deadband', .1)
decel_limit = rospy.get_param('~decel_limit', -5)
accel_limit = rospy.get_param('~accel_limit', 1.)
wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
wheel_base = rospy.get_param('~wheel_base', 2.8498)
steer_ratio = rospy.get_param('~steer_ratio', 14.8)
max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
SteeringCmd, queue_size=1)
self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
ThrottleCmd, queue_size=1)
self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
BrakeCmd, queue_size=1)
# TODO: Create `Controller` object
# self.controller = Controller(<Arguments you wish to provide>)
self.controller = Controller(vehicle_mass = vehicle_mass,
fuel_capacity = fuel_capacity,
brake_deadband = brake_deadband,
decel_limit = decel_limit,
accel_limit = accel_limit,
wheel_radius = wheel_radius,
wheel_base = wheel_base,
steer_ratio = steer_ratio,
max_lat_accel = max_lat_accel,
max_steer_angle = max_steer_angle)
# TODO: Subscribe to all the topics you need to
rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
self.current_vel = None
self.current_ang_vel = None
self.dbw_enabled = None
self.linear_vel = None
self.angular_vel = None
self.throttle = self.steering = self.brake = 0
self.loop()
def loop(self):
        rate = rospy.Rate(20) # 20Hz
while not rospy.is_shutdown():
# TODO: Get predicted throttle, brake, and steering using `twist_controller`
# You should only publish the control commands if dbw is enabled
# throttle, brake, steering = self.controller.control(<proposed linear velocity>,
# <proposed angular velocity>,
# <current linear velocity>,
# <dbw status>,
# <any other argument you need>)
# if <dbw is enabled>:
# self.publish(throttle, brake, steer)
            if None not in (self.current_vel, self.linear_vel, self.angular_vel):
self.throttle, self.brake, self.steering = self.controller.control(self.current_vel,
self.dbw_enabled,
self.linear_vel,
self.angular_vel)
if self.dbw_enabled:
self.publish(self.throttle, self.brake, self.steering)
rate.sleep()
def dbw_enabled_cb(self, msg):
        self.dbw_enabled = msg.data  # unwrap std_msgs/Bool: keep the flag itself, not the message object
def twist_cb(self, msg):
self.linear_vel = msg.twist.linear.x
self.angular_vel = msg.twist.angular.z
def velocity_cb(self, msg):
self.current_vel = msg.twist.linear.x
def publish(self, throttle, brake, steer):
tcmd = ThrottleCmd()
tcmd.enable = True
tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
tcmd.pedal_cmd = throttle
self.throttle_pub.publish(tcmd)
scmd = SteeringCmd()
scmd.enable = True
scmd.steering_wheel_angle_cmd = steer
self.steer_pub.publish(scmd)
bcmd = BrakeCmd()
bcmd.enable = True
bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
bcmd.pedal_cmd = brake
self.brake_pub.publish(bcmd)
if __name__ == '__main__':
DBWNode()
|
py | b40f7dfd323477fd54899c7bc9b4ef876b2be026 | """
Tests for asynchronous vectorized environments.
"""
import gym
import numpy as np
import pytest
from codes.e_utils.tests.test_with_mpi import with_mpi
from .dummy_vec_env import DummyVecEnv
from .shmem_vec_env import ShmemVecEnv
from .subproc_vec_env import SubprocVecEnv
def assert_venvs_equal(venv1, venv2, num_steps):
"""
Compare two environments over num_steps steps and make sure
that the observations produced by each are the same when given
the same actions.
"""
assert venv1.num_envs == venv2.num_envs
assert venv1.observation_space.shape == venv2.observation_space.shape
assert venv1.observation_space.dtype == venv2.observation_space.dtype
assert venv1.action_space.shape == venv2.action_space.shape
assert venv1.action_space.dtype == venv2.action_space.dtype
try:
obs1, obs2 = venv1.reset(), venv2.reset()
assert np.array(obs1).shape == np.array(obs2).shape
assert np.array(obs1).shape == (venv1.num_envs,) + venv1.observation_space.shape
assert np.allclose(obs1, obs2)
venv1.action_space.seed(1337)
for _ in range(num_steps):
actions = np.array([venv1.action_space.sample() for _ in range(venv1.num_envs)])
for venv in [venv1, venv2]:
venv.step_async(actions)
outs1 = venv1.step_wait()
outs2 = venv2.step_wait()
for out1, out2 in zip(outs1[:3], outs2[:3]):
assert np.array(out1).shape == np.array(out2).shape
assert np.allclose(out1, out2)
assert list(outs1[3]) == list(outs2[3])
finally:
venv1.close()
venv2.close()
@pytest.mark.parametrize('klass', (ShmemVecEnv, SubprocVecEnv))
@pytest.mark.parametrize('dtype', ('uint8', 'float32'))
def test_vec_env(klass, dtype): # pylint: disable=R0914
"""
Test that a vectorized environment is equivalent to
DummyVecEnv, since DummyVecEnv is less likely to be
error prone.
"""
num_envs = 3
num_steps = 100
shape = (3, 8)
def make_fn(seed):
"""
Get an environment constructor with a seed.
"""
return lambda: SimpleEnv(seed, shape, dtype)
fns = [make_fn(i) for i in range(num_envs)]
env1 = DummyVecEnv(fns)
env2 = klass(fns)
assert_venvs_equal(env1, env2, num_steps=num_steps)
@pytest.mark.parametrize('dtype', ('uint8', 'float32'))
@pytest.mark.parametrize('num_envs_in_series', (3, 4, 6))
def test_sync_sampling(dtype, num_envs_in_series):
"""
Test that a SubprocVecEnv running with envs in series
outputs the same as DummyVecEnv.
"""
num_envs = 12
num_steps = 100
shape = (3, 8)
def make_fn(seed):
"""
Get an environment constructor with a seed.
"""
return lambda: SimpleEnv(seed, shape, dtype)
fns = [make_fn(i) for i in range(num_envs)]
env1 = DummyVecEnv(fns)
env2 = SubprocVecEnv(fns, in_series=num_envs_in_series)
assert_venvs_equal(env1, env2, num_steps=num_steps)
@pytest.mark.parametrize('dtype', ('uint8', 'float32'))
@pytest.mark.parametrize('num_envs_in_series', (3, 4, 6))
def test_sync_sampling_sanity(dtype, num_envs_in_series):
"""
Test that a SubprocVecEnv running with envs in series
outputs the same as SubprocVecEnv without running in series.
"""
num_envs = 12
num_steps = 100
shape = (3, 8)
def make_fn(seed):
"""
Get an environment constructor with a seed.
"""
return lambda: SimpleEnv(seed, shape, dtype)
fns = [make_fn(i) for i in range(num_envs)]
env1 = SubprocVecEnv(fns)
env2 = SubprocVecEnv(fns, in_series=num_envs_in_series)
assert_venvs_equal(env1, env2, num_steps=num_steps)
class SimpleEnv(gym.Env):
"""
An environment with a pre-determined observation space
and RNG seed.
"""
def __init__(self, seed, shape, dtype):
np.random.seed(seed)
self._dtype = dtype
self._start_obs = np.array(np.random.randint(0, 0x100, size=shape),
dtype=dtype)
self._max_steps = seed + 1
self._cur_obs = None
self._cur_step = 0
# this is 0xFF instead of 0x100 because the Box space includes
# the high end, while randint does not
self.action_space = gym.spaces.Box(low=0, high=0xFF, shape=shape, dtype=dtype)
self.observation_space = self.action_space
def step(self, action):
self._cur_obs += np.array(action, dtype=self._dtype)
self._cur_step += 1
done = self._cur_step >= self._max_steps
reward = self._cur_step / self._max_steps
return self._cur_obs, reward, done, {'foo': 'bar' + str(reward)}
def reset(self):
self._cur_obs = self._start_obs
self._cur_step = 0
return self._cur_obs
def render(self, mode=None):
raise NotImplementedError
@with_mpi()
def test_mpi_with_subprocvecenv():
shape = (2,3,4)
nenv = 1
venv = SubprocVecEnv([lambda: SimpleEnv(0, shape, 'float32')] * nenv)
ob = venv.reset()
venv.close()
assert ob.shape == (nenv,) + shape
|
py | b40f7e5066f5af6d57873c4664cf34319959826e | from scabbard import get_client
def test__v1_lists_utilities_airlines_alliances_get():
client = get_client()
alliances = client.Utility\
.V1ListsUtilitiesAirlinesAlliancesGet(alliancecode='*S')\
.result()
assert 'SkyTeam' == alliances['AllianceInfo'][0]['AllianceName']
|
py | b40f7f7f6cb3d169846ac560e4e94914770d1e83 | #! /usr/bin/env python3
# coding: utf-8
from django.http import HttpResponse
from TestModel.models import Test
# Database operations
def testdb(request):
    # Insert a row
test1 = Test(name='timilong')
test1.save()
    return HttpResponse("<p>" + "Update successful</p>")
"""
    # Initialization
response = ""
response1 = ""
    # Get all rows through the objects model manager's all()
list = Test.objects.all()
    # Filter the result set with filter()
response2 = Test.objects.filter(id=1)
    # Get a single object
    response3 = Test.objects.get(id=1)
    # Limit the returned data
Test.objects.order_by('name')[0:2]
    # Sort the data
Test.objects.order_by('id')
    # Combine the methods above
Test.objects.filter(name='timilong').order_by('id')
    # Output all data
for var in list:
response1 += var.name + " "
response = response1
    # Update data, method 1
test1 = Test.objects.get(id=1)
test1.name = "Google"
test1.save()
    # Update data, method 2
test2 = Test.objects.filter(id=2)
test2.update(name="Facebook")
    # Update all records
Test.objects.all().update(name="Twitter")
    # Delete the record with id=1
test3 = Test.objects.get(id=1)
test3.delete()
    # Delete the record with id=2
test4 = Test.objects.filter(id=2)
test4.delete()
    # Delete all data
Test.objects.all().delete()
"""
|
py | b40f84af8aaf6f2ffb5ef9e215a730d83acbb2f9 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
import re
import mock
from click.testing import CliRunner
from tqdm.utils import _supports_unicode
from shub import config
class AssertInvokeRaisesMixin(object):
def assertInvokeRaises(self, exc, *args, **kwargs):
"""
Invoke self.runner (or a new runner if nonexistent) with given *args
and **kwargs, assert that it raised an exception of type exc, and
return the runner's result.
"""
runner = getattr(self, 'runner', None) or CliRunner()
kwargs['standalone_mode'] = False
result = runner.invoke(*args, **kwargs)
self.assertIsInstance(result.exception, exc)
return result
def mock_conf(testcase, target=None, attr=None, conf=None):
if not conf:
conf = config.ShubConfig()
conf.projects.update({
'default': 1,
'prod': 2,
'vagrant': 'vagrant/3',
'custom1': {'id': 4, 'image': False},
'custom2': {'id': 5, 'image': True},
'custom3': {'id': 6, 'image': 'custom/image'},
})
conf.endpoints.update({
'vagrant': 'https://vagrant_ep/api/',
})
conf.apikeys.update({
'default': 32 * '1',
'vagrant': 32 * '2',
})
conf.version = 'version'
if target:
if attr:
patcher = mock.patch.object(target, attr, return_value=conf,
autospec=True)
else:
patcher = mock.patch(target, return_value=conf, autospec=True)
else:
patcher = mock.patch('shub.config.load_shub_config', return_value=conf,
autospec=True)
patcher.start()
testcase.addCleanup(patcher.stop)
return conf
def _is_tqdm_in_ascii_mode():
"""Small helper deciding about placeholders for tqdm progress bars."""
with CliRunner().isolation():
return not _supports_unicode(sys.stdout)
def format_expected_progress(progress):
"""Replace unicode symbols in progress string for tqdm in ascii mode."""
if _is_tqdm_in_ascii_mode():
to_replace = {'█': '#', '▏': '2', '▎': '3', '▌': '5', '▋': '6'}
for sym in to_replace:
progress = progress.replace(sym, to_replace[sym])
return progress
def clean_progress_output(output):
"""Return output cleaned from \\r, \\n, and ANSI escape sequences"""
return re.sub(
r"""(?x) # Matches:
\n|\r| # 1. newlines or carriage returns, or
(\x1b\[|\x9b) # 2. ANSI control sequence introducer ("ESC[" or single
# byte \x9b) +
[^@-_]*[@-_]| # private mode characters + command character, or
\x1b[@-_] # 3. ANSI control codes without sequence introducer
# ("ESC" + single command character)
""",
'', output)
|
py | b40f850dd066c0dab221fbb80c6c6efea58129f0 | from dataclasses import dataclass
from typing import List, Optional, Tuple
from dogia.types.blockchain_format.foliage import Foliage
from dogia.types.blockchain_format.reward_chain_block import RewardChainBlock, RewardChainBlockUnfinished
from dogia.types.blockchain_format.sized_bytes import bytes32
from dogia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from dogia.types.blockchain_format.vdf import VDFInfo, VDFProof
from dogia.types.end_of_slot_bundle import EndOfSubSlotBundle
from dogia.util.ints import uint8, uint32, uint64, uint128
from dogia.util.streamable import Streamable, streamable
"""
Protocol between timelord and full node.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class NewPeakTimelord(Streamable):
reward_chain_block: RewardChainBlock
difficulty: uint64
deficit: uint8
sub_slot_iters: uint64 # SSi in the slot where NewPeak has been infused
sub_epoch_summary: Optional[
SubEpochSummary
] # If NewPeak is the last slot in epoch, the next slot should include this
previous_reward_challenges: List[Tuple[bytes32, uint128]]
last_challenge_sb_or_eos_total_iters: uint128
passes_ses_height_but_not_yet_included: bool
@dataclass(frozen=True)
@streamable
class NewUnfinishedBlockTimelord(Streamable):
reward_chain_block: RewardChainBlockUnfinished # Reward chain trunk data
difficulty: uint64
sub_slot_iters: uint64 # SSi in the slot where block is infused
foliage: Foliage # Reward chain foliage data
sub_epoch_summary: Optional[SubEpochSummary] # If this is the last slot in epoch, the next slot should include this
# This is the last thing infused in the reward chain before this signage point.
# The challenge that the SP reward chain VDF is based off of, or in the case of sp index 0, the previous infusion
rc_prev: bytes32
@dataclass(frozen=True)
@streamable
class NewInfusionPointVDF(Streamable):
unfinished_reward_hash: bytes32
challenge_chain_ip_vdf: VDFInfo
challenge_chain_ip_proof: VDFProof
reward_chain_ip_vdf: VDFInfo
reward_chain_ip_proof: VDFProof
infused_challenge_chain_ip_vdf: Optional[VDFInfo]
infused_challenge_chain_ip_proof: Optional[VDFProof]
@dataclass(frozen=True)
@streamable
class NewSignagePointVDF(Streamable):
index_from_challenge: uint8
challenge_chain_sp_vdf: VDFInfo
challenge_chain_sp_proof: VDFProof
reward_chain_sp_vdf: VDFInfo
reward_chain_sp_proof: VDFProof
@dataclass(frozen=True)
@streamable
class NewEndOfSubSlotVDF(Streamable):
end_of_sub_slot_bundle: EndOfSubSlotBundle
@dataclass(frozen=True)
@streamable
class RequestCompactProofOfTime(Streamable):
new_proof_of_time: VDFInfo
header_hash: bytes32
height: uint32
field_vdf: uint8
@dataclass(frozen=True)
@streamable
class RespondCompactProofOfTime(Streamable):
vdf_info: VDFInfo
vdf_proof: VDFProof
header_hash: bytes32
height: uint32
field_vdf: uint8
|
py | b40f8529e4d163d524dabe4b14102b88addff0f6 | from datetime import datetime, timedelta
import pytest
from flask_sqlalchemy import BaseQuery
from c3bottles import db
from c3bottles.model.drop_point import DropPoint
from c3bottles.model.location import Location
from c3bottles.model.report import Report
dp_number = 1
creation_time = datetime.now()
description = "somewhere"
lat = 20
lng = 10
level = 2
@pytest.fixture
def dp():
db.session.expunge_all()
return DropPoint(
dp_number, time=creation_time, description=description, lat=lat, lng=lng, level=level
)
def test_dp_equals_dp_from_db(dp):
assert db.session.query(DropPoint).get(dp_number) == dp
def test_dp_number_correct(dp):
assert dp.number == dp_number
def test_dp_not_removed(dp):
assert dp.removed is None
def test_reports_are_query(dp):
assert isinstance(dp.reports, BaseQuery)
def test_dp_has_no_reports(dp):
assert not dp.reports.all()
def test_visits_are_query(dp):
assert isinstance(dp.visits, BaseQuery)
def test_dp_has_no_visits(dp):
assert not dp.visits.all()
def test_dp_has_a_location(dp):
assert len(dp.locations) == 1
def test_dp_location_is_location(dp):
assert isinstance(dp.locations[0], Location)
def test_dp_location_desc(dp):
assert dp.locations[0].description == description
def test_dp_location_lat(dp):
assert dp.locations[0].lat == lat
def test_dp_location_lng(dp):
assert dp.locations[0].lng == lng
def test_dp_location_level(dp):
assert dp.locations[0].level == level
def test_dp_location_time(dp):
assert dp.locations[0].time == creation_time
def test_dp_creation_time(dp):
assert dp.time == creation_time
def test_dp_number_unique(dp):
with pytest.raises(ValueError, match="already exists"):
DropPoint(dp_number, lat=0, lng=0, level=1)
@pytest.mark.parametrize("num", [-1, "foo", None, False])
def test_dp_invalid_number(num):
with pytest.raises(ValueError, match="number"):
DropPoint(num, lat=0, lng=0, level=1)
def test_dp_created_in_future():
time_in_future = datetime.today() + timedelta(hours=1)
with pytest.raises(ValueError, match="future"):
DropPoint(dp_number + 1, time=time_in_future, lat=0, lng=0, level=1)
@pytest.mark.parametrize("time", [-1, "foo", False])
def test_dp_invalid_creation_time(time):
with pytest.raises(ValueError, match="not a datetime"):
DropPoint(dp_number + 1, time=time, lat=0, lng=0, level=1) # noqa
def test_dp_getter_returns_dp(dp):
assert DropPoint.query.get(dp_number) == dp
@pytest.mark.parametrize("num", [-1, "foo", False, 1234])
def test_dp_getter_return_none_for_nonexistent(num):
assert DropPoint.query.get(num) is None # noqa
def test_dp_location_getter_returns_location_object(dp):
assert isinstance(dp.location, Location)
def test_dp_is_in_default_state(dp):
assert dp.last_state == Report.states[1]
def test_dp_total_report_count_is_zero(dp):
assert dp.total_report_count == 0
def test_dp_new_report_count_is_zero(dp):
assert dp.new_report_count == 0
def test_dp_no_last_visit(dp):
assert not dp.last_visit
def test_dp_no_last_report(dp):
assert not dp.last_report
def test_dp_no_new_reports(dp):
assert not dp.new_reports
def test_dp_visit_interval_greater_zero(dp):
assert dp.visit_interval > 0
def test_dp_history_is_list(dp):
assert isinstance(dp.history, list)
def test_dp_history_is_list_of_dicts(dp):
for entry in dp.history:
assert type(entry) is dict
def test_dp_history_length(dp):
# the history should contain the creation
# and the setting of the initial location
assert len(dp.history) == 2 # noqa
def test_dps_json_is_string(dp):
assert type(DropPoint.get_dps_json()) is str
def test_dps_json_is_not_empty(dp):
assert len(DropPoint.get_dps_json()) > 1
def test_fresh_dps_json_is_empty(dp):
assert DropPoint.get_dps_json(datetime.now()) == "{}"
def test_dps_json_since_creation_empty(dp):
assert DropPoint.get_dps_json(creation_time) == "{}"
def test_dps_json_before_creation_not_empty(dp):
assert DropPoint.get_dps_json(creation_time - timedelta(seconds=1)) != {}
def test_dp_removed_is_datetime(dp):
dp.remove()
assert isinstance(dp.removed, datetime)
def test_dp_removed_is_removal_time(dp):
removal_time = datetime.now()
dp.remove(removal_time)
assert dp.removed == removal_time
def test_dp_removed_visit_priority(dp):
dp.remove()
assert dp.priority == 0
def test_dp_removal_in_future(dp):
with pytest.raises(ValueError, match="future"):
dp.remove(datetime.today() + timedelta(hours=1))
def test_dp_invalid_removal_time(dp):
with pytest.raises(TypeError, match="not a datetime"):
dp.remove("foo") # noqa
def test_dp_already_removed(dp):
dp.remove()
with pytest.raises(RuntimeError, match="already removed"):
dp.remove()
first_report_time = datetime.now()
@pytest.fixture
def first_report(dp):
return Report(dp, state=Report.states[0], time=first_report_time)
def test_dp_report_time(first_report):
assert first_report.time == first_report_time
def test_dp_report_state(first_report):
assert first_report.state == Report.states[0]
def test_dp_report_is_first_report(dp, first_report):
assert dp.reports[0] == first_report
def test_dp_report_is_last_report(dp, first_report):
assert dp.last_report == first_report
def test_dp_state_is_reported_state(dp, first_report):
assert dp.last_state == Report.states[0]
def test_dp_total_report_count(dp, first_report):
assert dp.total_report_count == 1
def test_dp_new_report_count(dp, first_report):
assert dp.new_report_count == 1
def test_dp_first_new_report(dp, first_report):
assert dp.new_reports[0] == first_report # noqa
second_report_time = datetime.now()
@pytest.fixture
def second_report(dp, first_report):
return Report(dp, state=Report.states[-1], time=second_report_time)
def test_dp_first_report_is_first_in_list(dp, first_report, second_report):
assert dp.reports[0] == first_report
def test_dp_second_report_is_last_in_list(dp, second_report):
assert dp.reports[-1] == second_report
def test_dp_second_report_is_returned_as_last(dp, second_report):
assert dp.last_report == second_report
def test_dp_second_state_is_reported_state(dp, second_report):
assert dp.last_state == Report.states[-1]
def test_dp_has_two_total_reports(dp, second_report):
assert dp.total_report_count == 2
def test_dp_has_two_new_reports(dp, second_report):
assert dp.new_report_count == 2
|
py | b40f87562351934e331fa3a153b7a28681aa783f | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class ApplySetFieldMiss(Base):
"""Select the type of Apply Set Field Miss capability that the table miss flow entry will support.
The ApplySetFieldMiss class encapsulates a required applySetFieldMiss resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'applySetFieldMiss'
_SDM_ATT_MAP = {
'ArpDestinationHardwareAddress': 'arpDestinationHardwareAddress',
'ArpDestinationIpv4Address': 'arpDestinationIpv4Address',
'ArpOpcode': 'arpOpcode',
'ArpSourceHardwareAddress': 'arpSourceHardwareAddress',
'ArpSourceIpv4Address': 'arpSourceIpv4Address',
'EthernetDestination': 'ethernetDestination',
'EthernetSource': 'ethernetSource',
'EthernetType': 'ethernetType',
'IcmpCode': 'icmpCode',
'IcmpType': 'icmpType',
'Icmpv6Code': 'icmpv6Code',
'Icmpv6Type': 'icmpv6Type',
'IpDscp': 'ipDscp',
'IpEcn': 'ipEcn',
'IpProtocol': 'ipProtocol',
'Ipv4Destination': 'ipv4Destination',
'Ipv4Source': 'ipv4Source',
'Ipv6Destination': 'ipv6Destination',
'Ipv6ExtHeader': 'ipv6ExtHeader',
'Ipv6FlowLabel': 'ipv6FlowLabel',
'Ipv6NdSll': 'ipv6NdSll',
'Ipv6NdTarget': 'ipv6NdTarget',
'Ipv6NdTll': 'ipv6NdTll',
'Ipv6Source': 'ipv6Source',
'MplsBos': 'mplsBos',
'MplsLabel': 'mplsLabel',
'MplsTc': 'mplsTc',
'PbbIsid': 'pbbIsid',
'SctpDestination': 'sctpDestination',
'SctpSource': 'sctpSource',
'TcpDestination': 'tcpDestination',
'TcpSource': 'tcpSource',
'TunnelId': 'tunnelId',
'UdpDestination': 'udpDestination',
'UdpSource': 'udpSource',
'VlanId': 'vlanId',
'VlanPriority': 'vlanPriority',
}
def __init__(self, parent):
super(ApplySetFieldMiss, self).__init__(parent)
@property
def ArpDestinationHardwareAddress(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for ARP Destination Hardware Address is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['ArpDestinationHardwareAddress'])
@ArpDestinationHardwareAddress.setter
def ArpDestinationHardwareAddress(self, value):
self._set_attribute(self._SDM_ATT_MAP['ArpDestinationHardwareAddress'], value)
@property
def ArpDestinationIpv4Address(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for ARP Destination IPv4 Address is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['ArpDestinationIpv4Address'])
@ArpDestinationIpv4Address.setter
def ArpDestinationIpv4Address(self, value):
self._set_attribute(self._SDM_ATT_MAP['ArpDestinationIpv4Address'], value)
@property
def ArpOpcode(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for ARP Opcode is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['ArpOpcode'])
@ArpOpcode.setter
def ArpOpcode(self, value):
self._set_attribute(self._SDM_ATT_MAP['ArpOpcode'], value)
@property
def ArpSourceHardwareAddress(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for ARP Source Hardware Address is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['ArpSourceHardwareAddress'])
@ArpSourceHardwareAddress.setter
def ArpSourceHardwareAddress(self, value):
self._set_attribute(self._SDM_ATT_MAP['ArpSourceHardwareAddress'], value)
@property
def ArpSourceIpv4Address(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for ARP Source IPv4 Address is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['ArpSourceIpv4Address'])
@ArpSourceIpv4Address.setter
def ArpSourceIpv4Address(self, value):
self._set_attribute(self._SDM_ATT_MAP['ArpSourceIpv4Address'], value)
@property
def EthernetDestination(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for Ethernet Destination is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['EthernetDestination'])
@EthernetDestination.setter
def EthernetDestination(self, value):
self._set_attribute(self._SDM_ATT_MAP['EthernetDestination'], value)
@property
def EthernetSource(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for Ethernet Source is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['EthernetSource'])
@EthernetSource.setter
def EthernetSource(self, value):
self._set_attribute(self._SDM_ATT_MAP['EthernetSource'], value)
@property
def EthernetType(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for Ethernet Type is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['EthernetType'])
@EthernetType.setter
def EthernetType(self, value):
self._set_attribute(self._SDM_ATT_MAP['EthernetType'], value)
@property
def IcmpCode(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for ICMP Code is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['IcmpCode'])
@IcmpCode.setter
def IcmpCode(self, value):
self._set_attribute(self._SDM_ATT_MAP['IcmpCode'], value)
@property
def IcmpType(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for ICMP Type is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['IcmpType'])
@IcmpType.setter
def IcmpType(self, value):
self._set_attribute(self._SDM_ATT_MAP['IcmpType'], value)
@property
def Icmpv6Code(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for ICMPv6 Code is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['Icmpv6Code'])
@Icmpv6Code.setter
def Icmpv6Code(self, value):
self._set_attribute(self._SDM_ATT_MAP['Icmpv6Code'], value)
@property
def Icmpv6Type(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for ICMPv6 Type is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['Icmpv6Type'])
@Icmpv6Type.setter
def Icmpv6Type(self, value):
self._set_attribute(self._SDM_ATT_MAP['Icmpv6Type'], value)
@property
def IpDscp(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IP DSCP is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpDscp'])
@IpDscp.setter
def IpDscp(self, value):
self._set_attribute(self._SDM_ATT_MAP['IpDscp'], value)
@property
def IpEcn(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IP ECN is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpEcn'])
@IpEcn.setter
def IpEcn(self, value):
self._set_attribute(self._SDM_ATT_MAP['IpEcn'], value)
@property
def IpProtocol(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IP Protocol is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpProtocol'])
@IpProtocol.setter
def IpProtocol(self, value):
self._set_attribute(self._SDM_ATT_MAP['IpProtocol'], value)
@property
def Ipv4Destination(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IPv4 Destination is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv4Destination'])
@Ipv4Destination.setter
def Ipv4Destination(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv4Destination'], value)
@property
def Ipv4Source(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IPv4 Source is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv4Source'])
@Ipv4Source.setter
def Ipv4Source(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv4Source'], value)
@property
def Ipv6Destination(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IPv6 Destination is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv6Destination'])
@Ipv6Destination.setter
def Ipv6Destination(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv6Destination'], value)
@property
def Ipv6ExtHeader(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IPv6 Ext Header is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv6ExtHeader'])
@Ipv6ExtHeader.setter
def Ipv6ExtHeader(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv6ExtHeader'], value)
@property
def Ipv6FlowLabel(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IPv6 Flow Label is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv6FlowLabel'])
@Ipv6FlowLabel.setter
def Ipv6FlowLabel(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv6FlowLabel'], value)
@property
def Ipv6NdSll(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IPv6 ND SLL is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv6NdSll'])
@Ipv6NdSll.setter
def Ipv6NdSll(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv6NdSll'], value)
@property
def Ipv6NdTarget(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IPv6 ND Target is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv6NdTarget'])
@Ipv6NdTarget.setter
def Ipv6NdTarget(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv6NdTarget'], value)
@property
def Ipv6NdTll(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IPv6 ND TLL is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv6NdTll'])
@Ipv6NdTll.setter
def Ipv6NdTll(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv6NdTll'], value)
@property
def Ipv6Source(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for IPv6 Source is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv6Source'])
@Ipv6Source.setter
def Ipv6Source(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv6Source'], value)
@property
def MplsBos(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for MPLS BoS is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['MplsBos'])
@MplsBos.setter
def MplsBos(self, value):
self._set_attribute(self._SDM_ATT_MAP['MplsBos'], value)
@property
def MplsLabel(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for MPLS Label is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['MplsLabel'])
@MplsLabel.setter
def MplsLabel(self, value):
self._set_attribute(self._SDM_ATT_MAP['MplsLabel'], value)
@property
def MplsTc(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for MPLS TC is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['MplsTc'])
@MplsTc.setter
def MplsTc(self, value):
self._set_attribute(self._SDM_ATT_MAP['MplsTc'], value)
@property
def PbbIsid(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for PBB ISID is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['PbbIsid'])
@PbbIsid.setter
def PbbIsid(self, value):
self._set_attribute(self._SDM_ATT_MAP['PbbIsid'], value)
@property
def SctpDestination(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for SCTP Destination is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['SctpDestination'])
@SctpDestination.setter
def SctpDestination(self, value):
self._set_attribute(self._SDM_ATT_MAP['SctpDestination'], value)
@property
def SctpSource(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for SCTP Source is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['SctpSource'])
@SctpSource.setter
def SctpSource(self, value):
self._set_attribute(self._SDM_ATT_MAP['SctpSource'], value)
@property
def TcpDestination(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for TCP Destination is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['TcpDestination'])
@TcpDestination.setter
def TcpDestination(self, value):
self._set_attribute(self._SDM_ATT_MAP['TcpDestination'], value)
@property
def TcpSource(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for TCP Source is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['TcpSource'])
@TcpSource.setter
def TcpSource(self, value):
self._set_attribute(self._SDM_ATT_MAP['TcpSource'], value)
@property
def TunnelId(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for Tunnel ID is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['TunnelId'])
@TunnelId.setter
def TunnelId(self, value):
self._set_attribute(self._SDM_ATT_MAP['TunnelId'], value)
@property
def UdpDestination(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for UDP Destination is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['UdpDestination'])
@UdpDestination.setter
def UdpDestination(self, value):
self._set_attribute(self._SDM_ATT_MAP['UdpDestination'], value)
@property
def UdpSource(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for UDP Source is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['UdpSource'])
@UdpSource.setter
def UdpSource(self, value):
self._set_attribute(self._SDM_ATT_MAP['UdpSource'], value)
@property
def VlanId(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for VLAN ID is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['VlanId'])
@VlanId.setter
def VlanId(self, value):
self._set_attribute(self._SDM_ATT_MAP['VlanId'], value)
@property
def VlanPriority(self):
"""
Returns
-------
- bool: If selected, Apply Set Field Miss for VLAN Priority is supported.
"""
return self._get_attribute(self._SDM_ATT_MAP['VlanPriority'])
@VlanPriority.setter
def VlanPriority(self, value):
self._set_attribute(self._SDM_ATT_MAP['VlanPriority'], value)
def update(self, ArpDestinationHardwareAddress=None, ArpDestinationIpv4Address=None, ArpOpcode=None, ArpSourceHardwareAddress=None, ArpSourceIpv4Address=None, EthernetDestination=None, EthernetSource=None, EthernetType=None, IcmpCode=None, IcmpType=None, Icmpv6Code=None, Icmpv6Type=None, IpDscp=None, IpEcn=None, IpProtocol=None, Ipv4Destination=None, Ipv4Source=None, Ipv6Destination=None, Ipv6ExtHeader=None, Ipv6FlowLabel=None, Ipv6NdSll=None, Ipv6NdTarget=None, Ipv6NdTll=None, Ipv6Source=None, MplsBos=None, MplsLabel=None, MplsTc=None, PbbIsid=None, SctpDestination=None, SctpSource=None, TcpDestination=None, TcpSource=None, TunnelId=None, UdpDestination=None, UdpSource=None, VlanId=None, VlanPriority=None):
"""Updates applySetFieldMiss resource on the server.
Args
----
- ArpDestinationHardwareAddress (bool): If selected, Apply Set Field Miss for ARP Destination Hardware Address is supported.
- ArpDestinationIpv4Address (bool): If selected, Apply Set Field Miss for ARP Destination IPv4 Address is supported.
- ArpOpcode (bool): If selected, Apply Set Field Miss for ARP Opcode is supported.
- ArpSourceHardwareAddress (bool): If selected, Apply Set Field Miss for ARP Source Hardware Address is supported.
- ArpSourceIpv4Address (bool): If selected, Apply Set Field Miss for ARP Source IPv4 Address is supported.
- EthernetDestination (bool): If selected, Apply Set Field Miss for Ethernet Destination is supported.
- EthernetSource (bool): If selected, Apply Set Field Miss for Ethernet Source is supported.
- EthernetType (bool): If selected, Apply Set Field Miss for Ethernet Type is supported.
- IcmpCode (bool): If selected, Apply Set Field Miss for ICMP Code is supported.
- IcmpType (bool): If selected, Apply Set Field Miss for ICMP Type is supported.
- Icmpv6Code (bool): If selected, Apply Set Field Miss for ICMPv6 Code is supported.
- Icmpv6Type (bool): If selected, Apply Set Field Miss for ICMPv6 Type is supported.
- IpDscp (bool): If selected, Apply Set Field Miss for IP DSCP is supported.
- IpEcn (bool): If selected, Apply Set Field Miss for IP ECN is supported.
- IpProtocol (bool): If selected, Apply Set Field Miss for IP Protocol is supported.
- Ipv4Destination (bool): If selected, Apply Set Field Miss for IPv4 Destination is supported.
- Ipv4Source (bool): If selected, Apply Set Field Miss for IPv4 Source is supported.
- Ipv6Destination (bool): If selected, Apply Set Field Miss for IPv6 Destination is supported.
- Ipv6ExtHeader (bool): If selected, Apply Set Field Miss for IPv6 Ext Header is supported.
- Ipv6FlowLabel (bool): If selected, Apply Set Field Miss for IPv6 Flow Label is supported.
- Ipv6NdSll (bool): If selected, Apply Set Field Miss for IPv6 ND SLL is supported.
- Ipv6NdTarget (bool): If selected, Apply Set Field Miss for IPv6 ND Target is supported.
- Ipv6NdTll (bool): If selected, Apply Set Field Miss for IPv6 ND TLL is supported.
- Ipv6Source (bool): If selected, Apply Set Field Miss for IPv6 Source is supported.
- MplsBos (bool): If selected, Apply Set Field Miss for MPLS BoS is supported.
- MplsLabel (bool): If selected, Apply Set Field Miss for MPLS Label is supported.
- MplsTc (bool): If selected, Apply Set Field Miss for MPLS TC is supported.
- PbbIsid (bool): If selected, Apply Set Field Miss for PBB ISID is supported.
- SctpDestination (bool): If selected, Apply Set Field Miss for SCTP Destination is supported.
- SctpSource (bool): If selected, Apply Set Field Miss for SCTP Source is supported.
- TcpDestination (bool): If selected, Apply Set Field Miss for TCP Destination is supported.
- TcpSource (bool): If selected, Apply Set Field Miss for TCP Source is supported.
- TunnelId (bool): If selected, Apply Set Field Miss for Tunnel ID is supported.
- UdpDestination (bool): If selected, Apply Set Field Miss for UDP Destination is supported.
- UdpSource (bool): If selected, Apply Set Field Miss for UDP Source is supported.
- VlanId (bool): If selected, Apply Set Field Miss for VLAN ID is supported.
- VlanPriority (bool): If selected, Apply Set Field Miss for VLAN Priority is supported.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
|
py | b40f87ce68d5b2f74c401093e8c289bbf3f4ba19 | # This file is generated by /tmp/pip-build-a2gvux/numpy/-c
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
blas_info={}
atlas_3_10_blas_info={}
lapack_info={}
atlas_3_10_blas_threads_info={}
atlas_threads_info={}
blas_src_info={}
blas_opt_info={}
lapack_src_info={}
blis_info={}
atlas_blas_threads_info={}
openblas_info={}
lapack_opt_info={}
openblas_lapack_info={}
atlas_3_10_threads_info={}
atlas_info={}
atlas_3_10_info={}
lapack_mkl_info={}
blas_mkl_info={}
atlas_blas_info={}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
|
py | b40f88fb66b90b6d76affa6fcec0154aa964f52b | import logging
import aiohttp
from aiohttp import web
from ....application import CommandBus, EventBus, QueryBus
from ....application.query.fetch_sensors import FetchSensors
class WebsocketResource:
_commands: CommandBus
_events: EventBus
_queries: QueryBus
def __init__(self, commands: CommandBus, queries: QueryBus, events: EventBus):
self._commands = commands
self._queries = queries
self._events = events
def attach(self, app: web.Application):
app.add_routes([
web.get('/ws', self.websocket_handler)
])
return app
async def websocket_handler(self, request):
logging.debug('Websocket handler starting')
ws = web.WebSocketResponse()
await ws.prepare(request)
async def sensor_value_changed(event):
await ws.send_json(event.to_dict())
await self._events.attach('sensor_value_changed', sensor_value_changed)
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
if msg.data == 'close':
await ws.close()
else:
await ws.send_str(msg.data + '/answer')
elif msg.type == aiohttp.WSMsgType.ERROR:
print('ws connection closed with exception %s' % ws.exception())
print('websocket connection closed')
return ws
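# Illustrative wiring only (the real composition root lives elsewhere in the
# application and is an assumption here):
#
#     app = web.Application()
#     WebsocketResource(command_bus, query_bus, event_bus).attach(app)
#     web.run_app(app)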
|
py | b40f89079898383e216140c1e3981a11a20e2253 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'rubcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
|
py | b40f8939a262011072a952bf843797079817c221 | from abc import abstractmethod, ABCMeta
import numpy as np
import copy
from sklearn.utils.validation import check_X_y
from ..index import MultiLabelIndexCollection
from ..oracle import Oracle, Oracles
from ..utils.interface import BaseQueryStrategy
__all__ = ['BaseIndexQuery',
'BaseNoisyOracleQuery',
'BaseMultiLabelQuery',
'BaseFeatureQuery',
]
class BaseIndexQuery(BaseQueryStrategy, metaclass=ABCMeta):
"""The base class for the selection method which imposes a constraint on the parameters of select()"""
@abstractmethod
def select(self, label_index, unlabel_index, batch_size=1, **kwargs):
"""Select instances to query.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
batch_size: int, optional (default=1)
Selection batch size.
"""
class BaseNoisyOracleQuery(BaseQueryStrategy, metaclass=ABCMeta):
def __init__(self, X, y, oracles):
super(BaseNoisyOracleQuery, self).__init__(X, y)
if isinstance(oracles, list):
self._oracles_type = 'list'
for oracle in oracles:
assert isinstance(oracle, Oracle)
elif isinstance(oracles, Oracles):
self._oracles_type = 'Oracles'
else:
raise TypeError("The type of parameter oracles must be a list or alipy.oracle.Oracles object.")
self._oracles = oracles
self._oracles_iterset = list(range(len(oracles))) if self._oracles_type == 'list' else oracles.names()
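        # Map each oracle's positional index to its name/key so strategies can
        # report which oracle was actually queried.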
self._oracle_ind_name_dict = dict(enumerate(self._oracles_iterset))
@abstractmethod
def select(self, label_index, unlabel_index, batch_size=1, **kwargs):
"""Query from oracles. Return the selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
batch_size: int, optional (default=1)
Selection batch size.
"""
class BaseMultiLabelQuery(BaseIndexQuery, metaclass=ABCMeta):
"""Base query strategy for multi label setting."""
def _check_multi_label_ind(self, container):
"""Check if the given array is an array of multi label indexes."""
if not isinstance(container, MultiLabelIndexCollection):
try:
if isinstance(container[0], tuple):
container = MultiLabelIndexCollection(container, self.y.shape[1])
else:
container = MultiLabelIndexCollection.construct_by_1d_array(container, label_mat_shape=self.y.shape)
except:
raise ValueError(
"Please pass a 1d array of indexes or MultiLabelIndexCollection (column major, "
"start from 0) or a list "
"of tuples with 2 elements, in which, the 1st element is the index of instance "
"and the 2nd element is the index of label.")
return copy.copy(container)
def _check_multi_label(self, matrix):
"""Check if the given matrix is multi label"""
# ytype = type_of_target(matrix)
# if 'multilabel' not in ytype:
if len(np.shape(matrix)) != 2:
raise ValueError("Please provide a multi-label matrix in y with the shape [n_samples, n_classes].")
def __init__(self, X=None, y=None, **kwargs):
if X is not None and y is not None:
self._check_multi_label(y)
if isinstance(X, np.ndarray) and isinstance(y, np.ndarray):
# will not use additional memory
check_X_y(X, y, accept_sparse='csc', multi_output=True)
self.X = X
self.y = y
else:
self.X, self.y = check_X_y(X, y, accept_sparse='csc', multi_output=True)
else:
self.X = X
self.y = y
@abstractmethod
def select(self, label_index, unlabel_index, batch_size=1, **kwargs):
"""Select a subset from the unlabeled set, return the selected instance and label.
Parameters
----------
label_index: {list, np.ndarray, MultiLabelIndexCollection}
The indexes of labeled samples. It should be a 1d array of indexes (column major, start from 0) or
MultiLabelIndexCollection or a list of tuples with 2 elements, in which,
the 1st element is the index of instance and the 2nd element is the index of labels.
unlabel_index: {list, np.ndarray, MultiLabelIndexCollection}
The indexes of unlabeled samples. It should be a 1d array of indexes (column major, start from 0) or
MultiLabelIndexCollection or a list of tuples with 2 elements, in which,
the 1st element is the index of instance and the 2nd element is the index of labels.
batch_size: int, optional (default=1)
Selection batch size.
"""
class BaseFeatureQuery(BaseIndexQuery, metaclass=ABCMeta):
"""Base query strategy for feature querying setting.
Basically have the same api with multi label setting."""
def _check_mask(self, mask):
mask = np.asarray(mask)
ue = np.unique(mask)
if not (len(mask.shape) == 2 and len(ue) == 2 and 0 in ue and 1 in ue):
raise ValueError("The mask matrix should be a 2d array, and there must be only "
"1 and 0 in the matrix, in which, 1 means the corresponding "
"element is known, and will be added to the MultiLabelIndexCollection container.")
return mask
def _check_feature_ind(self, container):
if not isinstance(container, MultiLabelIndexCollection):
try:
if isinstance(container[0], tuple):
container = MultiLabelIndexCollection(container, self.X.shape[1])
else:
container = MultiLabelIndexCollection.construct_by_1d_array(container, label_mat_shape=self.X.shape)
            except Exception:
raise ValueError(
"Please pass a 1d array of indexes or MultiLabelIndexCollection (column major, start from 0)"
"or a list of tuples with 2 elements, in which, the 1st element is the index of instance "
"and the 2nd element is the index of features.")
return container
def __init__(self, X=None, y=None, **kwargs):
if X is not None and y is not None:
if isinstance(X, np.ndarray) and isinstance(y, np.ndarray):
# will not use additional memory
check_X_y(X, y, accept_sparse='csc', multi_output=True)
self.X = X
self.y = y
else:
self.X, self.y = check_X_y(X, y, accept_sparse='csc', multi_output=True)
else:
self.X = X
self.y = y
@abstractmethod
    def select(self, observed_entries, unknown_entries, batch_size=1, **kwargs):
"""Select a subset from the unlabeled set, return the selected instance and feature.
Parameters
----------
observed_entries: {list, np.ndarray, MultiLabelIndexCollection}
The indexes of labeled samples. It should be a 1d array of indexes (column major, start from 0)
or MultiLabelIndexCollection or a list of tuples with 2 elements, in which,
the 1st element is the index of instance and the 2nd element is the index of features.
        unknown_entries: {list, np.ndarray, MultiLabelIndexCollection}
The indexes of unlabeled samples. It should be a 1d array of indexes (column major, start from 0)
or MultiLabelIndexCollection or a list of tuples with 2 elements, in which,
the 1st element is the index of instance and the 2nd element is the index of features.
batch_size: int, optional (default=1)
Selection batch size.
"""
|
py | b40f8b01630b2d4ecb211fdb9c28016f771dc4a1 | from threading import Thread
from common import pure_posix_path, confirm_popup, mk_logger, get_dir_attrs
from threads.upload import Upload
logger = mk_logger(__name__)
ex_log = mk_logger(name=f'{__name__}-EX',
level=40,
_format='[%(levelname)-8s] [%(asctime)s] [%(name)s] [%(funcName)s] [%(lineno)d] [%(message)s]')
ex_log = ex_log.exception
class MkRemoteDirs(Thread):
def __init__(self, data, manager, sftp):
super().__init__()
self.data = data
self.dst_path = data['dst_path']
# self.full_path = posix_path(self.dst_path, data['name'])
self.manager = manager
self.sftp = sftp
self.done = None
def run(self):
self.makedirs()
def delete_file(self, popup, content, answer):
path = content._args
logger.info(f'Deleting file {path}')
if answer == 'yes':
self.sftp = self.manager.get_sftp()
if self.sftp:
# noinspection PyBroadException
try:
self.sftp.remove(path)
self.sftp.makedirs(path)
attrs = get_dir_attrs(path, self.sftp)
except Exception as ex:
ex_log(f'Failed to delete file {ex}')
else:
logger.info(f'File deleted - {path}')
self.manager.sftp_queue.put(self.sftp)
self.manager.uploaded(self.dst_path, attrs)
self.manager.directory_created(path, Upload)
popup.dismiss()
def makedirs(self):
for _dir in self.data['name']:
full_path = pure_posix_path(self.dst_path, _dir)
try:
self.sftp.makedirs(full_path)
attrs = get_dir_attrs(full_path, self.sftp)
except OSError:
ex_log(f'Could not make dir {full_path}. Detected regular file.')
self.manager.locked_paths.add(full_path)
confirm_popup(callback=self.delete_file,
movable=True,
_args=full_path,
                              text=f'Path {full_path} already exists on the remote server, but it is a file.\n'
                                   f'To upload data to that destination, the file must be removed first.\n\n'
                                   f'Do you agree to delete the file and create the directory?')
else:
logger.info(f'Created directory - {_dir}')
self.done = True
if self.manager.is_current_path(self.dst_path):
self.manager.uploaded(self.dst_path, attrs)
else:
self.manager.sftp_queue.put(self.sftp)
self.manager.thread_queue.put('.')
self.manager.next_transfer()
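# Illustrative sketch (not part of the original module): the payload shape that
# MkRemoteDirs expects, plus how the thread would be launched. `manager` and
# `sftp` come from the surrounding application and are assumed here.
def _example_mkdirs_payload():
    return {'dst_path': '/remote/base', 'name': ['new_dir_a', 'new_dir_b']}
# worker = MkRemoteDirs(_example_mkdirs_payload(), manager, sftp)
# worker.start()   # runs makedirs() in a background thread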
|
py | b40f8b616b974070c8ba4017e0d5d4fe9c5a4ac8 | import jwt
import uuid
import time
from datetime import datetime
from tests.conftest import set_config_values
import pytest
from flask import json, current_app, request
from freezegun import freeze_time
from notifications_python_client.authentication import create_jwt_token
from unittest.mock import call
from app import api_user
from app.dao.api_key_dao import (
get_unsigned_secrets,
save_model_api_key,
get_unsigned_secret,
expire_api_key,
get_model_api_keys,
)
from app.dao.services_dao import dao_fetch_service_by_id
from app.models import ApiKey, KEY_TYPE_NORMAL
from app.authentication.auth import AuthError, requires_admin_auth, requires_auth, GENERAL_TOKEN_ERROR_MESSAGE
from tests.conftest import set_config
@pytest.mark.parametrize('auth_fn', [requires_auth, requires_admin_auth])
def test_should_not_allow_request_with_no_token(client, auth_fn):
request.headers = {}
with pytest.raises(AuthError) as exc:
auth_fn()
assert exc.value.short_message == 'Unauthorized: authentication token must be provided'
@pytest.mark.parametrize('auth_fn', [requires_auth, requires_admin_auth])
def test_should_not_allow_request_with_incorrect_header(client, auth_fn):
request.headers = {'Authorization': 'Basic 1234'}
with pytest.raises(AuthError) as exc:
auth_fn()
assert exc.value.short_message == 'Unauthorized: authentication bearer scheme must be used'
@pytest.mark.parametrize('auth_fn', [requires_auth, requires_admin_auth])
def test_should_not_allow_request_with_incorrect_token(client, auth_fn):
request.headers = {'Authorization': 'Bearer 1234'}
with pytest.raises(AuthError) as exc:
auth_fn()
assert exc.value.short_message == GENERAL_TOKEN_ERROR_MESSAGE
@pytest.mark.parametrize('auth_fn', [requires_auth, requires_admin_auth])
def test_should_not_allow_request_with_no_iss(client, auth_fn):
# code copied from notifications_python_client.authentication.py::create_jwt_token
headers = {
"typ": 'JWT',
"alg": 'HS256'
}
claims = {
# 'iss': not provided
'iat': int(time.time())
}
token = jwt.encode(payload=claims, key=str(uuid.uuid4()), headers=headers)
request.headers = {'Authorization': 'Bearer {}'.format(token)}
with pytest.raises(AuthError) as exc:
auth_fn()
assert exc.value.short_message == 'Invalid token: iss field not provided'
def test_auth_should_not_allow_request_with_no_iat(client, sample_api_key):
iss = str(sample_api_key.service_id)
# code copied from notifications_python_client.authentication.py::create_jwt_token
headers = {
"typ": 'JWT',
"alg": 'HS256'
}
claims = {
'iss': iss
# 'iat': not provided
}
token = jwt.encode(payload=claims, key=str(uuid.uuid4()), headers=headers)
request.headers = {'Authorization': 'Bearer {}'.format(token)}
with pytest.raises(AuthError) as exc:
requires_auth()
assert exc.value.short_message == 'Invalid token: API key not found'
def test_auth_should_not_allow_request_with_non_hs256_algorithm(client, sample_api_key):
iss = str(sample_api_key.service_id)
# code copied from notifications_python_client.authentication.py::create_jwt_token
headers = {
"typ": 'JWT',
"alg": 'HS512'
}
claims = {
'iss': iss,
'iat': int(time.time())
}
token = jwt.encode(payload=claims, key=str(uuid.uuid4()), headers=headers)
request.headers = {'Authorization': 'Bearer {}'.format(token)}
with pytest.raises(AuthError) as exc:
requires_auth()
assert exc.value.short_message == 'Invalid token: algorithm used is not HS256'
def test_admin_auth_should_not_allow_request_with_no_iat(client):
iss = current_app.config['ADMIN_CLIENT_USER_NAME']
secret = current_app.config['API_INTERNAL_SECRETS'][0]
# code copied from notifications_python_client.authentication.py::create_jwt_token
headers = {
"typ": 'JWT',
"alg": 'HS256'
}
claims = {
'iss': iss
# 'iat': not provided
}
token = jwt.encode(payload=claims, key=secret, headers=headers)
request.headers = {'Authorization': 'Bearer {}'.format(token)}
with pytest.raises(AuthError) as exc:
requires_admin_auth()
assert exc.value.short_message == "Unauthorized: admin authentication token not found"
def test_admin_auth_should_not_allow_request_with_old_iat(client):
iss = current_app.config['ADMIN_CLIENT_USER_NAME']
secret = current_app.config['API_INTERNAL_SECRETS'][0]
# code copied from notifications_python_client.authentication.py::create_jwt_token
headers = {
"typ": 'JWT',
"alg": 'HS256'
}
claims = {
'iss': iss,
'iat': int(time.time()) - 60
}
token = jwt.encode(payload=claims, key=secret, headers=headers)
request.headers = {'Authorization': 'Bearer {}'.format(token)}
with pytest.raises(AuthError) as exc:
requires_admin_auth()
assert exc.value.short_message == "Invalid token: expired, check that your system clock is accurate"
def test_auth_should_not_allow_request_with_extra_claims(client, sample_api_key):
iss = str(sample_api_key.service_id)
key = get_unsigned_secrets(sample_api_key.service_id)[0]
headers = {
"typ": 'JWT',
"alg": 'HS256'
}
claims = {
'iss': iss,
'iat': int(time.time()),
'aud': 'notifications.service.gov.uk' # extra claim that we don't support
}
token = jwt.encode(payload=claims, key=key, headers=headers)
request.headers = {'Authorization': 'Bearer {}'.format(token)}
with pytest.raises(AuthError) as exc:
requires_auth()
assert exc.value.short_message == GENERAL_TOKEN_ERROR_MESSAGE
def test_should_not_allow_invalid_secret(client, sample_api_key):
token = create_jwt_token(
secret="not-so-secret",
client_id=str(sample_api_key.service_id))
response = client.get(
'/notifications',
headers={'Authorization': "Bearer {}".format(token)}
)
assert response.status_code == 403
data = json.loads(response.get_data())
assert data['message'] == {"token": ['Invalid token: API key not found']}
@pytest.mark.parametrize('scheme', ['bearer', 'Bearer'])
def test_should_allow_valid_token(client, sample_api_key, scheme):
token = __create_token(sample_api_key.service_id)
response = client.get('/notifications', headers={'Authorization': '{} {}'.format(scheme, token)})
assert response.status_code == 200
def test_should_not_allow_service_id_that_is_the_wrong_data_type(client, sample_api_key):
token = create_jwt_token(secret=get_unsigned_secrets(sample_api_key.service_id)[0],
client_id=str('not-a-valid-id'))
response = client.get(
'/notifications',
headers={'Authorization': "Bearer {}".format(token)}
)
assert response.status_code == 403
data = json.loads(response.get_data())
assert data['message'] == {"token": ['Invalid token: service id is not the right data type']}
def test_should_allow_valid_token_for_request_with_path_params_for_public_url(client, sample_api_key):
token = __create_token(sample_api_key.service_id)
response = client.get('/notifications', headers={'Authorization': 'Bearer {}'.format(token)})
assert response.status_code == 200
def test_should_allow_valid_token_for_request_with_path_params_for_admin_url(client):
token = create_jwt_token(
current_app.config['API_INTERNAL_SECRETS'][0], current_app.config['ADMIN_CLIENT_USER_NAME']
)
response = client.get('/service', headers={'Authorization': 'Bearer {}'.format(token)})
assert response.status_code == 200
def test_should_allow_valid_token_for_request_with_path_params_for_admin_url_with_second_secret(client):
with set_config(client.application, 'API_INTERNAL_SECRETS', ["secret1", "secret2"]):
token = create_jwt_token(
current_app.config['API_INTERNAL_SECRETS'][0], current_app.config['ADMIN_CLIENT_USER_NAME']
)
response = client.get('/service', headers={'Authorization': 'Bearer {}'.format(token)})
assert response.status_code == 200
token = create_jwt_token(
current_app.config['API_INTERNAL_SECRETS'][1], current_app.config['ADMIN_CLIENT_USER_NAME']
)
response = client.get('/service', headers={'Authorization': 'Bearer {}'.format(token)})
assert response.status_code == 200
def test_should_allow_valid_token_when_service_has_multiple_keys(client, sample_api_key):
data = {'service': sample_api_key.service,
'name': 'some key name',
'created_by': sample_api_key.created_by,
'key_type': KEY_TYPE_NORMAL
}
api_key = ApiKey(**data)
save_model_api_key(api_key)
token = __create_token(sample_api_key.service_id)
response = client.get(
'/notifications',
headers={'Authorization': 'Bearer {}'.format(token)})
assert response.status_code == 200
def test_authentication_passes_when_service_has_multiple_keys_some_expired(
client,
sample_api_key):
expired_key_data = {'service': sample_api_key.service,
'name': 'expired_key',
'expiry_date': datetime.utcnow(),
'created_by': sample_api_key.created_by,
'key_type': KEY_TYPE_NORMAL
}
expired_key = ApiKey(**expired_key_data)
save_model_api_key(expired_key)
another_key = {'service': sample_api_key.service,
'name': 'another_key',
'created_by': sample_api_key.created_by,
'key_type': KEY_TYPE_NORMAL
}
api_key = ApiKey(**another_key)
save_model_api_key(api_key)
token = create_jwt_token(
secret=get_unsigned_secret(api_key.id),
client_id=str(sample_api_key.service_id))
response = client.get(
'/notifications',
headers={'Authorization': 'Bearer {}'.format(token)})
assert response.status_code == 200
def test_authentication_returns_token_expired_when_service_uses_expired_key_and_has_multiple_keys(client,
sample_api_key):
expired_key = {'service': sample_api_key.service,
'name': 'expired_key',
'created_by': sample_api_key.created_by,
'key_type': KEY_TYPE_NORMAL
}
expired_api_key = ApiKey(**expired_key)
save_model_api_key(expired_api_key)
another_key = {'service': sample_api_key.service,
'name': 'another_key',
'created_by': sample_api_key.created_by,
'key_type': KEY_TYPE_NORMAL
}
api_key = ApiKey(**another_key)
save_model_api_key(api_key)
token = create_jwt_token(
secret=get_unsigned_secret(expired_api_key.id),
client_id=str(sample_api_key.service_id))
expire_api_key(service_id=sample_api_key.service_id, api_key_id=expired_api_key.id)
request.headers = {'Authorization': 'Bearer {}'.format(token)}
with pytest.raises(AuthError) as exc:
requires_auth()
assert exc.value.short_message == 'Invalid token: API key revoked'
assert exc.value.service_id == str(expired_api_key.service_id)
assert exc.value.api_key_id == expired_api_key.id
def test_authentication_returns_error_when_admin_client_has_no_secrets(client):
api_secret = current_app.config.get('API_INTERNAL_SECRETS')[0]
api_service_id = current_app.config.get('ADMIN_CLIENT_USER_NAME')
token = create_jwt_token(
secret=api_secret,
client_id=api_service_id
)
with set_config(client.application, 'API_INTERNAL_SECRETS', []):
response = client.get(
'/service',
headers={'Authorization': 'Bearer {}'.format(token)})
assert response.status_code == 401
error_message = json.loads(response.get_data())
assert error_message['message'] == {"token": ["Unauthorized: admin authentication token not found"]}
def test_authentication_returns_error_when_admin_client_secret_is_invalid(client):
api_secret = current_app.config.get('API_INTERNAL_SECRETS')[0]
token = create_jwt_token(
secret=api_secret,
client_id=current_app.config.get('ADMIN_CLIENT_USER_NAME')
)
current_app.config['API_INTERNAL_SECRETS'][0] = 'something-wrong'
response = client.get(
'/service',
headers={'Authorization': 'Bearer {}'.format(token)})
assert response.status_code == 401
error_message = json.loads(response.get_data())
assert error_message['message'] == {"token": ["Unauthorized: admin authentication token not found"]}
current_app.config['API_INTERNAL_SECRETS'][0] = api_secret
def test_authentication_returns_error_when_service_doesnt_exist(
client,
sample_api_key
):
# get service ID and secret the wrong way around
token = create_jwt_token(
secret=str(sample_api_key.service_id),
client_id=str(sample_api_key.id))
response = client.get(
'/notifications',
headers={'Authorization': 'Bearer {}'.format(token)}
)
assert response.status_code == 403
error_message = json.loads(response.get_data())
assert error_message['message'] == {'token': ['Invalid token: service not found']}
def test_authentication_returns_error_when_service_inactive(client, sample_api_key):
sample_api_key.service.active = False
token = create_jwt_token(secret=str(sample_api_key.id), client_id=str(sample_api_key.service_id))
response = client.get('/notifications', headers={'Authorization': 'Bearer {}'.format(token)})
assert response.status_code == 403
error_message = json.loads(response.get_data())
assert error_message['message'] == {'token': ['Invalid token: service is archived']}
def test_authentication_returns_error_when_service_has_no_secrets(client,
sample_service,
fake_uuid):
token = create_jwt_token(
secret=fake_uuid,
client_id=str(sample_service.id))
request.headers = {'Authorization': 'Bearer {}'.format(token)}
with pytest.raises(AuthError) as exc:
requires_auth()
assert exc.value.short_message == 'Invalid token: service has no API keys'
assert exc.value.service_id == str(sample_service.id)
def test_should_attach_the_current_api_key_to_current_app(notify_api, sample_service, sample_api_key):
with notify_api.test_request_context(), notify_api.test_client() as client:
token = __create_token(sample_api_key.service_id)
response = client.get(
'/notifications',
headers={'Authorization': 'Bearer {}'.format(token)}
)
assert response.status_code == 200
assert str(api_user.id) == str(sample_api_key.id)
def test_should_return_403_when_token_is_expired(client,
sample_api_key):
with freeze_time('2001-01-01T12:00:00'):
token = __create_token(sample_api_key.service_id)
with freeze_time('2001-01-01T12:00:40'):
with pytest.raises(AuthError) as exc:
request.headers = {'Authorization': 'Bearer {}'.format(token)}
requires_auth()
assert exc.value.short_message == 'Error: Your system clock must be accurate to within 30 seconds'
assert exc.value.service_id == str(sample_api_key.service_id)
assert str(exc.value.api_key_id) == str(sample_api_key.id)
def __create_token(service_id):
return create_jwt_token(secret=get_unsigned_secrets(service_id)[0],
client_id=str(service_id))
@pytest.mark.parametrize('check_proxy_header,header_value,expected_status', [
(True, 'key_1', 200),
(True, 'wrong_key', 200),
(False, 'key_1', 200),
(False, 'wrong_key', 200),
])
def test_proxy_key_non_auth_endpoint(notify_api, check_proxy_header, header_value, expected_status):
with set_config_values(notify_api, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
with notify_api.test_client() as client:
response = client.get(
path='/_status',
headers=[
('X-Custom-Forwarder', header_value),
]
)
assert response.status_code == expected_status
@pytest.mark.parametrize('check_proxy_header,header_value,expected_status', [
(True, 'key_1', 200),
(True, 'wrong_key', 403),
(False, 'key_1', 200),
(False, 'wrong_key', 200),
])
def test_proxy_key_on_admin_auth_endpoint(notify_api, check_proxy_header, header_value, expected_status):
token = create_jwt_token(
current_app.config['API_INTERNAL_SECRETS'][0], current_app.config['ADMIN_CLIENT_USER_NAME']
)
with set_config_values(notify_api, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
with notify_api.test_client() as client:
response = client.get(
path='/service',
headers=[
('X-Custom-Forwarder', header_value),
('Authorization', 'Bearer {}'.format(token))
]
)
assert response.status_code == expected_status
def test_should_cache_service_and_api_key_lookups(mocker, client, sample_api_key):
mock_get_api_keys = mocker.patch(
'app.serialised_models.get_model_api_keys',
wraps=get_model_api_keys,
)
mock_get_service = mocker.patch(
'app.serialised_models.dao_fetch_service_by_id',
wraps=dao_fetch_service_by_id,
)
for _ in range(5):
token = __create_token(sample_api_key.service_id)
client.get('/notifications', headers={
'Authorization': f'Bearer {token}'
})
assert mock_get_api_keys.call_args_list == [
call(str(sample_api_key.service_id))
]
assert mock_get_service.call_args_list == [
call(str(sample_api_key.service_id))
]
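# Illustrative helper (not part of the original suite): the tests above build
# JWTs by hand with custom headers/claims to probe individual failure modes;
# this captures that recurring pattern. The claim values below are placeholders.
def __create_custom_token(secret, claims, algorithm='HS256'):
    headers = {
        "typ": 'JWT',
        "alg": algorithm
    }
    return jwt.encode(payload=claims, key=secret, headers=headers)
# e.g. a token that is missing 'iat':
#   token = __create_custom_token(str(uuid.uuid4()), {'iss': 'some-service-id'})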
|
py | b40f8b90c03ced381182e26d592ec982b5717cf5 | from .base import require_arg
from .base import get_timeout_multiplier # noqa: F401
from .chrome import executor_kwargs as chrome_executor_kwargs
from .chrome_android import ChromeAndroidBrowserBase
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor) # noqa: F401
from ..executors.executorchrome import ChromeDriverWdspecExecutor # noqa: F401
__wptrunner__ = {"product": "android_webview",
"check_args": "check_args",
"browser": "SystemWebViewShell",
"executor": {"testharness": "WebDriverTestharnessExecutor",
"reftest": "WebDriverRefTestExecutor",
"wdspec": "ChromeDriverWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"timeout_multiplier": "get_timeout_multiplier"}
_wptserve_ports = set()
def check_args(**kwargs):
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
return {"binary": kwargs["binary"],
"device_serial": kwargs["device_serial"],
"webdriver_binary": kwargs["webdriver_binary"],
"webdriver_args": kwargs.get("webdriver_args")}
def executor_kwargs(logger, test_type, server_config, cache_manager, run_info_data,
**kwargs):
    # Use update() to modify the global set in place.
_wptserve_ports.update(set(
server_config['ports']['http'] + server_config['ports']['https'] +
server_config['ports']['ws'] + server_config['ports']['wss']
))
executor_kwargs = chrome_executor_kwargs(logger, test_type, server_config,
cache_manager, run_info_data,
**kwargs)
del executor_kwargs["capabilities"]["goog:chromeOptions"]["prefs"]
capabilities = executor_kwargs["capabilities"]
# Note that for WebView, we launch a test shell and have the test shell use WebView.
# https://chromium.googlesource.com/chromium/src/+/HEAD/android_webview/docs/webview-shell.md
capabilities["goog:chromeOptions"]["androidPackage"] = \
kwargs.get("package_name", "org.chromium.webview_shell")
capabilities["goog:chromeOptions"]["androidActivity"] = \
"org.chromium.webview_shell.WebPlatformTestsActivity"
if kwargs.get("device_serial"):
capabilities["goog:chromeOptions"]["androidDeviceSerial"] = kwargs["device_serial"]
# Workaround: driver.quit() cannot quit SystemWebViewShell.
executor_kwargs["pause_after_test"] = False
# Workaround: driver.close() is not supported.
executor_kwargs["restart_after_test"] = True
executor_kwargs["close_after_done"] = False
return executor_kwargs
def env_extras(**kwargs):
return []
def env_options():
# allow the use of host-resolver-rules in lieu of modifying /etc/hosts file
return {"server_host": "127.0.0.1"}
class SystemWebViewShell(ChromeAndroidBrowserBase):
"""Chrome is backed by chromedriver, which is supplied through
``wptrunner.webdriver.ChromeDriverServer``.
"""
def __init__(self, logger, binary, webdriver_binary="chromedriver",
remote_queue=None,
device_serial=None,
webdriver_args=None):
"""Creates a new representation of Chrome. The `binary` argument gives
the browser binary to use for testing."""
super(SystemWebViewShell, self).__init__(logger,
webdriver_binary, remote_queue, device_serial, webdriver_args)
self.binary = binary
self.wptserver_ports = _wptserve_ports
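# Illustrative sketch (not part of the original module): the minimal keyword
# set this product needs before a run. The chromedriver path and device serial
# are placeholders, not values defined by wptrunner.
def _example_browser_kwargs():
    kwargs = {"webdriver_binary": "/path/to/chromedriver",  # placeholder
              "binary": None,  # placeholder
              "device_serial": "emulator-5554",  # placeholder
              "webdriver_args": []}
    check_args(**kwargs)  # raises if webdriver_binary is missing
    return browser_kwargs(None, "testharness", None, None, **kwargs)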
|
py | b40f8ba52df9d30c6fd52a5edb2263f12c126e18 | # -*- coding: utf-8 -*-
__author__ = 'ElenaSidorova'
import re
from copy import deepcopy
from prereform2modern.token_class import Token
from prereform2modern.word_tokenize import WordTokenizer
class Tokenizer(object):
@classmethod
def tokenize(cls, text):
"""
Tokenizes the text
:param text: the raw text
:return: tokens (dict format)
"""
tokens = {}
parts = WordTokenizer.tokenize(text)
current = 0
id = 0
for i, part in enumerate(parts):
tokens[i] = Token(part)
current += len(part)
id += 1
tokens = cls.get_types(tokens)
return tokens
@classmethod
def refactor(cls, tokens):
"""
        Joins tokens that were split apart by square brackets [ ] back together
:param tokens: the tokens (dict)
:return: the refactored tokens
"""
ref = {}
j = 0
for i in range(len(tokens.keys())):
if tokens[i].word == u'[':
                if (i + 1) in tokens:
if tokens[i + 1].word == u'?':
ref[j] = tokens[i]
i += 1
j += 1
else:
                        if (j - 1) in ref:
if ref[j-1].type == u'word':
ref[j-1].word = ref[j-1].word + u'['
i += 1
else:
ref[j] = tokens[i]
i += 1
j += 1
else:
ref[j] = tokens[i]
i += 1
j += 1
else:
ref[j] = tokens[i]
i += 1
j += 1
elif tokens[i].word == u'?':
                if (j - 1) in ref:
if ref[j-1].word == u'[':
ref[j-1].word = ref[j-1].word + u'?'
i += 1
else:
ref[j] = tokens[i]
i += 1
j += 1
else:
ref[j] = tokens[i]
i += 1
j += 1
elif tokens[i].word == u']':
                if (j - 1) in ref:
if ref[j-1].word == u'[?':
ref[j-1].word = ref[j-1].word + u']'
i += 1
else:
if ref[j-1].type == u'word':
if u'[' in ref[j-1].word and u']' not in ref[j-1].word:
ref[j-1].word = ref[j-1].word + u']'
i += 1
else:
ref[j] = tokens[i]
i += 1
j += 1
else:
ref[j] = tokens[i]
i += 1
j += 1
else:
ref[j] = tokens[i]
i += 1
j += 1
else:
                if (j - 1) in ref:
if len(ref[j-1].word) > 1 and ref[j-1].word[-1] == u'[' and tokens[i].type == 'word':
ref[j-1].word = ref[j-1].word + tokens[i].word
i += 1
elif len(ref[j-1].word) > 1 and ref[j-1].word[-1] == u']' and tokens[i].type == 'word':
ref[j-1].word = ref[j-1].word + tokens[i].word
i += 1
else:
ref[j] = tokens[i]
i += 1
j += 1
else:
ref[j] = tokens[i]
i += 1
j += 1
out = {}
k = 0
for j in range(len(ref.keys())):
k, out, added = cls.split_passed(j, k, ref, out, u'[', u']')
if not added:
k, out, added = cls.split_passed(j, k, ref, out, u']', u'[')
if not added:
out[k] = ref[j]
k += 1
return out
@classmethod
def split_passed(cls, j, k, ref, out, symbol1, symbol2):
added = 0
if symbol1 in ref[j].word:
if symbol2 in ref[j].word:
out[k] = ref[j]
k += 1
return k, out, 1
else:
tmp = deepcopy(ref[j])
arr = cls.get_tmp_arr(ref[j].word.split(symbol1), symbol1)
for el in arr:
tmp.word = el
out[k] = deepcopy(tmp)
k += 1
added = 1
return k, out, added
@classmethod
def get_tmp_arr(cls, tmpar, symbol):
arr = []
for i, el in enumerate(tmpar):
if i >= len(tmpar) - 1:
if el != u'':
arr.append(el)
continue
if el != u'':
arr.append(el)
arr.append(symbol)
return arr
@classmethod
def add_space_symbols(cls, current, new_lines, tokens, id, res, spaces):
"""
        Adds space tokens at the positions given in the lists
:param current: the number of the current letter
:param new_lines: the list of the new lines positions \n
:param tokens: the tokens
:param id: the number of the part of the text
:param res: the list of the \r positions
:param spaces: the list of the spaces positions
:return: the new current position, the tokens, the number of the part
"""
current, tokens, id = cls.add_new_lines(current, new_lines, tokens, id, res, spaces)
current, tokens, id = cls.add_r_lines(current, res, tokens, id, spaces, new_lines)
current, tokens, id = cls.add_spaces(current, spaces, tokens, id, new_lines, res)
return current, tokens, id
@classmethod
def add_new_lines(cls, current, new_lines, tokens, id, res, spaces):
"""
        Adds newline (\n) tokens at the positions given in the lists
:param current: the number of the current letter
:param new_lines: the list of the new lines positions \n
:param tokens: the tokens
:param id: the number of the part of the text
:param res: the list of the \r positions
:param spaces: the list of the spaces positions
:return: the new current position, the tokens, the number of the part
"""
if current in new_lines:
tokens[id] = Token(u'\n')
id += 1
current += 1
current, tokens, id = cls.add_r_lines(current, res, tokens, id, spaces, new_lines)
current, tokens, id = cls.add_spaces(current, spaces, tokens, id, new_lines, res)
current, tokens, id = cls.add_new_lines(current, new_lines, tokens, id, res, spaces)
return current, tokens, id
@classmethod
def add_r_lines(cls, current, res, tokens, id, spaces, new_lines):
"""
        Adds carriage-return (\r) tokens at the positions given in the lists
:param current: the number of the current letter
:param res: the list of the \r positions
:param tokens: the tokens
:param id: the number of the part of the text
:param spaces: the list of the spaces positions
:param new_lines: the list of the new lines positions \n
:return: the new current position, the tokens, the number of the part
"""
if current in res:
tokens[id] = Token(u'\r')
id += 1
current += 1
current, tokens, id = cls.add_spaces(current, spaces, tokens, id, new_lines, res)
current, tokens, id = cls.add_new_lines(current, new_lines, tokens, id, res, spaces)
current, tokens, id = cls.add_r_lines(current, res, tokens, id, spaces, new_lines)
return current, tokens, id
@classmethod
def add_spaces(cls, current, spaces, tokens, id, new_lines, res):
if current in spaces:
tokens[id] = Token(u' ')
id += 1
current += 1
current, tokens, id = cls.add_new_lines(current, new_lines, tokens, id, res, spaces)
current, tokens, id = cls.add_r_lines(current, res, tokens, id, spaces, new_lines)
current, tokens, id = cls.add_spaces(current, spaces, tokens, id, new_lines, res)
return current, tokens, id
@classmethod
def get_types(cls, tokens):
for t in tokens.keys():
w = 0
for letter in tokens[t].word:
if letter.isalpha():
tokens[t].type = 'word'
else:
w += 1
if w == len(tokens[t].word):
tokens[t].type = 'punct'
            if tokens[t].type is None:
tokens[t].type = 'word'
return tokens
# t = u'\n \r\r\r \n \r \n\n\n \r \n yuy ghjg werwer-er по-моему?'
# t = u'мам[а]м'
# t = u'это всего [лишь скоб[к]и]'
# t = u'«скоб[к»и]» [скобки]'
# t = u'"Да не по горамъ-горамъ"'
# a = Tokenizer()
# b = a.tokenize(t)
# c = a.refactor(b)
# print c
# print b
# print u'\n'.join(b)
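# Illustrative usage sketch (not part of the original module): tokenize a small
# string and join the bracketed fragments back together.
def _example_tokenize():
    tokens = Tokenizer.tokenize(u'это всего [лишь скоб[к]и]')
    tokens = Tokenizer.refactor(tokens)
    # every value is a Token with .word and .type ('word' or 'punct')
    return [tokens[i].word for i in sorted(tokens.keys())]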
|
py | b40f8f1e61c369dd9cc1198d83daa3a4c7210259 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# This script shows a compressed spectrum of an image.
# Image pipeline
reader = vtk.vtkPNGReader()
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/fullhead15.png")
fft = vtk.vtkImageFFT()
fft.SetInputConnection(reader.GetOutputPort())
fft.ReleaseDataFlagOff()
#fft DebugOn
magnitude = vtk.vtkImageMagnitude()
magnitude.SetInputConnection(fft.GetOutputPort())
magnitude.ReleaseDataFlagOff()
center = vtk.vtkImageFourierCenter()
center.SetInputConnection(magnitude.GetOutputPort())
compress = vtk.vtkImageLogarithmicScale()
compress.SetInputConnection(center.GetOutputPort())
compress.SetConstant(15)
viewer = vtk.vtkImageViewer2()
viewer.SetInputConnection(compress.GetOutputPort())
viewer.SetColorWindow(150)
viewer.SetColorLevel(170)
viewInt = vtk.vtkRenderWindowInteractor()
viewer.SetupInteractor(viewInt)
viewer.Render()
# --- end of script --
|
py | b40f908b6eec62282d2bdba4618a4f655b2655e2 | # -*- coding: utf-8 -*-
from .constituency import CRFConstituencyParser
from .dependency import (BiaffineDependencyParser, CRF2oDependencyParser,
CRFDependencyParser, CRFNPDependencyParser)
from .parser import Parser
from .semantic_dependency import BiaffineSemanticDependencyParser
from .transition_based_sdp import TransitionSemanticDependencyParser
__all__ = [
'BiaffineDependencyParser', 'CRFNPDependencyParser', 'CRFDependencyParser',
'CRF2oDependencyParser', 'CRFConstituencyParser',
'BiaffineSemanticDependencyParser', 'TransitionSemanticDependencyParser',
'Parser'
]
|
py | b40f90ad789af8f55939665f3bc3946c4f1f17ad | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2020 all rights reserved
#
# base class
from .CoFunctor import CoFunctor
# class declaration
class Accumulator(CoFunctor):
"""
A coroutine that accumulates data in a container
"""
# interface
def throw(self, errorTp, error=None, traceback=None):
"""
Handle exceptions
"""
# accumulators ignore errors
return
# meta-methods
def __init__(self, **kwds):
# initialize my cache
self.cache = []
# chain up
super().__init__(**kwds)
# all done
return
# my coroutine
def __call__(self):
"""
Store everything that comes in
"""
# for ever
while True:
# get the item
item = yield
# store it
self.cache.append(item)
# all done
return
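# Illustrative, standalone version of the same accumulate-forever pattern,
# driven by hand with a plain generator; it does not touch the pyre CoFunctor
# machinery that normally primes and feeds Accumulator.
def _accumulate_demo():
    cache = []
    def sink():
        while True:
            cache.append((yield))
    gen = sink()
    next(gen)            # prime the coroutine
    gen.send("alpha")
    gen.send("beta")
    return cache         # -> ["alpha", "beta"]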
# end of file
|
py | b40f90bdeadbee883bf0ee9d87c520cac6fc8fc8 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from rogerthat.models.common import NdbModel
from google.appengine.ext import ndb
# DOCS https://authenticatie.vlaanderen.be/docs/beveiligen-van-toepassingen/integratie-methoden/oidc/
# T&I https://authenticatie-ti.vlaanderen.be/op/.well-known/openid-configuration
# PROD https://authenticatie.vlaanderen.be/op/.well-known/openid-configuration
class ACMSettings(NdbModel):
client_id = ndb.TextProperty()
client_secret = ndb.TextProperty()
openid_config_uri = ndb.TextProperty()
auth_redirect_uri = ndb.TextProperty()
logout_redirect_uri = ndb.TextProperty()
@classmethod
def create_key(cls, app_id):
return ndb.Key(cls, app_id)
class ACMLoginState(NdbModel):
creation_time = ndb.DateTimeProperty(auto_now_add=True)
app_id = ndb.TextProperty()
scope = ndb.TextProperty()
code_challenge = ndb.TextProperty()
token = ndb.JsonProperty()
id_token = ndb.JsonProperty()
@property
def state(self):
return self.key.id()
@classmethod
def create_key(cls, state):
return ndb.Key(cls, state)
@classmethod
def list_before_date(cls, date):
return cls.query(cls.creation_time < date)
class ACMLogoutState(NdbModel):
creation_time = ndb.DateTimeProperty(auto_now_add=True)
app_id = ndb.TextProperty()
@property
def state(self):
return self.key.id()
@classmethod
def create_key(cls, state):
return ndb.Key(cls, state)
@classmethod
def list_before_date(cls, date):
return cls.query(cls.creation_time < date) |
py | b40f913f3bdc5150ba8df84296ee4591e1dd9539 | # ##################### The pruning tables cut the search tree during the search. ######################################
# ##################### The pruning values are stored modulo 3 which saves a lot of memory. ############################
import defs
import enums
import moves as mv
import symmetries as sy
import cubie as cb
from os import path
import time
import array as ar
flipslice_twist_depth3 = None # global variables
corners_ud_edges_depth3 = None
cornslice_depth = None
edgeslice_depth = None
# ####################### functions to extract or set values in the pruning tables #####################################
def get_flipslice_twist_depth3(ix):
"""get_fst_depth3(ix) is *exactly* the number of moves % 3 to solve phase 1 of a cube with index ix"""
y = flipslice_twist_depth3[ix // 16]
y >>= (ix % 16) * 2
return y & 3
def get_corners_ud_edges_depth3(ix):
"""corners_ud_edges_depth3(ix) is *at least* the number of moves % 3 to solve phase 2 of a cube with index ix"""
y = corners_ud_edges_depth3[ix // 16]
y >>= (ix % 16) * 2
return y & 3
def set_flipslice_twist_depth3(ix, value):
shift = (ix % 16) * 2
base = ix >> 4
flipslice_twist_depth3[base] &= ~(3 << shift) & 0xffffffff
flipslice_twist_depth3[base] |= value << shift
def set_corners_ud_edges_depth3(ix, value):
shift = (ix % 16) * 2
base = ix >> 4
corners_ud_edges_depth3[base] &= ~(3 << shift) & 0xffffffff
corners_ud_edges_depth3[base] |= value << shift
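# Illustration (not used by the solver): how 16 two-bit entries share a single
# 32-bit word, mirroring the getters/setters above.
def _packing_example():
    demo = ar.array('L', [0xffffffff])  # one word = 16 entries, all unfilled (value 3)
    ix, value = 5, 2                    # store the value 2 at index 5
    shift = (ix % 16) * 2
    demo[ix // 16] &= ~(3 << shift) & 0xffffffff
    demo[ix // 16] |= value << shift
    return (demo[ix // 16] >> shift) & 3  # -> 2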
########################################################################################################################
def create_phase1_prun_table():
"""Creates/loads the flipslice_twist_depth3 pruning table for phase 1."""
global flipslice_twist_depth3
total = defs.N_FLIPSLICE_CLASS * defs.N_TWIST
fname = "phase1_prun"
if not path.isfile(fname):
print("creating " + fname + " table...")
print('This may take half an hour or even longer, depending on the hardware.')
flipslice_twist_depth3 = ar.array('L', [0xffffffff] * (total // 16 + 1))
# #################### create table with the symmetries of the flipslice classes ###############################
cc = cb.CubieCube()
fs_sym = ar.array('H', [0] * defs.N_FLIPSLICE_CLASS)
for i in range(defs.N_FLIPSLICE_CLASS):
if (i + 1) % 1000 == 0:
print('.', end='', flush=True)
rep = sy.flipslice_rep[i]
cc.set_slice(rep // defs.N_FLIP)
cc.set_flip(rep % defs.N_FLIP)
for s in range(defs.N_SYM_D4h):
ss = cb.CubieCube(sy.symCube[s].cp, sy.symCube[s].co, sy.symCube[s].ep,
sy.symCube[s].eo) # copy cube
ss.edge_multiply(cc) # s*cc
ss.edge_multiply(sy.symCube[sy.inv_idx[s]]) # s*cc*s^-1
if ss.get_slice() == rep // defs.N_FLIP and ss.get_flip() == rep % defs.N_FLIP:
fs_sym[i] |= 1 << s
print()
# ##################################################################################################################
fs_classidx = 0 # value for solved phase 1
twist = 0
set_flipslice_twist_depth3(defs.N_TWIST * fs_classidx + twist, 0)
done = 1
depth = 0
backsearch = False
print('depth:', depth, 'done: ' + str(done) + '/' + str(total))
while done != total:
depth3 = depth % 3
if depth == 9:
# backwards search is faster for depth >= 9
print('flipping to backwards search...')
backsearch = True
if depth < 8:
mult = 5
else:
mult = 1
idx = 0
for fs_classidx in range(defs.N_FLIPSLICE_CLASS):
if (fs_classidx + 1) % (200 * mult) == 0:
print('.', end='', flush=True)
if (fs_classidx + 1) % (16000 * mult) == 0:
print('')
twist = 0
while twist < defs.N_TWIST:
# ########## if table entries are not populated, this is very fast: ################################
if not backsearch and idx % 16 == 0 and flipslice_twist_depth3[idx // 16] == 0xffffffff \
and twist < defs.N_TWIST - 16:
twist += 16
idx += 16
continue
####################################################################################################
if backsearch:
match = (get_flipslice_twist_depth3(idx) == 3)
else:
match = (get_flipslice_twist_depth3(idx) == depth3)
if match:
flipslice = sy.flipslice_rep[fs_classidx]
flip = flipslice % 2048 # defs.N_FLIP = 2048
slice_ = flipslice >> 11 # // defs.N_FLIP
for m in enums.Move:
twist1 = mv.twist_move[18 * twist + m] # defs.N_MOVE = 18
flip1 = mv.flip_move[18 * flip + m]
slice1 = mv.slice_sorted_move[432 * slice_ + m] // 24 # defs.N_PERM_4 = 24, 18*24 = 432
flipslice1 = (slice1 << 11) + flip1
fs1_classidx = sy.flipslice_classidx[flipslice1]
fs1_sym = sy.flipslice_sym[flipslice1]
twist1 = sy.twist_conj[(twist1 << 4) + fs1_sym]
idx1 = 2187 * fs1_classidx + twist1 # defs.N_TWIST = 2187
if not backsearch:
if get_flipslice_twist_depth3(idx1) == 3: # entry not yet filled
set_flipslice_twist_depth3(idx1, (depth + 1) % 3)
done += 1
# ####symmetric position has eventually more than one representation ###############
sym = fs_sym[fs1_classidx]
if sym != 1:
for j in range(1, 16):
sym >>= 1
if sym % 2 == 1:
twist2 = sy.twist_conj[(twist1 << 4) + j]
# fs2_classidx = fs1_classidx due to symmetry
idx2 = 2187 * fs1_classidx + twist2
if get_flipslice_twist_depth3(idx2) == 3:
set_flipslice_twist_depth3(idx2, (depth + 1) % 3)
done += 1
####################################################################################
else: # backwards search
if get_flipslice_twist_depth3(idx1) == depth3:
set_flipslice_twist_depth3(idx, (depth + 1) % 3)
done += 1
break
twist += 1
idx += 1 # idx = defs.N_TWIST * fs_class + twist
depth += 1
print()
print('depth:', depth, 'done: ' + str(done) + '/' + str(total))
fh = open(fname, "wb")
flipslice_twist_depth3.tofile(fh)
else:
print("loading " + fname + " table...")
fh = open(fname, "rb")
flipslice_twist_depth3 = ar.array('L')
flipslice_twist_depth3.fromfile(fh, total // 16 + 1)
fh.close()
def create_phase2_prun_table():
"""Creates/loads the corners_ud_edges_depth3 pruning table for phase 2."""
total = defs.N_CORNERS_CLASS * defs.N_UD_EDGES
fname = "phase2_prun"
global corners_ud_edges_depth3
if not path.isfile(fname):
print("creating " + fname + " table...")
corners_ud_edges_depth3 = ar.array('L', [0xffffffff] * (total // 16))
# ##################### create table with the symmetries of the corners classes ################################
cc = cb.CubieCube()
c_sym = ar.array('H', [0] * defs.N_CORNERS_CLASS)
for i in range(defs.N_CORNERS_CLASS):
if (i + 1) % 1000 == 0:
print('.', end='', flush=True)
rep = sy.corner_rep[i]
cc.set_corners(rep)
for s in range(defs.N_SYM_D4h):
ss = cb.CubieCube(sy.symCube[s].cp, sy.symCube[s].co, sy.symCube[s].ep,
sy.symCube[s].eo) # copy cube
ss.corner_multiply(cc) # s*cc
ss.corner_multiply(sy.symCube[sy.inv_idx[s]]) # s*cc*s^-1
if ss.get_corners() == rep:
c_sym[i] |= 1 << s
print()
################################################################################################################
c_classidx = 0 # value for solved phase 2
ud_edge = 0
set_corners_ud_edges_depth3(defs.N_UD_EDGES * c_classidx + ud_edge, 0)
done = 1
depth = 0
print('depth:', depth, 'done: ' + str(done) + '/' + str(total))
        while depth < 10:  # we fill the table only to depth 9 + 1
depth3 = depth % 3
idx = 0
mult = 2
if depth > 9:
mult = 1
for c_classidx in range(defs.N_CORNERS_CLASS):
if (c_classidx + 1) % (20 * mult) == 0:
print('.', end='', flush=True)
if (c_classidx + 1) % (1600 * mult) == 0:
print('')
ud_edge = 0
while ud_edge < defs.N_UD_EDGES:
# ################ if table entries are not populated, this is very fast: ##########################
if idx % 16 == 0 and corners_ud_edges_depth3[idx // 16] == 0xffffffff \
and ud_edge < defs.N_UD_EDGES - 16:
ud_edge += 16
idx += 16
continue
####################################################################################################
if get_corners_ud_edges_depth3(idx) == depth3:
corner = sy.corner_rep[c_classidx]
# only iterate phase 2 moves
for m in (enums.Move.U1, enums.Move.U2, enums.Move.U3, enums.Move.R2, enums.Move.F2,
enums.Move.D1, enums.Move.D2, enums.Move.D3, enums.Move.L2, enums.Move.B2):
ud_edge1 = mv.ud_edges_move[18 * ud_edge + m]
corner1 = mv.corners_move[18 * corner + m]
c1_classidx = sy.corner_classidx[corner1]
c1_sym = sy.corner_sym[corner1]
ud_edge1 = sy.ud_edges_conj[(ud_edge1 << 4) + c1_sym]
idx1 = 40320 * c1_classidx + ud_edge1 # N_UD_EDGES = 40320
if get_corners_ud_edges_depth3(idx1) == 3: # entry not yet filled
set_corners_ud_edges_depth3(idx1, (depth + 1) % 3) # depth + 1 <= 10
done += 1
# ######symmetric position has eventually more than one representation #############
sym = c_sym[c1_classidx]
if sym != 1:
for j in range(1, 16):
sym >>= 1
if sym % 2 == 1:
ud_edge2 = sy.ud_edges_conj[(ud_edge1 << 4) + j]
# c1_classidx does not change
idx2 = 40320 * c1_classidx + ud_edge2
if get_corners_ud_edges_depth3(idx2) == 3:
set_corners_ud_edges_depth3(idx2, (depth + 1) % 3)
done += 1
####################################################################################
ud_edge += 1
idx += 1 # idx = defs.N_UD_EDGEPERM * corner_classidx + ud_edge
depth += 1
print()
print('depth:', depth, 'done: ' + str(done) + '/' + str(total))
print('remaining unfilled entries have depth >=11')
fh = open(fname, "wb")
corners_ud_edges_depth3.tofile(fh)
else:
print("loading " + fname + " table...")
fh = open(fname, "rb")
corners_ud_edges_depth3 = ar.array('L')
corners_ud_edges_depth3.fromfile(fh, total // 16)
fh.close()
def create_phase2_cornsliceprun_table():
"""Creates/loads the cornslice_depth pruning table for phase 2. With this table we do a fast precheck
at the beginning of phase 2."""
fname = "phase2_cornsliceprun"
global cornslice_depth
if not path.isfile(fname):
print("creating " + fname + " table...")
cornslice_depth = ar.array('b', [-1] * (defs.N_CORNERS * defs.N_PERM_4))
corners = 0 # values for solved phase 2
slice_ = 0
cornslice_depth[defs.N_PERM_4 * corners + slice_] = 0
done = 1
depth = 0
idx = 0
while done != defs.N_CORNERS * defs.N_PERM_4:
for corners in range(defs.N_CORNERS):
for slice_ in range(defs.N_PERM_4):
if cornslice_depth[defs.N_PERM_4 * corners + slice_] == depth:
for m in (enums.Move.U1, enums.Move.U2, enums.Move.U3, enums.Move.R2, enums.Move.F2,
enums.Move.D1, enums.Move.D2, enums.Move.D3, enums.Move.L2, enums.Move.B2):
corners1 = mv.corners_move[18 * corners + m]
slice_1 = mv.slice_sorted_move[18 * slice_ + m]
idx1 = defs.N_PERM_4 * corners1 + slice_1
if cornslice_depth[idx1] == -1: # entry not yet filled
cornslice_depth[idx1] = depth + 1
done += 1
if done % 20000 == 0:
print('.', end='', flush=True)
depth += 1
print()
fh = open(fname, "wb")
cornslice_depth.tofile(fh)
else:
print("loading " + fname + " table...")
fh = open(fname, "rb")
cornslice_depth = ar.array('b')
cornslice_depth.fromfile(fh, defs.N_CORNERS * defs.N_PERM_4)
fh.close()
# array distance computes the new distance from the old_distance i and the new_distance_mod3 j. ########################
# We need this array because the pruning tables only store the distances mod 3. ########################################
distance = ar.array('b', [0 for i in range(60)])
for i in range(20):
for j in range(3):
distance[3*i + j] = (i // 3) * 3 + j
if i % 3 == 2 and j == 0:
distance[3 * i + j] += 3
elif i % 3 == 0 and j == 2:
distance[3 * i + j] -= 3
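# Worked example: if the true distance of the current node is 5 (5 % 3 == 2)
# and the pruning table reports 0 for the successor, the successor's true
# distance must be 6, so distance[3 * 5 + 0] == 6.
def _distance_example():
    return distance[3 * 5 + 0]  # -> 6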
create_phase1_prun_table()
create_phase2_prun_table()
create_phase2_cornsliceprun_table()
|
py | b40f918d0a78e702a3fc4766bc64125c8b42137a | """
django_excel
~~~~~~~~~~~~~~~~~~~
A django middleware that provides one application programming interface
to read and write data in different excel file formats
:copyright: (c) 2015 by Onni Software Ltd.
:license: New BSD License
"""
from django.core.files.uploadhandler import (
MemoryFileUploadHandler, TemporaryFileUploadHandler)
from django.core.files.uploadedfile import (
InMemoryUploadedFile, TemporaryUploadedFile)
from django.http import HttpResponse
import pyexcel as pe
import pyexcel_webio as webio
from ._compact import DJANGO_ONE_SIX
class ExcelMixin(webio.ExcelInput):
"""
Provide additional pyexcel-webio methods to Django's UploadedFiles
"""
def get_params(self, **keywords):
extension = self.name.split(".")[-1]
keywords['file_type'] = extension
keywords['file_content'] = self.file.read()
return keywords
def save_to_database(self, model=None, initializer=None, mapdict=None,
**keywords):
"""
Save data from a sheet to a nominated django model
"""
params = self.get_params(**keywords)
if 'name_columns_by_row' not in params:
params['name_columns_by_row'] = 0
if 'name_rows_by_column' not in params:
params['name_rows_by_column'] = -1
params['dest_model'] = model
params['dest_initializer'] = initializer
params['dest_mapdict'] = mapdict
pe.save_as(**params)
def save_book_to_database(self, models=None, initializers=None,
mapdicts=None, batch_size=None,
**keywords):
"""
        Save data from a book to nominated django models
"""
params = self.get_params(**keywords)
params['dest_models'] = models
params['dest_initializers'] = initializers
params['dest_mapdicts'] = mapdicts
params['dest_batch_size'] = batch_size
pe.save_book_as(**params)
class ExcelInMemoryUploadedFile(ExcelMixin, InMemoryUploadedFile):
"""
Mix-in pyexcel-webio methods in InMemoryUploadedFile
"""
pass
class TemporaryUploadedExcelFile(ExcelMixin, TemporaryUploadedFile):
"""
Mix-in pyexcel-webio methods in TemporaryUploadedFile
"""
pass
class ExcelMemoryFileUploadHandler(MemoryFileUploadHandler):
"""
Override MemoryFileUploadHandler to bring in ExcelInMemoryUploadedFile
"""
def file_complete(self, file_size):
if not self.activated:
return
self.file.seek(0)
keywords = dict(
file=self.file,
field_name=self.field_name,
name=self.file_name,
content_type=self.content_type,
size=file_size,
charset=self.charset
)
if not DJANGO_ONE_SIX:
keywords["content_type_extra"] = self.content_type_extra
return ExcelInMemoryUploadedFile(**keywords)
class TemporaryExcelFileUploadHandler(TemporaryFileUploadHandler):
"""
Override TemporaryFileUploadHandler to bring in TemporaryUploadedExcelFile
"""
def new_file(self, file_name, *args, **kwargs):
"""
Create the file object to append to as data is coming in.
"""
super(TemporaryFileUploadHandler, self).new_file(
file_name,
*args,
**kwargs)
custom_args = [
self.file_name,
self.content_type,
0,
self.charset]
if not DJANGO_ONE_SIX:
custom_args.append(self.content_type_extra)
self.file = TemporaryUploadedExcelFile(*custom_args)
def _make_response(content, content_type, status, file_name=None):
"""
Custom response function that is called by pyexcel-webio
"""
response = HttpResponse(content, content_type=content_type, status=status)
if file_name:
response["Content-Disposition"] = (
"attachment; filename=%s" % (file_name))
return response
webio.ExcelResponse = _make_response
from pyexcel_webio import ( # noqa
make_response,
make_response_from_array,
make_response_from_dict,
make_response_from_records,
make_response_from_book_dict,
make_response_from_query_sets
)
def make_response_from_a_table(model, file_type,
status=200, file_name=None, **keywords):
"""
Produce a single sheet Excel book of *file_type*
:param model: a Django model
:param file_type: same as :meth:`~django_excel.make_response`
:param status: same as :meth:`~django_excel.make_response`
"""
sheet = pe.get_sheet(model=model, **keywords)
return make_response(sheet, file_type, status,
file_name=file_name, **keywords)
def make_response_from_tables(models, file_type,
status=200, file_name=None, **keywords):
"""
    Produce a multiple sheet Excel book of *file_type*. It becomes the same
    as :meth:`~django_excel.make_response_from_a_table` if you pass a *models*
    list that contains a single model
:param models: a list of Django models
:param file_type: same as :meth:`~django_excel.make_response`
:param status: same as :meth:`~django_excel.make_response`
"""
book = pe.get_book(models=models, **keywords)
return make_response(book, file_type, status,
file_name=file_name, **keywords)
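# Illustrative usage sketch (not part of the original module): a minimal Django
# view built on the helpers above. The model class is passed in rather than
# named, since any concrete model is application-specific.
def example_download_view(request, model_class):
    # Export every row of the model as a single-sheet xlsx attachment.
    return make_response_from_a_table(model_class, "xlsx", file_name="export")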
|
py | b40f926809ab7e92f08720296ea62e75799d9fe6 | # flake8: noqa: F401
import os
import sys
# import training function
from training import parse_args, train_fn
# import deployment functions
from explaining import model_fn, predict_fn, input_fn, output_fn
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
train_fn(args)
|
py | b40f933c838d2d8c1b371844cd14c27d5b3b791a | import asyncio
import os
from sanic.request import Request
import uuid
from datetime import datetime
from typing import Text, Iterator
import pytest
import rasa.utils.io
from rasa.core.agent import Agent
from rasa.core.channels.channel import CollectingOutputChannel, OutputChannel
from rasa.core.domain import Domain
from rasa.core.events import ReminderScheduled, UserUttered, ActionExecuted
from rasa.core.nlg import TemplatedNaturalLanguageGenerator
from rasa.core.policies.ensemble import PolicyEnsemble
from rasa.core.policies.memoization import Policy
from rasa.core.processor import MessageProcessor
from rasa.core.slots import Slot
from rasa.core.tracker_store import InMemoryTrackerStore, MongoTrackerStore
from rasa.core.trackers import DialogueStateTracker
DEFAULT_DOMAIN_PATH_WITH_SLOTS = "data/test_domains/default_with_slots.yml"
DEFAULT_DOMAIN_PATH_WITH_SLOTS_AND_NO_ACTIONS = (
"data/test_domains/default_with_slots_and_no_actions.yml"
)
DEFAULT_DOMAIN_PATH_WITH_MAPPING = "data/test_domains/default_with_mapping.yml"
DEFAULT_STORIES_FILE = "data/test_stories/stories_defaultdomain.md"
DEFAULT_STACK_CONFIG = "data/test_config/stack_config.yml"
DEFAULT_NLU_DATA = "examples/moodbot/data/nlu.md"
INCORRECT_NLU_DATA = "data/test/markdown_single_sections/incorrect_nlu_format.md"
END_TO_END_STORY_FILE = "data/test_evaluations/end_to_end_story.md"
E2E_STORY_FILE_UNKNOWN_ENTITY = "data/test_evaluations/story_unknown_entity.md"
STORY_FILE_TRIPS_CIRCUIT_BREAKER = (
"data/test_evaluations/stories_trip_circuit_breaker.md"
)
E2E_STORY_FILE_TRIPS_CIRCUIT_BREAKER = (
"data/test_evaluations/end_to_end_trips_circuit_breaker.md"
)
MOODBOT_MODEL_PATH = "examples/moodbot/models/"
RESTAURANTBOT_PATH = "examples/restaurantbot/"
DEFAULT_ENDPOINTS_FILE = "data/test_endpoints/example_endpoints.yml"
TEST_DIALOGUES = [
"data/test_dialogues/default.json",
"data/test_dialogues/formbot.json",
"data/test_dialogues/moodbot.json",
"data/test_dialogues/restaurantbot.json",
]
EXAMPLE_DOMAINS = [
DEFAULT_DOMAIN_PATH_WITH_SLOTS,
DEFAULT_DOMAIN_PATH_WITH_SLOTS_AND_NO_ACTIONS,
DEFAULT_DOMAIN_PATH_WITH_MAPPING,
"examples/formbot/domain.yml",
"examples/moodbot/domain.yml",
"examples/restaurantbot/domain.yml",
]
class CustomSlot(Slot):
def as_feature(self):
return [0.5]
# noinspection PyAbstractClass,PyUnusedLocal,PyMissingConstructor
class ExamplePolicy(Policy):
def __init__(self, example_arg):
pass
class MockedMongoTrackerStore(MongoTrackerStore):
"""In-memory mocked version of `MongoTrackerStore`."""
def __init__(self, _domain: Domain):
from mongomock import MongoClient
self.db = MongoClient().rasa
self.collection = "conversations"
super(MongoTrackerStore, self).__init__(_domain, None)
# https://github.com/pytest-dev/pytest-asyncio/issues/68
# this event_loop is used by pytest-asyncio, and redefining it
# is currently the only way of changing the scope of this fixture
@pytest.yield_fixture(scope="session")
def event_loop(request: Request) -> Iterator[asyncio.AbstractEventLoop]:
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="session")
def loop():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop = rasa.utils.io.enable_async_loop_debugging(loop)
yield loop
loop.close()
@pytest.fixture(scope="session")
def default_domain_path():
return DEFAULT_DOMAIN_PATH_WITH_SLOTS
@pytest.fixture(scope="session")
def default_stories_file():
return DEFAULT_STORIES_FILE
@pytest.fixture(scope="session")
def default_stack_config():
return DEFAULT_STACK_CONFIG
@pytest.fixture(scope="session")
def default_nlu_data():
return DEFAULT_NLU_DATA
@pytest.fixture(scope="session")
def default_domain():
return Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS)
@pytest.fixture
def default_channel() -> OutputChannel:
return CollectingOutputChannel()
@pytest.fixture
async def default_processor(default_agent: Agent) -> MessageProcessor:
tracker_store = InMemoryTrackerStore(default_agent.domain)
return MessageProcessor(
default_agent.interpreter,
default_agent.policy_ensemble,
default_agent.domain,
tracker_store,
TemplatedNaturalLanguageGenerator(default_agent.domain.templates),
)
@pytest.fixture
def tracker_with_six_scheduled_reminders(
default_processor: MessageProcessor,
) -> DialogueStateTracker:
reminders = [
ReminderScheduled("greet", datetime.now(), kill_on_user_message=False),
ReminderScheduled(
intent="greet",
entities=[{"entity": "name", "value": "Jane Doe"}],
trigger_date_time=datetime.now(),
kill_on_user_message=False,
),
ReminderScheduled(
intent="default",
entities=[{"entity": "name", "value": "Jane Doe"}],
trigger_date_time=datetime.now(),
kill_on_user_message=False,
),
ReminderScheduled(
intent="greet",
entities=[{"entity": "name", "value": "Bruce Wayne"}],
trigger_date_time=datetime.now(),
kill_on_user_message=False,
),
ReminderScheduled("default", datetime.now(), kill_on_user_message=False),
ReminderScheduled(
"default", datetime.now(), kill_on_user_message=False, name="special"
),
]
sender_id = uuid.uuid4().hex
tracker = default_processor.tracker_store.get_or_create_tracker(sender_id)
for reminder in reminders:
tracker.update(UserUttered("test"))
tracker.update(ActionExecuted("action_reminder_reminder"))
tracker.update(reminder)
default_processor.tracker_store.save(tracker)
return tracker
@pytest.fixture(scope="session")
def moodbot_domain(trained_moodbot_path):
domain_path = os.path.join("examples", "moodbot", "domain.yml")
return Domain.load(domain_path)
@pytest.fixture(scope="session")
def moodbot_metadata(unpacked_trained_moodbot_path):
return PolicyEnsemble.load_metadata(
os.path.join(unpacked_trained_moodbot_path, "core")
)
@pytest.fixture
def default_nlg(default_domain):
return TemplatedNaturalLanguageGenerator(default_domain.templates)
@pytest.fixture
def default_tracker(default_domain):
return DialogueStateTracker("my-sender", default_domain.slots)
@pytest.fixture(scope="session")
def project() -> Text:
import tempfile
from rasa.cli.scaffold import create_initial_project
directory = tempfile.mkdtemp()
create_initial_project(directory)
return directory
@pytest.fixture
async def form_bot_agent(trained_async, tmpdir_factory) -> Agent:
zipped_model = await trained_async(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
training_files=["examples/formbot/data/stories.md"],
)
return Agent.load_local_model(zipped_model)
|
py | b40f939a980920154bbad2faf874e8b28b4179b0 | import logging
import os
from django.core.files.base import ContentFile
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from pretix.base.i18n import language
from pretix.base.models import (
CachedCombinedTicket, CachedTicket, Event, InvoiceAddress, Order,
OrderPosition,
)
from pretix.base.services.tasks import ProfiledTask
from pretix.base.settings import PERSON_NAME_SCHEMES
from pretix.base.signals import allow_ticket_download, register_ticket_outputs
from pretix.celery_app import app
from pretix.helpers.database import rolledback_transaction
logger = logging.getLogger(__name__)
def generate_orderposition(order_position: int, provider: str):
order_position = OrderPosition.objects.select_related('order', 'order__event').get(id=order_position)
with language(order_position.order.locale):
responses = register_ticket_outputs.send(order_position.order.event)
for receiver, response in responses:
prov = response(order_position.order.event)
if prov.identifier == provider:
filename, ttype, data = prov.generate(order_position)
path, ext = os.path.splitext(filename)
for ct in CachedTicket.objects.filter(order_position=order_position, provider=provider):
ct.delete()
ct = CachedTicket.objects.create(order_position=order_position, provider=provider,
extension=ext, type=ttype, file=None)
ct.file.save(filename, ContentFile(data))
return ct.pk
def generate_order(order: int, provider: str):
order = Order.objects.select_related('event').get(id=order)
with language(order.locale):
responses = register_ticket_outputs.send(order.event)
for receiver, response in responses:
prov = response(order.event)
if prov.identifier == provider:
filename, ttype, data = prov.generate_order(order)
path, ext = os.path.splitext(filename)
for ct in CachedCombinedTicket.objects.filter(order=order, provider=provider):
ct.delete()
ct = CachedCombinedTicket.objects.create(order=order, provider=provider, extension=ext,
type=ttype, file=None)
ct.file.save(filename, ContentFile(data))
return ct.pk
@app.task(base=ProfiledTask)
def generate(model: str, pk: int, provider: str):
if model == 'order':
return generate_order(pk, provider)
elif model == 'orderposition':
return generate_orderposition(pk, provider)
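# Hedged usage sketch (added for illustration, not part of the original pretix module):
# the Celery task above is normally dispatched with a model name, primary key and a
# registered ticket-output provider identifier, e.g.
#   generate.apply_async(args=('order', order.pk, 'pdf'))
# or executed eagerly, as get_tickets_for_order() below does via generate.apply(...).
# The 'pdf' identifier is an assumption; any provider registered through
# register_ticket_outputs would work.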
class DummyRollbackException(Exception):
pass
def preview(event: int, provider: str):
event = Event.objects.get(id=event)
with rolledback_transaction(), language(event.settings.locale):
item = event.items.create(name=_("Sample product"), default_price=42.23,
description=_("Sample product description"))
item2 = event.items.create(name=_("Sample workshop"), default_price=23.40)
from pretix.base.models import Order
order = event.orders.create(status=Order.STATUS_PENDING, datetime=now(),
email='[email protected]',
locale=event.settings.locale,
expires=now(), code="PREVIEW1234", total=119)
scheme = PERSON_NAME_SCHEMES[event.settings.name_scheme]
sample = {k: str(v) for k, v in scheme['sample'].items()}
p = order.positions.create(item=item, attendee_name_parts=sample, price=item.default_price)
order.positions.create(item=item2, attendee_name_parts=sample, price=item.default_price, addon_to=p)
order.positions.create(item=item2, attendee_name_parts=sample, price=item.default_price, addon_to=p)
InvoiceAddress.objects.create(order=order, name_parts=sample, company=_("Sample company"))
responses = register_ticket_outputs.send(event)
for receiver, response in responses:
prov = response(event)
if prov.identifier == provider:
return prov.generate(p)
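# Hedged usage sketch (assumption, not from the original source): preview() is meant to
# be called with an event primary key and a provider identifier, e.g.
#   filename, mimetype, data = preview(event.pk, 'pdf')
# mirroring the (filename, ttype, data) tuple returned by prov.generate() above; the
# 'pdf' identifier is illustrative.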
def get_tickets_for_order(order):
can_download = all([r for rr, r in allow_ticket_download.send(order.event, order=order)])
if not can_download:
return []
if not order.ticket_download_available:
return []
providers = [
response(order.event)
for receiver, response
in register_ticket_outputs.send(order.event)
]
tickets = []
for p in providers:
if not p.is_enabled:
continue
if p.multi_download_enabled:
try:
if len(list(order.positions_with_tickets)) == 0:
continue
ct = CachedCombinedTicket.objects.filter(
order=order, provider=p.identifier, file__isnull=False
).last()
if not ct or not ct.file:
retval = generate.apply(args=('order', order.pk, p.identifier))
ct = CachedCombinedTicket.objects.get(pk=retval.get())
tickets.append((
"{}-{}-{}{}".format(
order.event.slug.upper(), order.code, ct.provider, ct.extension,
),
ct
))
except Exception:
logger.exception('Failed to generate ticket.')
else:
for pos in order.positions_with_tickets:
try:
ct = CachedTicket.objects.filter(
order_position=pos, provider=p.identifier, file__isnull=False
).last()
if not ct or not ct.file:
retval = generate.apply(args=('orderposition', pos.pk, p.identifier))
ct = CachedTicket.objects.get(pk=retval.get())
tickets.append((
"{}-{}-{}-{}{}".format(
order.event.slug.upper(), order.code, pos.positionid, ct.provider, ct.extension,
),
ct
))
except Exception:
logger.exception('Failed to generate ticket.')
return tickets
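# Hedged usage sketch (assumption): a download view could consume the helper above as
#   for filename, cached_ticket in get_tickets_for_order(order):
#       send_file(cached_ticket.file, name=filename)  # send_file is hypothetical
# where each tuple pairs a suggested download filename with a CachedTicket or
# CachedCombinedTicket instance.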
|
py | b40f9418eeac1c3bddb516fc25ee6faeaa0a6cf0 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import os
import re
def get_version(package):
"""
Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
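# Illustrative example (assumption): if balena/__init__.py contains a line such as
#   __version__ = "12.0.0"
# then get_version('balena') returns the string "12.0.0" via the regex above.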
setup(
name='balena-sdk',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=get_version('balena'),
description='Balena SDK for Python',
# The project's main homepage.
url='https://github.com/balena-io/balena-sdk-python',
# Author details
author='Balena',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 3.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
# What does your project relate to?
keywords='balena balena.io resin resin.io api sdk',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['PyJWT>=2.0.0', 'requests>=2.19.1', 'pyotp>=2.2.5', 'pyOpenSSL>=18.0.0', 'Twisted>=18.7.0', 'service-identity', 'semver'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'description': ['DESCRIPTION.rst'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
py | b40f94a987200e667f4dfc87fce4d8371109345e | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.10.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1S3Type(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'bucket': 'str',
'key': 'str'
}
attribute_map = {
'bucket': 'bucket',
'key': 'key'
}
def __init__(self, bucket=None, key=None, local_vars_configuration=None): # noqa: E501
"""V1S3Type - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._bucket = None
self._key = None
self.discriminator = None
if bucket is not None:
self.bucket = bucket
if key is not None:
self.key = key
@property
def bucket(self):
"""Gets the bucket of this V1S3Type. # noqa: E501
:return: The bucket of this V1S3Type. # noqa: E501
:rtype: str
"""
return self._bucket
@bucket.setter
def bucket(self, bucket):
"""Sets the bucket of this V1S3Type.
:param bucket: The bucket of this V1S3Type. # noqa: E501
:type: str
"""
self._bucket = bucket
@property
def key(self):
"""Gets the key of this V1S3Type. # noqa: E501
:return: The key of this V1S3Type. # noqa: E501
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this V1S3Type.
:param key: The key of this V1S3Type. # noqa: E501
:type: str
"""
self._key = key
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1S3Type):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1S3Type):
return True
return self.to_dict() != other.to_dict()
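# Hedged usage sketch (not part of the generated file; values are illustrative):
#   s3 = V1S3Type(bucket="my-bucket", key="datasets/train.csv")
#   assert s3.to_dict() == {"bucket": "my-bucket", "key": "datasets/train.csv"}
#   print(s3)  # pprint-formatted via __repr__/to_str()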
|
py | b40f950eda51b5dca1f0c449b647d82676b60540 | """
Test ThreadSanitizer when multiple different issues are found.
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import json
class TsanMultipleTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(
oslist=["linux"],
bugnumber="non-core functionality, need to reenable and fix later (DES 2014.11.07)")
@expectedFailureNetBSD
@skipIfFreeBSD # llvm.org/pr21136 runtimes not yet available by default
@skipIfRemote
@skipUnlessThreadSanitizer
def test(self):
self.build()
self.tsan_tests()
def tsan_tests(self):
exe = self.getBuildArtifact("a.out")
self.expect(
"file " + exe,
patterns=["Current executable set to .*a.out"])
self.runCmd("env TSAN_OPTIONS=abort_on_error=0")
self.runCmd("run")
stop_reason = self.dbg.GetSelectedTarget().process.GetSelectedThread().GetStopReason()
if stop_reason == lldb.eStopReasonExec:
# On OS X 10.10 and older, we need to re-exec to enable
# interceptors.
self.runCmd("continue")
report_count = 0
while self.dbg.GetSelectedTarget().process.GetSelectedThread(
).GetStopReason() == lldb.eStopReasonInstrumentation:
report_count += 1
stop_description = self.dbg.GetSelectedTarget(
).process.GetSelectedThread().GetStopDescription(100)
self.assertTrue(
(stop_description == "Data race detected") or
(stop_description == "Use of deallocated memory detected") or
(stop_description == "Thread leak detected") or
(stop_description == "Use of an uninitialized or destroyed mutex detected") or
(stop_description == "Unlock of an unlocked mutex (or by a wrong thread) detected")
)
self.expect(
"thread info -s",
"The extended stop info should contain the TSan provided fields",
substrs=[
"instrumentation_class",
"description",
"mops"])
output_lines = self.res.GetOutput().split('\n')
json_line = '\n'.join(output_lines[2:])
data = json.loads(json_line)
self.assertEqual(data["instrumentation_class"], "ThreadSanitizer")
backtraces = self.dbg.GetSelectedTarget().process.GetSelectedThread(
).GetStopReasonExtendedBacktraces(lldb.eInstrumentationRuntimeTypeThreadSanitizer)
self.assertTrue(backtraces.GetSize() >= 1)
self.runCmd("continue")
self.assertEqual(
self.dbg.GetSelectedTarget().process.GetState(),
lldb.eStateExited,
PROCESS_EXITED)
|
py | b40f95b2af76e777d8b813c1533c74c38630fa53 | # Copyright 2017 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant .realigner.aligner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
from deepvariant.core.genomics import cigar_pb2
from deepvariant import test_utils
from deepvariant.core import cigar as _cigar
from deepvariant.core import ranges
from deepvariant.protos import realigner_pb2
from deepvariant.realigner import aligner
class PairwiseAlignerTest(parameterized.TestCase):
def test_sw_start_offsets(self):
"""Test Aligner._sw_start_offsets()."""
k = 3
read = aligner.Read(
test_utils.make_read(
'AaGAt', start=0, cigar=[(5, 'M')], quals=[64] * 5, name='read_1'))
read.set_read_kmers(k)
target = aligner.Target('TgATCAGATAAG')
target.build_target_index(k)
self.assertEqual([-1, 4, 9],
aligner._sw_start_offsets(target.kmer_index, read.kmers))
def _align_seqs(self,
seq1,
seq2,
match=1,
mismatch=2,
gap_open=4,
gap_extend=1):
pw_aligner = aligner.make_pairwise_aligner(
seq1,
match=match,
mismatch=mismatch,
gap_open_penalty=gap_open,
gap_extend_penalty=gap_extend)
return pw_aligner.align(seq2)
def assertAlignmentEqual(self, alignment, expected_dict):
for k, v in expected_dict.iteritems():
self.assertEqual(
getattr(alignment, k), v,
'Expected field {} of alignment [{}] to be {} but got {}'.format(
k, alignment, v, getattr(alignment, k)))
@parameterized.parameters(
# Try out two exact matches of different lengths.
dict(
seq1='ttAtt',
seq2='ttAtt',
params=dict(match=1, mismatch=2),
expected_score=5,
expected_cigar='5M'),
dict(
seq1='tttAttt',
seq2='tttAttt',
params=dict(match=1, mismatch=2),
expected_score=7,
expected_cigar='7M'),
# One mismatch in the middle.
dict(
seq1='tttAttt',
seq2='tttCttt',
params=dict(match=1, mismatch=2),
expected_score=6 - 2,
expected_cigar='7M'),
# Check that match and mismatch scores are respected.
dict(
seq1='tttAttt',
seq2='tttCttt',
params=dict(match=2, mismatch=3),
expected_score=6 * 2 - 3,
expected_cigar='7M'),
dict(
seq1='tttAttt',
seq2='tttCttt',
params=dict(match=4, mismatch=2),
expected_score=6 * 4 - 2,
expected_cigar='7M'),
# Now for some insertion/deletions.
dict(
seq1='ttAtt',
seq2='tttt',
params=dict(match=4, mismatch=2, gap_open=4, gap_extend=2),
expected_score=4 * 4 - 4,
expected_cigar='2M1I2M'),
# Same as above sequence, but reversed to the cigar is different.
dict(
seq1='tttt',
seq2='ttAtt',
params=dict(match=4, mismatch=2, gap_open=4, gap_extend=2),
expected_score=4 * 4 - 4,
expected_cigar='2M1D2M'),
# Gap extension is respected.
dict(
seq1='ttAAtt',
seq2='tttt',
params=dict(match=4, mismatch=2, gap_open=2, gap_extend=1),
expected_score=4 * 4 - 2 * 1 - 1 * 1,
expected_cigar='2M2I2M'),
dict(
seq1='ttAAAtt',
seq2='tttt',
params=dict(match=4, mismatch=2, gap_open=2, gap_extend=1),
expected_score=4 * 4 - 2 * 1 - 2 * 1,
expected_cigar='2M3I2M'),
)
def test_pairwise_alignment(self, seq1, seq2, params, expected_cigar,
expected_score):
alignment = self._align_seqs(seq1, seq2, **params)
self.assertAlignmentEqual(alignment,
dict(
query_begin=0,
query_end=len(seq1) - 1,
target_begin=0,
cigar=expected_cigar,
target_end_optimal=len(seq2) - 1,
optimal_alignment_score=expected_score))
@parameterized.parameters(
dict(
query='ttACT',
target='ttACTtt',
expected=dict(
query_begin=0,
query_end=4,
target_begin=0,
target_end_optimal=4,
cigar='5M')),
dict(
query='ACTtt',
target='ttACTtt',
expected=dict(
query_begin=0,
query_end=4,
target_begin=2,
target_end_optimal=6,
cigar='5M')),
dict(
query='ACT',
target='ttACTtt',
expected=dict(
query_begin=0,
query_end=2,
target_begin=2,
target_end_optimal=4,
cigar='3M')),
dict(
query='ACTtt',
target='ACT',
expected=dict(
query_begin=0,
query_end=2,
target_begin=0,
target_end_optimal=2,
cigar='3M')),
dict(
query='ttACT',
target='ACT',
expected=dict(
query_begin=2,
query_end=4,
target_begin=0,
target_end_optimal=2,
cigar='3M')),
)
def test_start_ends(self, query, target, expected):
alignment = self._align_seqs(query, target)
self.assertAlignmentEqual(alignment, expected)
class AlignerTest(parameterized.TestCase):
def make_test_aligner(self, ref_seq=None, region=None):
config = realigner_pb2.RealignerOptions.AlignerOptions(
match=1, mismatch=1, gap_open=2, gap_extend=1, k=3, error_rate=.02)
ref_seq = ref_seq or 'AAAAAAA'
region = region or ranges.make_range('ref', 10, 10 + len(ref_seq))
return aligner.Aligner(config, region, ref_seq)
@parameterized.parameters(
('AATA', {
'AAT': [0],
'ATA': [1]
}),
('ATcATCA', {
'ATC': [0, 3],
'TCA': [1, 4],
'CAT': [2]
}),
('AAtA', {
'AAT': [0],
'ATA': [1]
}),
('AT', None),
)
def test_build_target_index(self, seq, expected_kmers_or_none):
"""Test Aligner.set_targets()."""
target = aligner.Target(seq)
result = target.build_target_index(k=3)
self.assertEqual(result, bool(expected_kmers_or_none))
if expected_kmers_or_none is None:
self.assertEqual(len(target.kmer_index), 0)
else:
self.assertEqual(expected_kmers_or_none, target.kmer_index)
@parameterized.parameters(
('ATCATCA', 'ATcATCA', [], []),
('ATCAAATTTCA', 'ATCAATTTCA', [3], [cigar_pb2.CigarUnit.DELETE]),
('ATCAAATTTCA', 'ATCTAAATTTCA', [3], [cigar_pb2.CigarUnit.INSERT]),
# redacted
# global alignment.
('ATCATCA', 'ATCCA', None, None),
('ATCAAATTTCA', 'ATAAATTTCTTA', None, None))
def test_set_targets(self, ref_seq, target_seq, expected_pos,
expected_cigar_op):
"""Test Aligner.align_targets()."""
align_reads = self.make_test_aligner(ref_seq)
align_reads.set_targets([target_seq])
if expected_pos is not None:
self.assertEqual(expected_pos,
[var.pos for var in align_reads.targets[0].gaps])
self.assertEqual(expected_cigar_op,
[var.cigar_op for var in align_reads.targets[0].gaps])
else:
self.assertEqual([], align_reads.targets)
@parameterized.parameters((2), (3))
def test_ssw_alignment(self, target_offset):
"""Test Aligner._ssw_alignment()."""
align_reads = self.make_test_aligner()
read_seq = 'AaGAt'
target_seq = 'TgAAGATCAGA'
pw_aligner = align_reads._make_pairwise_aligner(read_seq)
start_offset, alignment = align_reads._ssw_alignment(
pw_aligner, read_seq, target_seq, target_offset)
self.assertEqual(2, alignment.target_begin + start_offset)
self.assertEqual(5, alignment.optimal_alignment_score)
self.assertEqual('5M', alignment.cigar)
@parameterized.parameters(
('AaGAt', 'AATA', None, None, 'Read has no common k-mer with target.'),
('AaGAt', 'TTAAGAtA', 2, '5M', 'Read has a perfect match.'),
('AAAAAAATAAA', 'AAGAAAAAAAA', 0, '2M1D5M1I3M',
'Read has one insertion and one deletion.'),
('TTCAAAGTC', 'AGTCAAAGTCC', 2, '8M',
'Read starts with a mismatch which should be clipped.'))
def test_realign_read(self, read_seq, target_seq, expected_align_start,
expected_cigar, comment):
"""Test Aligner.test_align_read_to_target()."""
read = aligner.Read(
test_utils.make_read(
read_seq,
chrom='ref',
start=0,
cigar=[(len(read_seq), 'M')],
quals=[64] * len(read_seq),
name='read'))
align_reads = self.make_test_aligner(ref_seq=target_seq)
align_reads.set_targets([target_seq])
align_reads.realign_read(read)
if expected_align_start:
self.assertEqual(align_reads.targets[0], read.target, comment)
self.assertEqual(expected_align_start,
read.target_offset + read.alignment.target_begin,
comment)
self.assertEqual(expected_cigar, read.alignment.cigar, comment)
else:
self.assertIsNone(read.target, comment)
self.assertIsNone(read.target_offset, comment)
self.assertIsNone(read.alignment, comment)
@parameterized.parameters(
('10M', 0, [], 'Cigar with no indels.'),
('10M', 2, [], 'Cigar with no indels with starting soft-clipped bases.'),
('5M2D5M', 2, ['SingleAlnOp(7, DELETE)', 'SingleAlnOp(7, DELETE)'],
'Cigar has one deletion and starting soft-clipped bases.'),
('5M2D4M1I3M', 0, [
'SingleAlnOp(5, DELETE)', 'SingleAlnOp(5, DELETE)',
'SingleAlnOp(9, INSERT)'
], 'Read sequence has one deletion and one insertion.'),
)
def test_cigar_str_to_gaps(self, cigar, query_offset, expected_variants_repr,
comment):
"""Test Aligner._cigar_str_to_gaps()."""
align_reads = self.make_test_aligner()
self.assertEqual(expected_variants_repr, [
str(var) for var in align_reads._cigar_str_to_gaps(cigar, query_offset)
], comment)
@parameterized.parameters(
('chr1', 10, 100, 'chr1', 20, 20, [(20, 'M')], ''),
('chr1', 10, 100, 'chr2', 20, 20, [(20, 'M')],
'readalignment validation: read reference name is inconsistent with '
'reference information.'),
('chr1', 10, 100, 'chr1', 90, 20, [(20, 'M')],
'readalignment validation: read end position is out of reference '
'genomic range.'),
('chr1', 10, 100, 'chr1', 75, 20, [(10, 'M'), (10, 'D'), (10, 'M')],
'readalignment validation: read end position is out of reference '
'genomic range.'),
('chr1', 10, 100, 'chr1', 70, 20, [(10, 'M'), (10, 'D'), (10, 'M')], ''),
('chr1', 10, 100, 'chr1', 80, 20, [(5, 'M'), (10, 'I'), (5, 'M')], ''),
('chr1', 10, 100, 'chr1', 20, 10, [(5, 'M'), (10, 'I'), (5, 'M')],
'readalignment validation: cigar is inconsistent with the read length.'),
('chr1', 10, 100, 'chr1', 20, 20, [(10, 'H'), (5, 'M'), (10, 'I'),
(5, 'S')], ''),
('chr1', 10, 100, 'chr1', 20, 10, [(10, 'H'), (5, 'M'), (10, 'D'),
(5, 'S')], ''),
)
def test_sanity_check_readalignment(self, ref_name, ref_start, ref_end,
read_chrom, read_start, read_len,
read_cigar, exception_msg):
"""Test Aligner.sanity_check_readalignment()."""
region = ranges.make_range(ref_name, ref_start, ref_end)
ref_seq = 'A' * (ref_end - ref_start)
align_reads = self.make_test_aligner(ref_seq, region)
read = test_utils.make_read(
'A' * read_len,
chrom=read_chrom,
start=read_start,
cigar=read_cigar,
quals=[64] * read_len,
name='read')
if exception_msg:
with self.assertRaisesRegexp(ValueError, exception_msg):
align_reads.sanity_check_readalignment(read)
else:
align_reads.sanity_check_readalignment(read)
@parameterized.parameters(
('TGCATGG', 23, [(7, 'M')], 'Read is a perfect match to the reference.'),
('TGCAAGG', 23, [(7, 'M')],
'Read has one mismatch w.r.t. the reference.'), ('TAAGCAGGG', 23, [
(1, 'M'), (2, 'I'), (3, 'M'), (1, 'D'), (3, 'M')
], 'Read is a perfect match to the 2nd target.'),
('CAGGGGG', 25, [(2, 'M'), (1, 'D'),
(5, 'M')], 'Read is a perfect match to the 2nd target.'),
('TAAGCAGGAGG', 23, [(1, 'M'), (2, 'I'), (3, 'M'), (1, 'D'), (5, 'M')],
'Read has one mismatch w.r.t. the 2nd target.'), ('AATAAAGCAGGG', 21, [
(3, 'M'), (3, 'I'), (3, 'M'), (1, 'D'), (3, 'M')
], 'Read has one insertion w.r.t. the 2nd target.'), ('AATAGCAGGG', 21, [
(3, 'M'), (1, 'I'), (3, 'M'), (1, 'D'), (3, 'M')
], 'Read has one deletion w.r.t. the 2nd target.'),
('AATAAAGCGGGGGA', 21, [(3, 'M'), (3, 'I'), (2, 'M'), (2, 'D'), (6, 'M')],
'Read has one insertion and one deletion w.r.t. the 2nd target.'),
('GCAAGGGGGA', 24, [(10, 'M')],
'Read insertion overlaps with the deletion in the 2nd target.'),
('TTTAAGCAGGGGGC', 23, [(2, 'S'), (1, 'M'), (2, 'I'), (3, 'M'), (1, 'D'),
(5, 'M'), (1, 'S')],
'Read has clipped bases w.r.t. the 2nd target.'), ('AAGCAGGGGGC', 24, [
(2, 'S'), (3, 'M'), (1, 'D'), (5, 'M'), (1, 'S')
], 'Read starts in an insertion within the 2nd target.'),
('AAAGCAGGGGGC', 24, [(3, 'S'), (3, 'M'), (1, 'D'), (5, 'M'), (1, 'S')],
'Read starts in an insertion within the 2nd target, followed by an '
'insertion in read to target alignment.'), ('GGGGG', 28, [
(5, 'M')
], 'Read starts after a deletion within the 2nd target.'))
def test_align_reads_simple(self, read_seq, expected_align_pos,
expected_cigar, comment):
"""Test Aligner.align_reads(). Simple tests.
Targets consist of
- original reference sequence.
- a sequence with 'AA' insertion at position 14 and
- 'T' deletion at position 19.
Args:
read_seq: str, read sequence.
expected_align_pos: int, expected aligned position
expected_cigar: [(int, str)], expected cigar information.
comment: str, test comment.
"""
ref_seq = 'AAAAAAAAAAAAATGCATGGGGGATTTTTTTTTTT'
region = ranges.make_range('ref', 10, 10 + len(ref_seq))
align_reads = self.make_test_aligner(ref_seq, region)
# redacted
# implemented. For local alignment, it ensures that there are enough exact
# matches between the reference and target for end-to-end alignment.
targets = [ref_seq, 'AAAAAAAAAAAAATAAGCAGGGGGATTTTTTTTTTT']
read = test_utils.make_read(
read_seq,
chrom='ref',
start=0,
cigar=[(len(read_seq), 'M')],
quals=[64] * len(read_seq),
name='read')
aligned_reads = align_reads.align_reads(targets, [read])
self.assertEqual(expected_align_pos,
aligned_reads[0].alignment.position.position, comment)
self.assertEqual(
_cigar.to_cigar_units(expected_cigar),
list(aligned_reads[0].alignment.cigar), comment)
read = test_utils.make_read(
read_seq,
chrom='ref',
start=0,
cigar=[(2, 'H'), (len(read_seq), 'M'), (1, 'H')],
quals=[64] * len(read_seq),
name='read')
aligned_reads = align_reads.align_reads(targets, [read])
expected_cigar_w_hard_clip = [(2, 'H')] + expected_cigar + [(1, 'H')]
self.assertEqual(
_cigar.to_cigar_units(expected_cigar_w_hard_clip),
list(aligned_reads[0].alignment.cigar), comment)
def test_align_read_with_whole_clippd_seq(self):
"""Test Aligner.align_reads() when the whole read sequence is clipped."""
ref_seq = ('TTTGTTTGTTTGTGTTTGTGTTTTTGTTTGTTTGTGTTTGTGTTTGTTTGTGGTTTGTGT'
'GTTTGTGTTTGTGTTGGTTTG')
ref_len = len(ref_seq)
align_reads = self.make_test_aligner(ref_seq)
target_ins = 'AAAAAGTGGGGGGGAAGTGGGGAAAAA'
targets = [
ref_seq,
ref_seq[:int(ref_len / 2)] + target_ins + ref_seq[int(ref_len / 2):]
]
read_seq = 'CCC' + target_ins + 'CCC'
read = test_utils.make_read(
read_seq,
chrom='ref',
start=10,
cigar=[(len(read_seq), 'M')],
quals=[64] * len(read_seq),
name='read')
aligned_reads = align_reads.align_reads(targets, [read])
self.assertEqual(read, aligned_reads[0],
'Read should have its original alignment.')
def test_no_bad_soft_clipping(self):
self.skipTest('Enable when b/63143285 global alignment is fixed')
common = 'CTA'
read_seq = common + 'GA'
ref_seq = 'N' + common + 'CA' + 'N'
alt_seq = 'A' + ref_seq
targets = [ref_seq, alt_seq]
region = ranges.make_range('ref', 0, len(ref_seq))
align_reads = self.make_test_aligner(ref_seq, region)
read = test_utils.make_read(
read_seq,
chrom='ref',
start=0,
cigar=[(len(read_seq), 'M')],
quals=[35] * len(read_seq),
name='read')
realigned = align_reads.align_reads(targets, [read])[0]
# redacted
# 5M as we'd expect for this read:
# read_seq: -CTAGA-
# ref_seq : NCGTCAN
# But the current algorithm produces a local alignment of the read against
# the haplotypes, and the G <=> C mismatch causes the local aligner to
# simply skip those bases instead of incurring the mismatch penalty for it,
# resulting in a 3M2S read (GA clipped off) instead of the better 5M result.
self.assertEqual([_cigar.to_cigar_unit(len(read_seq), 'M')],
list(realigned.alignment.cigar))
class LibSSWAlignmentFacadeTest(parameterized.TestCase):
"""Tests for special logic in the wrapper class for libssw alignments."""
@parameterized.parameters(('5M', '5M'), ('2I2D2M', '2I2D2M'),
('2X1=2X', '5M'), ('2S5M2S', '5M'))
def test_cigar_simplification(self, cigar, expected_simplified_cigar):
self.assertEqual(
expected_simplified_cigar,
aligner.LibSSWAlignmentFacade._simplify_cigar_string(cigar))
if __name__ == '__main__':
absltest.main()
|
py | b40f96de7b1257a129ab47816286182d51651fbb | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014-2017 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from __future__ import unicode_literals
from pyannote.parser.base import Parser
import pyannote.core.json
class JSONParser(Parser):
"""PyAnnote JSON file format"""
@classmethod
def file_extensions(cls):
return ['json']
def read(self, path, **kwargs):
with open(path, 'r') as fp:
self._loaded = pyannote.core.json.load(fp)
return self
def empty(self, uri=None, modality=None, **kwargs):
raise NotImplementedError()
def __call__(self, **kwargs):
return self._loaded
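# Hedged usage sketch (assumption; the file name is illustrative):
#   parser = JSONParser().read('annotation.json')  # read() returns the parser itself
#   annotation = parser()                          # returns the loaded pyannote object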
|
py | b40f96dfb8c891aea67ccb996d6c2c1959a647fa | # Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
import _testcapi
import asyncio
import unittest
class EventLoopMethodsTestCase(unittest.TestCase):
def test_call_soon_calls(self):
get_debug_called = False
args_info = []
capture_arg = False
class Loop:
def get_debug_impl(self):
nonlocal get_debug_called
get_debug_called = True
return False
def call_soon_impl(self, args, kwargs):
if capture_arg:
self.captured = (args, kwargs)
args_info.append((id(args), id(kwargs)))
Loop.get_debug = _testcapi.make_get_debug_descriptor(Loop)
Loop.call_soon = _testcapi.make_call_soon_descriptor(Loop)
loop = Loop()
loop.__class__
fut = asyncio.Future(loop=loop) # calls get_debug
self.assertTrue(get_debug_called)
fut.set_result(10)
fut.add_done_callback(lambda *args: 0) # calls call_soon
fut.add_done_callback(lambda *args: 0) # calls call_soon
# verify that args were represented by the same tuple/dict in both cases
# as it was not leaked
self.assertEqual(args_info[0], args_info[1])
capture_arg = True
args_info = []
fut.add_done_callback(lambda *args: 0) # calls call_soon
fut.add_done_callback(lambda *args: 0) # calls call_soon
# verify that args were represented by different tuple/dict in both cases
# as it were captured on the first call
self.assertNotEqual(args_info[0][0], args_info[1][0])
self.assertNotEqual(args_info[0][1], args_info[1][1])
|
py | b40f97e953fdb6e4596f9f28826da432ef0fcbd1 | def filter_words(st):
st = " ".join(st.split())
return st.capitalize()
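# Illustrative examples (added for clarity, not in the original snippet):
#   filter_words("hello   world")      -> "Hello world"
#   filter_words("  ONE two  THREE ")  -> "One two three"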
|
py | b40f99e889cafa5c1d98367f3fc739a291a63e6c | import pytest
from pytcher import Choice, Float, Integer, NoMatch, Regex
@pytest.mark.parametrize(
"test_input, matcher, expected",
[
(5, Integer(), 5),
('15', Integer(), 15),
('9', Integer(10, 20), NoMatch),
('21', Integer(10, 20), NoMatch),
('16', Integer(10, 20), 16),
('15.5', Integer(10, 20), NoMatch),
('', Integer(10, 20), NoMatch)
]
)
def test_matcher_integer(test_input, matcher, expected):
assert expected == matcher.match(test_input)
@pytest.mark.parametrize(
"test_input, matcher, expected",
[
(5, Float(), 5),
('15', Float(), 15),
('9', Float(10, 20), NoMatch),
('21', Float(10, 20), NoMatch),
('16', Float(10, 20), 16),
('15.5', Float(10, 20), 15.5),
('', Float(10, 20), NoMatch)
]
)
def test_matcher_float(test_input, matcher, expected):
assert expected == matcher.match(test_input)
@pytest.mark.parametrize(
"test_input, expected",
[
('books', 'books'),
('novels', 'novels'),
('', NoMatch),
('books', 'books'),
('Books', NoMatch),
('Novels', NoMatch),
]
)
def test_matcher_choice_case_sensitive(test_input, expected):
choice = Choice('books', 'novels', ignore_case=False)
assert expected == choice.match(test_input)
@pytest.mark.parametrize(
"test_input, expected",
[
('books', 'books'),
('novels', 'novels'),
('', NoMatch),
('books', 'books'),
('Books', 'books'),
('Novels', 'novels'),
]
)
def test_matcher_choice_non_case_sensitive(test_input, expected):
choice = Choice('books', 'novels', ignore_case=True)
assert expected == choice.match(test_input)
def test_matcher_simple_regex():
regex = Regex('^a.*e$')
assert 'apple' == regex.match('apple')
assert regex.match('apples') is NoMatch
def test_matcher_one_group_regex():
regex = Regex('^fruit-(?P<apple>a.*e)$')
assert 'apple' == regex.match('fruit-apple')
assert regex.match('fruit-apples') is NoMatch
def test_matcher_two_groups_regex():
regex = Regex('^fruit-(?P<apple>a.*e)-(?P<orange>o.*e)$')
assert ['apple', 'orange'] == regex.match('fruit-apple-orange')
assert regex.match('fruit-apple-strawberry') is NoMatch
|
py | b40f99ec853c151b2404b734c46845545bddcb8b | import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='django-juno',
version='0.0.9',
author='Alessandra Carneiro',
author_email='[email protected]',
description='Juno integration with Django',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/alessandrak/django-juno',
packages=setuptools.find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'Framework :: Django :: 3.1',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires='>=3.7',
)
|
py | b40f99f08852343b236942d3f67b1607aac6b11a | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure batch files run as actions. Regression test for previously missing
trailing quote on command line. cmd typically will implicitly insert a missing
quote, but if the command ends in a quote, it will not insert another, so the
command can sometimes become unterminated.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'batch-file-action'
test.run_gyp('batch-file-action.gyp', chdir=CHDIR)
test.build('batch-file-action.gyp', test.ALL, chdir=CHDIR)
test.pass_test()
|
py | b40f9ae896932146f75d594229600693fea4003f | """Auto-generated file, do not edit by hand. GR metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_GR = PhoneMetadata(id='GR', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d\\d(?:\\d{3})?', possible_length=(3, 6)),
toll_free=PhoneNumberDesc(national_number_pattern='116\\d{3}', example_number='116000', possible_length=(6,)),
emergency=PhoneNumberDesc(national_number_pattern='1(?:00|12|66|99)', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:00|1(?:2|6(?:000|1(?:11|23)))|66|99)', example_number='112', possible_length=(3, 6)),
short_data=True)
|
py | b40f9c9a29a1a6d722decf2ef0e42d0d06cd32be | from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class FastRCNN(TwoStageDetector):
def __init__(self,
backbone,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None):
super(FastRCNN, self).__init__(
backbone=backbone,
neck=neck,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
def forward_test(self, imgs, img_metas, proposals, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
proposals (List[List[Tensor]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. The Tensor should have a shape Px4, where
P is the number of proposals.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) '
f'!= num of image meta ({len(img_metas)})')
# TODO: remove the restriction of samples_per_gpu == 1 when prepared
samples_per_gpu = imgs[0].size(0)
assert samples_per_gpu == 1
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], proposals[0],
**kwargs)
else:
# TODO: support test-time augmentation
raise NotImplementedError
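# Hedged usage sketch (assumption): at test time the detector is typically invoked
# through the MMDetection pipeline with pre-computed proposals, e.g.
#   results = model.forward_test([img_tensor], [[img_meta]], [[proposals]])
# where proposals is a Px4 tensor per image, matching the docstring above.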
|
py | b40f9d773b2812714d1afe28e41336ece11684e0 | """
Ory Kratos API
Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.10.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_kratos_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from ory_kratos_client.exceptions import ApiAttributeError
class SubmitSelfServiceSettingsFlowWithProfileMethodBody(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'method': (str,), # noqa: E501
'traits': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501
'csrf_token': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'method': 'method', # noqa: E501
'traits': 'traits', # noqa: E501
'csrf_token': 'csrf_token', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, method, traits, *args, **kwargs): # noqa: E501
"""SubmitSelfServiceSettingsFlowWithProfileMethodBody - a model defined in OpenAPI
Args:
method (str): Method Should be set to profile when trying to update a profile.
traits ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Traits contains all of the identity's traits.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
csrf_token (str): The Anti-CSRF Token This token is only required when performing browser flows.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.method = method
self.traits = traits
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, method, traits, *args, **kwargs): # noqa: E501
"""SubmitSelfServiceSettingsFlowWithProfileMethodBody - a model defined in OpenAPI
Args:
method (str): Method Should be set to profile when trying to update a profile.
traits ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Traits contains all of the identity's traits.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
csrf_token (str): The Anti-CSRF Token This token is only required when performing browser flows.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.method = method
self.traits = traits
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | b40f9e9975bc58e68094f07e5c241068dd049de4 | ##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Run test:
>>> pytest -q python/test/test_txrequest.py
"""
from pycylon.net.txrequest import TxRequest
import numpy as np
def test_txrequest():
target = 10
length = 8
header = np.array([1, 2, 3, 4], dtype=np.int32)
header_length = header.shape[0]
buf = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float64)
tx = TxRequest(target, buf, length, header, header_length)
assert tx.target == target
assert type(tx.buf) == type(buf) and tx.buf.shape == buf.shape and tx.buf.dtype == buf.dtype
assert type(tx.header) == type(
header) and tx.header.shape == header.shape and tx.header.dtype == header.dtype
assert tx.headerLength == header_length
assert tx.length == length
# print("To String")
# print(tx.to_string(b'double', 32))
|