ext | sha | content
---|---|---|
py | 1a5315ef770be1515a7fc66bd65d1e17f8fd7b35 | '''
Provides various utility functions.
This file is part of RTSLib.
Copyright (c) 2011-2013 by Datera, Inc
Copyright (c) 2011-2014 by Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
import os
import re
import six
import socket
import stat
import subprocess
import uuid
from contextlib import contextmanager
import pyudev
_CONTEXT = pyudev.Context()
class RTSLibError(Exception):
'''
Generic rtslib error.
'''
pass
class RTSLibALUANotSupported(RTSLibError):
'''
Backend does not support ALUA.
'''
pass
class RTSLibBrokenLink(RTSLibError):
'''
Broken link in configfs, i.e. missing LUN storage object.
'''
pass
class RTSLibNotInCFS(RTSLibError):
'''
The underlying configfs object does not exist. Happens when
calling methods of an object that is instantiated but has
been deleted from configfs, or when trying to look up an
object that does not exist.
'''
pass
def fwrite(path, string):
'''
This function writes a string to a file, and takes care of
opening it and closing it. If the file does not exist, it
will be created.
>>> from rtslib.utils import *
>>> fwrite("/tmp/test", "hello")
>>> fread("/tmp/test")
'hello'
@param path: The file to write to.
@type path: string
@param string: The string to write to the file.
@type string: string
'''
with open(path, 'w') as file_fd:
file_fd.write(str(string))
def fread(path):
'''
This function reads the contents of a file.
It takes care of opening and closing it.
>>> from rtslib.utils import *
>>> fwrite("/tmp/test", "hello")
>>> fread("/tmp/test")
'hello'
>>> fread("/tmp/notexistingfile") # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IOError: [Errno 2] No such file or directory: '/tmp/notexistingfile'
@param path: The path to the file to read from.
@type path: string
@return: A string containing the file's contents.
'''
with open(path, 'r') as file_fd:
return file_fd.read().strip()
def is_dev_in_use(path):
'''
This function will check if the device or file referenced by path is
already mounted or used as a storage object backend. It works by trying to
open the path with O_EXCL flag, which will fail if someone else already
did. Note that the file is closed before the function returns, so this
does not guarantee the device will still be available after the check.
@param path: path to the file or device to check
@type path: string
@return: A boolean, True if we cannot get an exclusive descriptor on the path,
False if we can.
'''
path = os.path.realpath(str(path))
try:
file_fd = os.open(path, os.O_EXCL|os.O_NDELAY)
except OSError:
return True
else:
os.close(file_fd)
return False
def _get_size_for_dev(device):
'''
@param device: the device
@type device: pyudev.Device
@return: the size in logical blocks, 0 if none found
@rtype: int
'''
attributes = device.attributes
try:
sect_size = attributes.asint('size')
except (KeyError, UnicodeDecodeError, ValueError):
return 0
try:
logical_block_size = attributes.asint('queue/logical_block_size')
except (KeyError, UnicodeDecodeError, ValueError):
return 0
return (sect_size * 512) // logical_block_size
def get_size_for_blk_dev(path):
'''
@param path: The path to a block device
@type path: string
@return: The size in logical blocks of the device
@raises: DeviceNotFoundError if corresponding device not found
@raises: EnvironmentError, ValueError in some situations
'''
device = pyudev.Device.from_device_file(_CONTEXT, os.path.realpath(str(path)))
return _get_size_for_dev(device)
get_block_size = get_size_for_blk_dev
def get_size_for_disk_name(name):
'''
@param name: a kernel disk name, as found in /proc/partitions
@type name: string
@return: The size in logical blocks of a disk-type block device.
@raises: DeviceNotFoundError
'''
# size is in 512-byte sectors, we want to return number of logical blocks
def get_size(name):
"""
:param str name: name of block device
:raises DeviceNotFoundError: if device not found
"""
device = pyudev.Device.from_name(_CONTEXT, 'block', name)
return _get_size_for_dev(device)
# Disk names can include '/' (e.g. 'cciss/c0d0') but these are changed to
# '!' when listed in /sys/block.
# in pyudev 0.19 it should no longer be necessary to swap '/'s in name
name = name.replace("/", "!")
try:
return get_size(name)
except pyudev.DeviceNotFoundError:
# Maybe it's a partition?
m = re.search(r'^([a-z0-9_\-!]+?)(\d+)$', name)
if m:
# If disk name ends with a digit, Linux sticks a 'p' between it and
# the partition number in the blockdev name.
disk = m.groups()[0]
if disk[-1] == 'p' and disk[-2].isdigit():
disk = disk[:-1]
return get_size(disk)
else:
raise
def get_blockdev_type(path):
'''
This function returns a block device's type.
Example: 0 is TYPE_DISK
If no match is found, None is returned.
>>> from rtslib.utils import *
>>> get_blockdev_type("/dev/sda")
0
>>> get_blockdev_type("/dev/sr0")
5
>>> get_blockdev_type("/dev/scd0")
5
>>> get_blockdev_type("/dev/nodevicehere") is None
True
@param path: path to the block device
@type path: string
@return: An int for the block device type, or None if not a block device.
'''
try:
device = pyudev.Device.from_device_file(_CONTEXT, path)
except (pyudev.DeviceNotFoundError, EnvironmentError, ValueError):
return None
if device.subsystem != u'block':
return None
attributes = device.attributes
disk_type = 0
try:
disk_type = attributes.asint('device/type')
except (KeyError, UnicodeDecodeError, ValueError):
pass
return disk_type
get_block_type = get_blockdev_type
def convert_scsi_path_to_hctl(path):
'''
This function returns the SCSI ID in H:C:T:L form for the block
device being mapped to the udev path specified.
If no match is found, None is returned.
>>> import rtslib.utils as utils
>>> utils.convert_scsi_path_to_hctl('/dev/scd0')
(2, 0, 0, 0)
>>> utils.convert_scsi_path_to_hctl('/dev/sr0')
(2, 0, 0, 0)
>>> utils.convert_scsi_path_to_hctl('/dev/sda')
(3, 0, 0, 0)
>>> utils.convert_scsi_path_to_hctl('/dev/sda1')
>>> utils.convert_scsi_path_to_hctl('/dev/sdb')
(3, 0, 1, 0)
>>> utils.convert_scsi_path_to_hctl('/dev/sdc')
(3, 0, 2, 0)
@param path: The udev path to the SCSI block device.
@type path: string
@return: An (host, controller, target, lun) tuple of integer
values representing the SCSI ID of the device, or raise RTSLibError.
'''
try:
path = os.path.realpath(path)
device = pyudev.Device.from_device_file(_CONTEXT, path)
parent = device.find_parent(subsystem='scsi')
return [int(data) for data in parent.sys_name.split(':')]
except:
raise RTSLibError("Could not convert scsi path to hctl")
def convert_scsi_hctl_to_path(host, controller, target, lun):
'''
This function returns a udev path pointing to the block device being
mapped to the SCSI device that has the provided H:C:T:L.
>>> import rtslib.utils as utils
>>> utils.convert_scsi_hctl_to_path(0,0,0,0)
''
>>> utils.convert_scsi_hctl_to_path(2,0,0,0) # doctest: +ELLIPSIS
'/dev/s...0'
>>> utils.convert_scsi_hctl_to_path(3,0,2,0)
'/dev/sdc'
@param host: The SCSI host id.
@type host: int
@param controller: The SCSI controller id.
@type controller: int
@param target: The SCSI target id.
@type target: int
@param lun: The SCSI Logical Unit Number.
@type lun: int
@return: A string for the canonical path to the device, or raise RTSLibError.
'''
try:
host = int(host)
controller = int(controller)
target = int(target)
lun = int(lun)
except ValueError:
raise RTSLibError(
"The host, controller, target and lun parameter must be integers")
hctl = [str(host), str(controller), str(target), str(lun)]
try:
scsi_device = pyudev.Device.from_name(_CONTEXT, 'scsi', ':'.join(hctl))
except pyudev.DeviceNotFoundError:
raise RTSLibError("Could not find path for SCSI hctl")
devices = _CONTEXT.list_devices(
subsystem='block',
parent=scsi_device
)
path = next((dev.device_node for dev in devices), '')
if path == None:
raise RTSLibError("Could not find path for SCSI hctl")
return path
def generate_wwn(wwn_type):
'''
Generates a random WWN of the specified type:
- unit_serial: T10 WWN Unit Serial.
- iqn: iSCSI IQN
- naa: SAS NAA address
@param wwn_type: The WWN address type.
@type wwn_type: str
@returns: A string containing the WWN.
'''
wwn_type = wwn_type.lower()
if wwn_type == 'free':
return str(uuid.uuid4())
if wwn_type == 'unit_serial':
return str(uuid.uuid4())
elif wwn_type == 'iqn':
localname = socket.gethostname().split(".")[0]
localarch = os.uname()[4].replace("_", "")
prefix = "iqn.2003-01.org.linux-iscsi.%s.%s" % (localname, localarch)
prefix = prefix.strip().lower()
serial = "sn.%s" % str(uuid.uuid4())[24:]
return "%s:%s" % (prefix, serial)
elif wwn_type == 'naa':
# see http://standards.ieee.org/develop/regauth/tut/fibre.pdf
# 5 = IEEE registered
# 001405 = OpenIB OUI (they let us use it I guess?)
# rest = random
return "naa.5001405" + uuid.uuid4().hex[-9:]
elif wwn_type == 'eui':
return "eui.001405" + uuid.uuid4().hex[-10:]
else:
raise ValueError("Unknown WWN type: %s" % wwn_type)
def colonize(str):
'''
helper function to add colons every 2 chars
'''
return ":".join(str[i:i+2] for i in range(0, len(str), 2))
def _cleanse_wwn(wwn_type, wwn):
'''
Some wwns may have alternate text representations. Adjust to our
preferred representation.
'''
wwn = str(wwn.strip()).lower()
if wwn_type in ('naa', 'eui', 'ib'):
if wwn.startswith("0x"):
wwn = wwn[2:]
wwn = wwn.replace("-", "")
wwn = wwn.replace(":", "")
if not (wwn.startswith("naa.") or wwn.startswith("eui.") or \
wwn.startswith("ib.")):
wwn = wwn_type + "." + wwn
return wwn
def normalize_wwn(wwn_types, wwn):
'''
Take a WWN as given by the user and convert it to a standard text
representation.
Returns (normalized_wwn, wwn_type), or exception if invalid wwn.
'''
wwn_test = {
'free': lambda wwn: True,
'iqn': lambda wwn: \
re.match("iqn\.[0-9]{4}-[0-1][0-9]\..*\..*", wwn) \
and not re.search(' ', wwn) \
and not re.search('_', wwn),
'naa': lambda wwn: re.match("naa\.[125][0-9a-fA-F]{15}$", wwn),
'eui': lambda wwn: re.match("eui\.[0-9a-f]{16}$", wwn),
'ib': lambda wwn: re.match("ib\.[0-9a-f]{32}$", wwn),
'unit_serial': lambda wwn: \
re.match("[0-9A-Fa-f]{8}(-[0-9A-Fa-f]{4}){3}-[0-9A-Fa-f]{12}$", wwn),
}
for wwn_type in wwn_types:
clean_wwn = _cleanse_wwn(wwn_type, wwn)
found_type = wwn_test[wwn_type](clean_wwn)
if found_type:
break
else:
raise RTSLibError("WWN not valid as: %s" % ", ".join(wwn_types))
return (clean_wwn, wwn_type)
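# Illustrative usage of normalize_wwn (the values below are hypothetical and
# shown only as a comment, not part of the original module):
#
#   >>> normalize_wwn(('naa', 'eui', 'iqn'), '0x50:01:40:5e:98:1b:2c:3d')
#   ('naa.5001405e981b2c3d', 'naa')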
def list_loaded_kernel_modules():
'''
List all currently loaded kernel modules
'''
return [line.split(" ")[0] for line in
fread("/proc/modules").split('\n') if line]
def modprobe(module):
'''
Load the specified kernel module if needed.
@param module: The name of the kernel module to be loaded.
@type module: str
'''
if module in list_loaded_kernel_modules():
return
try:
import kmod
except ImportError:
process = subprocess.Popen(("modprobe", module),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = process.communicate()
if process.returncode != 0:
raise RTSLibError(stderrdata)
return
try:
kmod.Kmod().modprobe(module)
except kmod.error.KmodError:
raise RTSLibError("Could not load module: %s" % module)
def mount_configfs():
if not os.path.ismount("/sys/kernel/config"):
cmdline = "mount -t configfs none /sys/kernel/config"
process = subprocess.Popen(cmdline.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = process.communicate()
if process.returncode != 0 and not os.path.ismount(
"/sys/kernel/config"):
raise RTSLibError("Cannot mount configfs")
def dict_remove(d, items):
for item in items:
if item in d:
del d[item]
@contextmanager
def ignored(*exceptions):
try:
yield
except exceptions:
pass
#
# These two functions are meant to be used with functools.partial and
# properties.
#
# 'ignore=True' will silently return None if the attribute is not present.
# This is good for attributes only present in some kernel versions.
#
# All curried arguments should be keyword args.
#
# These should only be used for attributes that follow the convention of
# "NULL" having a special sentinel value, such as auth attributes, and
# that return a string.
#
def _get_auth_attr(self, attribute, ignore=False):
self._check_self()
path = "%s/%s" % (self.path, attribute)
try:
value = fread(path)
except:
if not ignore:
raise
return None
if value == "NULL":
return ''
else:
return value
# Auth params take the string "NULL" to unset the attribute
def _set_auth_attr(self, value, attribute, ignore=False):
self._check_self()
path = "%s/%s" % (self.path, attribute)
value = value.strip()
if value == "NULL":
raise RTSLibError("'NULL' is not a permitted value")
if len(value) > 255:
raise RTSLibError("Value longer than maximum length of 255")
if value == '':
value = "NULL"
try:
fwrite(path, "%s" % value)
except:
if not ignore:
raise
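# A minimal sketch of the wiring described in the comment block above
# (illustrative only; the class name and attribute path are hypothetical, but
# the functools.partial + property pattern is what these helpers expect):
#
#   from functools import partial
#
#   class ExampleNode(object):
#       chap_userid = property(
#           partial(_get_auth_attr, attribute='auth/userid', ignore=True),
#           partial(_set_auth_attr, attribute='auth/userid', ignore=True),
#           doc="Get or set the CHAP auth userid.")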
def set_attributes(obj, attr_dict, err_func):
for name, value in six.iteritems(attr_dict):
try:
obj.set_attribute(name, value)
except RTSLibError as e:
err_func(str(e))
def set_parameters(obj, param_dict, err_func):
for name, value in six.iteritems(param_dict):
try:
obj.set_parameter(name, value)
except RTSLibError as e:
err_func(str(e))
def _test():
'''Run the doctests'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
py | 1a5316a6ed459124ad7a182a8dfb17b6c1f60156 | '''OpenGL extension APPLE.transform_hint
This module customises the behaviour of the
OpenGL.raw.GL.APPLE.transform_hint to provide a more
Python-friendly API
Overview (from the spec)
The transform_hint extension provides a new target,
TRANSFORM_HINT_APPLE, for the Hint procedure. When the
transform hint is set to FASTEST the GL may choose to
implement certain state dependent algebraic simplifications
in the geometry transformation that affect the sub-pixel
precision of the transformed vertex coordinates.
For example, if two polygons are rendered with identical object
coordinates, different GL state settings, and the transform
hint set to FASTEST, there is no guarantee that the resulting
window coordinates of the two polygons will be precisely
identical. Therefore, precise tests of the window coordinates,
such as a depth test setting of EQUAL, should not be used.
If the transform hint is set to NICEST or DONT_CARE, two polygons
with identical object coordinates will always be transformed
to identical window coordinates.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/APPLE/transform_hint.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.APPLE.transform_hint import *
from OpenGL.raw.GL.APPLE.transform_hint import _EXTENSION_NAME
def glInitTransformHintAPPLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
py | 1a531733fcc20a288add19f472de56d7fc3a6719 | # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Utility functions."""
import importlib
import os
import shutil
from os import path
from pathlib import Path
from typing import Any, Generator, List, Mapping, Union
JSON = Union[str, int, float, bool, None, Mapping[str, 'JSON'], List['JSON']] # type: ignore
def make_dirs(dir_pathes: Union[str, List[str]]) -> None:
"""Create one or more directories."""
if isinstance(dir_pathes, str):
dir = Path(dir_pathes)
Path.mkdir(dir, parents=True, exist_ok=True)
elif isinstance(dir_pathes, list):
for dir_path in dir_pathes:
dir = Path(dir_path)
Path.mkdir(dir, parents=True, exist_ok=True)
def move_file_or_dir(src_path: str, dest_path: str) -> None:
"""Move a file or a directory."""
shutil.move(src_path, dest_path)
def get_files(src_dir_path: str, excepts: str = '') -> List[str]:
"""Get a list of file pathes."""
return [fp for fp in get_files_generator(src_dir_path, excepts)]
def get_files_generator(src_dir_path: str, excepts: str = '') -> Generator[str, None, None]:
"""Get file pathes."""
for dirpath, dirnames, filenames in os.walk(src_dir_path):
if excepts not in dirpath:
for file_name in filenames:
yield path.join(dirpath, file_name)
def dynamic_class_load(path: str) -> Any:
"""Load a class defined by the argument.
Parameters
----------
path : str
The fully qualified name of the class to be loaded, i.e. the
dotted module path followed by the class name.
Returns
-------
cls : Any
The class to be loaded.
"""
pathes = path.split('.')
class_name = pathes.pop()
module_name = '.'.join(pathes)
module = importlib.import_module(module_name)
return getattr(module, class_name)
class classproperty(object):
"""Decorator for class property."""
def __init__(self, getter):
self.getter = getter
def __get__(self, instance, owner):
return self.getter(owner)
|
py | 1a531b422e38f77c12015c644a6527176e395afc | from . import SentenceEvaluator, SimilarityFunction
import logging
import os
import csv
from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances
from typing import List
from ..readers import InputExample
logger = logging.getLogger(__name__)
class TripletEvaluator(SentenceEvaluator):
"""
Evaluate a model based on a triplet: (sentence, positive_example, negative_example).
Checks if distance(sentence, positive_example) < distance(sentence, negative_example).
"""
def __init__(
self,
anchors: List[str],
positives: List[str],
negatives: List[str],
main_distance_function: SimilarityFunction = None,
name: str = "",
batch_size: int = 16,
show_progress_bar: bool = False,
write_csv: bool = True,
):
"""
:param anchors: Sentences to check similarity to. (e.g. a query)
:param positives: List of positive sentences
:param negatives: List of negative sentences
:param main_distance_function: One of 0 (Cosine), 1 (Euclidean) or 2 (Manhattan). Defaults to None, returning all 3.
:param name: Name for the output
:param batch_size: Batch size used to compute embeddings
:param show_progress_bar: If true, prints a progress bar
:param write_csv: Write results to a CSV file
"""
self.anchors = anchors
self.positives = positives
self.negatives = negatives
self.name = name
assert len(self.anchors) == len(self.positives)
assert len(self.anchors) == len(self.negatives)
self.main_distance_function = main_distance_function
self.batch_size = batch_size
if show_progress_bar is None:
show_progress_bar = (
logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
)
self.show_progress_bar = show_progress_bar
self.csv_file: str = "triplet_evaluation" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy_cosinus", "accuracy_manhatten", "accuracy_euclidean"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
anchors = []
positives = []
negatives = []
for example in examples:
anchors.append(example.texts[0])
positives.append(example.texts[1])
negatives.append(example.texts[2])
return cls(anchors, positives, negatives, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("TripletEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
num_triplets = 0
num_correct_cos_triplets, num_correct_manhatten_triplets, num_correct_euclidean_triplets = 0, 0, 0
embeddings_anchors = model.encode(
self.anchors, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True
)
embeddings_positives = model.encode(
self.positives, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True
)
embeddings_negatives = model.encode(
self.negatives, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True
)
# Cosine distance
pos_cos_distance = paired_cosine_distances(embeddings_anchors, embeddings_positives)
neg_cos_distances = paired_cosine_distances(embeddings_anchors, embeddings_negatives)
# Manhattan
pos_manhattan_distance = paired_manhattan_distances(embeddings_anchors, embeddings_positives)
neg_manhattan_distances = paired_manhattan_distances(embeddings_anchors, embeddings_negatives)
# Euclidean
pos_euclidean_distance = paired_euclidean_distances(embeddings_anchors, embeddings_positives)
neg_euclidean_distances = paired_euclidean_distances(embeddings_anchors, embeddings_negatives)
for idx in range(len(pos_cos_distance)):
num_triplets += 1
if pos_cos_distance[idx] < neg_cos_distances[idx]:
num_correct_cos_triplets += 1
if pos_manhattan_distance[idx] < neg_manhattan_distances[idx]:
num_correct_manhatten_triplets += 1
if pos_euclidean_distance[idx] < neg_euclidean_distances[idx]:
num_correct_euclidean_triplets += 1
accuracy_cos = num_correct_cos_triplets / num_triplets
accuracy_manhattan = num_correct_manhatten_triplets / num_triplets
accuracy_euclidean = num_correct_euclidean_triplets / num_triplets
logger.info("Accuracy Cosine Distance: \t{:.2f}".format(accuracy_cos * 100))
logger.info("Accuracy Manhattan Distance:\t{:.2f}".format(accuracy_manhattan * 100))
logger.info("Accuracy Euclidean Distance:\t{:.2f}\n".format(accuracy_euclidean * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy_cos, accuracy_manhattan, accuracy_euclidean])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy_cos, accuracy_manhattan, accuracy_euclidean])
if self.main_distance_function == SimilarityFunction.COSINE:
return accuracy_cos
if self.main_distance_function == SimilarityFunction.MANHATTAN:
return accuracy_manhattan
if self.main_distance_function == SimilarityFunction.EUCLIDEAN:
return accuracy_euclidean
return max(accuracy_cos, accuracy_manhattan, accuracy_euclidean)
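# Illustrative usage (hypothetical data; assumes a SentenceTransformer model is
# available as `model`):
#
#   examples = [InputExample(texts=["anchor text", "similar text", "unrelated text"])]
#   evaluator = TripletEvaluator.from_input_examples(examples, name="dev")
#   accuracy = evaluator(model)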
|
py | 1a531b75e46c9e8b14c17ddbd6deda5f1a863a08 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START gae_python38_cloudsql_mysql_pooling]
# [START gae_python3_cloudsql_mysql_pooling]
import os
from flask import Flask
import sqlalchemy
db_user = os.environ.get('CLOUD_SQL_USERNAME')
db_password = os.environ.get('CLOUD_SQL_PASSWORD')
db_name = os.environ.get('CLOUD_SQL_DATABASE_NAME')
db_connection_name = os.environ.get('CLOUD_SQL_CONNECTION_NAME')
# When deployed to App Engine, the `GAE_ENV` environment variable will be
# set to `standard`
if os.environ.get('GAE_ENV') == 'standard':
# If deployed, use the local socket interface for accessing Cloud SQL
unix_socket = '/cloudsql/{}'.format(db_connection_name)
engine_url = 'mysql+pymysql://{}:{}@/{}?unix_socket={}'.format(
db_user, db_password, db_name, unix_socket)
else:
# If running locally, use the TCP connections instead
# Set up Cloud SQL Proxy (cloud.google.com/sql/docs/mysql/sql-proxy)
# so that your application can use 127.0.0.1:3306 to connect to your
# Cloud SQL instance
host = '127.0.0.1'
engine_url = 'mysql+pymysql://{}:{}@{}/{}'.format(
db_user, db_password, host, db_name)
# The Engine object returned by create_engine() has a QueuePool integrated
# See https://docs.sqlalchemy.org/en/latest/core/pooling.html for more
# information
engine = sqlalchemy.create_engine(engine_url, pool_size=3)
app = Flask(__name__)
@app.route('/')
def main():
cnx = engine.connect()
cursor = cnx.execute('SELECT NOW() as now;')
result = cursor.fetchall()
current_time = result[0][0]
# If the connection comes from a pool, close() will send the connection
# back to the pool instead of closing it
cnx.close()
return str(current_time)
# [END gae_python3_cloudsql_mysql_pooling]
# [END gae_python38_cloudsql_mysql_pooling]
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
|
py | 1a531c65bbf1e1a9e3d52a4537ffd25a23316ed9 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import math
import time
import tempfile
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import declarative, ProgramTranslator
from paddle.fluid.dygraph.nn import BatchNorm, Conv2D, Linear, Pool2D
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from predictor_utils import PredictorTools
SEED = 2020
IMAGENET1000 = 1281167
base_lr = 0.001
momentum_rate = 0.9
l2_decay = 1e-4
# NOTE: Reduce batch_size from 8 to 2 to avoid unittest timeout.
batch_size = 2
epoch_num = 1
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \
else fluid.CPUPlace()
program_translator = ProgramTranslator()
if fluid.is_compiled_with_cuda():
fluid.set_flags({'FLAGS_cudnn_deterministic': True})
def optimizer_setting(parameter_list=None):
optimizer = fluid.optimizer.Momentum(
learning_rate=base_lr,
momentum=momentum_rate,
regularization=fluid.regularizer.L2Decay(l2_decay),
parameter_list=parameter_list)
return optimizer
class ConvBNLayer(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
groups=1,
act=None):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
bias_attr=False)
self._batch_norm = BatchNorm(num_filters, act=act)
def forward(self, inputs):
y = self._conv(inputs)
y = self._batch_norm(y)
return y
class BottleneckBlock(fluid.dygraph.Layer):
def __init__(self, num_channels, num_filters, stride, shortcut=True):
super(BottleneckBlock, self).__init__()
self.conv0 = ConvBNLayer(
num_channels=num_channels,
num_filters=num_filters,
filter_size=1,
act='relu')
self.conv1 = ConvBNLayer(
num_channels=num_filters,
num_filters=num_filters,
filter_size=3,
stride=stride,
act='relu')
self.conv2 = ConvBNLayer(
num_channels=num_filters,
num_filters=num_filters * 4,
filter_size=1,
act=None)
if not shortcut:
self.short = ConvBNLayer(
num_channels=num_channels,
num_filters=num_filters * 4,
filter_size=1,
stride=stride)
self.shortcut = shortcut
self._num_channels_out = num_filters * 4
def forward(self, inputs):
y = self.conv0(inputs)
conv1 = self.conv1(y)
conv2 = self.conv2(conv1)
if self.shortcut:
short = inputs
else:
short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv2)
layer_helper = fluid.layer_helper.LayerHelper(
self.full_name(), act='relu')
return layer_helper.append_activation(y)
class ResNet(fluid.dygraph.Layer):
def __init__(self, layers=50, class_dim=102):
super(ResNet, self).__init__()
self.layers = layers
supported_layers = [50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
num_channels = [64, 256, 512, 1024]
num_filters = [64, 128, 256, 512]
self.conv = ConvBNLayer(
num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu')
self.pool2d_max = Pool2D(
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
self.bottleneck_block_list = []
for block in range(len(depth)):
shortcut = False
for i in range(depth[block]):
bottleneck_block = self.add_sublayer(
'bb_%d_%d' % (block, i),
BottleneckBlock(
num_channels=num_channels[block]
if i == 0 else num_filters[block] * 4,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
shortcut=shortcut))
self.bottleneck_block_list.append(bottleneck_block)
shortcut = True
self.pool2d_avg = Pool2D(
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1
stdv = 1.0 / math.sqrt(2048 * 1.0)
self.out = Linear(
self.pool2d_avg_output,
class_dim,
act='softmax',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv)))
def forward(self, inputs):
y = self.conv(inputs)
y = self.pool2d_max(y)
for bottleneck_block in self.bottleneck_block_list:
y = bottleneck_block(y)
y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output])
pred = self.out(y)
return pred
def reader_decorator(reader):
def __reader__():
for item in reader():
img = np.array(item[0]).astype('float32').reshape(3, 224, 224)
label = np.array(item[1]).astype('int64').reshape(1)
yield img, label
return __reader__
class ResNetHelper:
def __init__(self):
self.temp_dir = tempfile.TemporaryDirectory()
self.model_save_dir = os.path.join(self.temp_dir.name, 'inference')
self.model_save_prefix = os.path.join(self.model_save_dir, 'resnet')
self.model_filename = 'resnet' + INFER_MODEL_SUFFIX
self.params_filename = 'resnet' + INFER_PARAMS_SUFFIX
self.dy_state_dict_save_path = os.path.join(self.temp_dir.name,
'resnet.dygraph')
def __del__(self):
self.temp_dir.cleanup()
def train(self, to_static, build_strategy=None):
"""
Tests model decorated by `dygraph_to_static_output` in static mode. For users, the model is defined in dygraph mode and trained in static mode.
"""
with fluid.dygraph.guard(place):
np.random.seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
train_reader = paddle.batch(
reader_decorator(paddle.dataset.flowers.train(use_xmap=False)),
batch_size=batch_size,
drop_last=True)
data_loader = fluid.io.DataLoader.from_generator(
capacity=5, iterable=True)
data_loader.set_sample_list_generator(train_reader)
resnet = ResNet()
if to_static:
resnet = paddle.jit.to_static(
resnet, build_strategy=build_strategy)
optimizer = optimizer_setting(parameter_list=resnet.parameters())
for epoch in range(epoch_num):
total_loss = 0.0
total_acc1 = 0.0
total_acc5 = 0.0
total_sample = 0
for batch_id, data in enumerate(data_loader()):
start_time = time.time()
img, label = data
pred = resnet(img)
loss = fluid.layers.cross_entropy(input=pred, label=label)
avg_loss = fluid.layers.mean(x=loss)
acc_top1 = fluid.layers.accuracy(
input=pred, label=label, k=1)
acc_top5 = fluid.layers.accuracy(
input=pred, label=label, k=5)
avg_loss.backward()
optimizer.minimize(avg_loss)
resnet.clear_gradients()
total_loss += avg_loss
total_acc1 += acc_top1
total_acc5 += acc_top5
total_sample += 1
end_time = time.time()
if batch_id % 2 == 0:
print( "epoch %d | batch step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, time %f" % \
( epoch, batch_id, total_loss.numpy() / total_sample, \
total_acc1.numpy() / total_sample, total_acc5.numpy() / total_sample, end_time-start_time))
if batch_id == 10:
if to_static:
fluid.dygraph.jit.save(resnet,
self.model_save_prefix)
else:
fluid.dygraph.save_dygraph(
resnet.state_dict(),
self.dy_state_dict_save_path)
# avoid dataloader throwing an abort signal
data_loader._reset()
break
return total_loss.numpy()
def predict_dygraph(self, data):
program_translator.enable(False)
with fluid.dygraph.guard(place):
resnet = ResNet()
model_dict, _ = fluid.dygraph.load_dygraph(
self.dy_state_dict_save_path)
resnet.set_dict(model_dict)
resnet.eval()
pred_res = resnet(fluid.dygraph.to_variable(data))
return pred_res.numpy()
def predict_static(self, data):
paddle.enable_static()
exe = fluid.Executor(place)
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(
self.model_save_dir,
executor=exe,
model_filename=self.model_filename,
params_filename=self.params_filename)
pred_res = exe.run(inference_program,
feed={feed_target_names[0]: data},
fetch_list=fetch_targets)
return pred_res[0]
def predict_dygraph_jit(self, data):
with fluid.dygraph.guard(place):
resnet = fluid.dygraph.jit.load(self.model_save_prefix)
resnet.eval()
pred_res = resnet(data)
return pred_res.numpy()
def predict_analysis_inference(self, data):
output = PredictorTools(self.model_save_dir, self.model_filename,
self.params_filename, [data])
out = output()
return out
class TestResnet(unittest.TestCase):
def setUp(self):
self.resnet_helper = ResNetHelper()
def train(self, to_static):
program_translator.enable(to_static)
return self.resnet_helper.train(to_static)
def verify_predict(self):
image = np.random.random([1, 3, 224, 224]).astype('float32')
dy_pre = self.resnet_helper.predict_dygraph(image)
st_pre = self.resnet_helper.predict_static(image)
dy_jit_pre = self.resnet_helper.predict_dygraph_jit(image)
predictor_pre = self.resnet_helper.predict_analysis_inference(image)
self.assertTrue(
np.allclose(dy_pre, st_pre),
msg="dy_pre:\n {}\n, st_pre: \n{}.".format(dy_pre, st_pre))
self.assertTrue(
np.allclose(dy_jit_pre, st_pre),
msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(dy_jit_pre, st_pre))
self.assertTrue(
np.allclose(predictor_pre, st_pre),
msg="predictor_pre:\n {}\n, st_pre: \n{}.".format(predictor_pre,
st_pre))
def test_resnet(self):
static_loss = self.train(to_static=True)
dygraph_loss = self.train(to_static=False)
self.assertTrue(
np.allclose(static_loss, dygraph_loss),
msg="static_loss: {} \n dygraph_loss: {}".format(static_loss,
dygraph_loss))
self.verify_predict()
def test_in_static_mode_mkldnn(self):
fluid.set_flags({'FLAGS_use_mkldnn': True})
try:
if paddle.fluid.core.is_compiled_with_mkldnn():
self.resnet_helper.train(to_static=True)
finally:
fluid.set_flags({'FLAGS_use_mkldnn': False})
if __name__ == '__main__':
# switch into new eager mode
with fluid.framework._test_eager_guard():
unittest.main()
|
py | 1a531ca1c25de13aa78de666170fe5d7ba781711 |
import argparse
import json
import os
import sys
from random import shuffle
from multiprocessing.pool import ThreadPool
from functools import partial
import io
from tqdm import tqdm
# Assumes ai4eutils is on the path (github.com/Microsoft/ai4eutils)
from write_html_image_list import write_html_image_list
#from data_management.megadb.schema import sequences_schema_check
from data_management.megadb.megadb_utils import MegadbUtils
from visualization import visualization_utils as vis_utils
def render_image_info(rendering, args):
blob_service = rendering['blob_service']
image_obj = io.BytesIO()
try:
_ = blob_service.get_blob_to_stream(rendering['container_name'], rendering['blob_path'], image_obj)
except Exception as e:
print(f'Image not found in blob storage: {rendering["blob_path"]}')
print(e)
return
# resize is for displaying them more quickly
image = vis_utils.resize_image(vis_utils.open_image(image_obj), args.output_image_width)
vis_utils.render_megadb_bounding_boxes(rendering['bbox'], image)
annotated_img_name = rendering['annotated_img_name']
annotated_img_path = os.path.join(args.output_dir, 'rendered_images', annotated_img_name)
image.save(annotated_img_path)
def visualize_sequences(datasets_table, sequences, args):
num_images = 0
images_html = []
rendering_info = []
for seq in sequences:
if 'images' not in seq:
continue
# dataset and seq_id are required fields
dataset_name = seq['dataset']
seq_id = seq['seq_id']
# sort the images in the sequence
images_in_seq = sorted(seq['images'], key=lambda x: x['frame_num']) if len(seq['images']) > 1 else seq['images']
for im in images_in_seq:
if args.trim_to_images_bboxes_labeled and 'bbox' not in im:
continue
num_images += 1
blob_path = MegadbUtils.get_full_path(datasets_table, dataset_name, im['file'])
frame_num = im.get('frame_num', -1)
im_class = im.get('class', None)
if im_class is None: # if no class label on the image, show the class label on the sequence
im_class = seq.get('class', [])
rendering = {}
rendering['blob_service'] = MegadbUtils.get_blob_service(datasets_table, dataset_name)
rendering['container_name'] = datasets_table[dataset_name]['container']
rendering['blob_path'] = blob_path
rendering['bbox'] = im.get('bbox', [])
annotated_img_name = 'anno_' + blob_path.replace('/', args.pathsep_replacement).replace('\\', args.pathsep_replacement)
rendering['annotated_img_name'] = annotated_img_name
rendering_info.append(rendering)
images_html.append({
'filename': 'rendered_images/{}'.format(annotated_img_name),
'title': 'Seq ID: {}. Frame number: {}<br/> Image file: {}<br/> number of boxes: {}, image class labels: {}'.format(seq_id, frame_num, blob_path, len(rendering['bbox']), im_class),
'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
})
if num_images >= args.num_to_visualize:
print('num_images visualized is {}'.format(num_images))
break
# pool = ThreadPool()
render_image_info_partial = partial(render_image_info, args=args)
# print('len of rendering_info', len(rendering_info))
# tqdm(pool.imap_unordered(render_image_info_partial, rendering_info), total=len(rendering_info))
for rendering in tqdm(rendering_info):
render_image_info_partial(rendering)
print('Making HTML...')
html_path = os.path.join(args.output_dir, 'index.html')
# options = write_html_image_list()
# options['headerHtml']
write_html_image_list(
filename=html_path,
images=images_html
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('megadb_entries', type=str, help='Path to a json list of MegaDB entries')
parser.add_argument('output_dir', action='store', type=str,
help='Output directory for html and rendered images')
parser.add_argument('--trim_to_images_bboxes_labeled', action='store_true',
help='Only include images that have been sent for bbox labeling (but may actually be empty). Turn this on if QAing annotations.')
parser.add_argument('--num_to_visualize', action='store', type=int, default=200,
help='Number of images to visualize (all conformant images in a sequence are shown, so may be a few more than specified). Sequences are shuffled. Defaults to 200. Use -1 to visualize all.')
parser.add_argument('--pathsep_replacement', action='store', type=str, default='~',
help='Replace path separators in relative filenames with another character (default ~)')
parser.add_argument('-w', '--output_image_width', type=int,
help=('an integer indicating the desired width in pixels of the output annotated images. '
'Use -1 to not resize.'),
default=700)
if len(sys.argv[1:]) == 0:
parser.print_help()
parser.exit()
args = parser.parse_args()
assert 'COSMOS_ENDPOINT' in os.environ and 'COSMOS_KEY' in os.environ
os.makedirs(args.output_dir, exist_ok=True)
os.makedirs(os.path.join(args.output_dir, 'rendered_images'))
print('Connecting to MegaDB to get the datasets table...')
megadb_utils = MegadbUtils()
datasets_table = megadb_utils.get_datasets_table()
print('Loading the MegaDB entries...')
with open(args.megadb_entries) as f:
sequences = json.load(f)
print('Total number of sequences: {}'.format(len(sequences)))
# print('Checking that the MegaDB entries conform to the schema...')
# sequences_schema_check.sequences_schema_check(sequences)
shuffle(sequences)
visualize_sequences(datasets_table, sequences, args)
if __name__ == '__main__':
main()
|
py | 1a531ca2a6e02c8b45de76decd19587c792adcf1 | from flask import g, request
from pprint import pformat
from requests.exceptions import ConnectionError
from requests.models import Request
import logging
import requests
import sys
import time
def getLogger(*args, **kwargs):
logger = logging.getLogger(*args, **kwargs)
return CustomLogger(logger=logger)
class CustomLogger(object):
def __init__(self, logger):
self.logger = logger
def __getattr__(self, attr):
return getattr(self.logger, attr)
def debug(self, *args, **kwargs):
return self._add_correlation_id_and_log('debug', args, kwargs)
def info(self, *args, **kwargs):
return self._add_correlation_id_and_log('info', args, kwargs)
def warn(self, *args, **kwargs):
return self._add_correlation_id_and_log('warn', args, kwargs)
def error(self, *args, **kwargs):
return self._add_correlation_id_and_log('error', args, kwargs)
def critical(self, *args, **kwargs):
return self._add_correlation_id_and_log('critical', args, kwargs)
def _add_correlation_id_and_log(self, level_name, args, kwargs):
my_extra = {**kwargs.get('extra', {})}
try:
my_extra['correlation_id'] = g.correlation_id
except RuntimeError:
# outside flask application context, this is OK
pass
kwargs['extra'] = my_extra
log_fn = getattr(self.logger, level_name)
return log_fn(*args, **kwargs)
def exception(self, *args, **kwargs):
if 'extra' in kwargs:
my_extra = kwargs['extra'].copy()
else:
my_extra = {}
my_extra['exception'] = pformat(sys.exc_info()[1])
my_extra['correlation_id'] = g.correlation_id
kwargs['extra'] = my_extra
self.logger.exception(*args, **kwargs)
def logged_response(logger, endpoint_name, endpoint_version):
def _log_response(target):
def wrapper(*args, **kwargs):
start_time = time.time()
log_level = logger.getEffectiveLevel()
source = request.access_route[0]
label = '[%s] %s %s from %s' % (g.correlation_id, request.method,
request.full_path, source)
extra = {'request.method': request.method,
'request.endpoint_name': endpoint_name,
'request.endpoint_version': endpoint_version,
'request.path': request.path,
'request.full_path': request.full_path,
'request.source': source}
if log_level <= logging.DEBUG:
extra['request.body'] = pformat(request.json)
extra['request.headers'] = pformat(dict(request.headers))
try:
result = target(*args, **kwargs)
except Exception as e:
logger.exception("%s while handling %s",
e.__class__.__name__, label,
extra=extra)
raise
names = ('body', 'status_code', 'headers')
levels = (logging.DEBUG, logging.INFO, logging.DEBUG)
formatters = (pformat, pformat, lambda x: pformat(dict(x)))
for value, name, level, formatter in zip(
result, names, levels, formatters):
if log_level <= level:
extra['response.%s' % name] = formatter(value)
end_time = time.time()
extra['response.took'] = int((end_time - start_time) * 1000)
logger.info("Responding %s to %s",
result[1], label,
extra=extra)
return result
return wrapper
return _log_response
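# Illustrative usage (hypothetical Flask view; the decorated function is
# expected to return a (body, status_code, headers) tuple, which is what the
# wrapper unpacks and logs):
#
#   logger = getLogger(__name__)
#
#   @app.route('/status')
#   @logged_response(logger, endpoint_name='status', endpoint_version='1')
#   def status():
#       return {'ok': True}, 200, {'Content-Type': 'application/json'}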
def _log_request(target, kind):
def wrapper(*args, **kwargs):
logger = kwargs.get('logger', getLogger(__name__))
if 'logger' in kwargs:
del kwargs['logger']
log_level = logger.getEffectiveLevel()
kwargs_for_constructor = get_args_for_request_constructor(kwargs)
request = Request(kind.upper(), *args, **kwargs_for_constructor)
extra = {'request.url': request.url,
'request.method': kind.upper()}
if log_level <= logging.DEBUG:
extra['request.body'] = pformat(request.data)
extra['request.headers'] = pformat(dict(request.headers))
extra['request.params'] = request.params
label = '%s %s' % (kind.upper(), request.url)
try:
response = target(*args, **kwargs)
except ConnectionError:
# exception should be logged elsewhere
raise
except Exception:
logger.exception("Exception while sending %s", label,
extra=extra)
raise
extra['response.status_code'] = response.status_code
if log_level <= logging.DEBUG:
extra['response.text'] = pformat(response.text)
extra['response.headers'] = pformat(dict(response.headers))
return response
return wrapper
def get_args_for_request_constructor(kwargs):
kwargs_for_constructor = kwargs.copy()
if 'timeout' in kwargs_for_constructor:
# timeout is an argument to requests.get/post/etc. but not
# Request.__init__
del kwargs_for_constructor['timeout']
return kwargs_for_constructor
class LoggedRequest(object):
def __getattr__(self, name):
return _log_request(getattr(requests, name), name)
logged_request = LoggedRequest()
|
py | 1a531cfdcf6a6909a04b014f1b4cbe6b494b344d | # Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import sys
from mock import call, MagicMock, patch, PropertyMock
import pytest
from six import PY2
from sagemaker_training import entry_point, environment, errors, process, runner
builtins_open = "__builtin__.open" if PY2 else "builtins.open"
@pytest.fixture
def entry_point_type_module():
with patch("os.listdir", lambda x: ("setup.py",)):
yield
@pytest.fixture(autouse=True)
def entry_point_type_script():
with patch("os.listdir", lambda x: ()):
yield
@pytest.fixture()
def has_requirements():
with patch("os.path.exists", lambda x: x.endswith("requirements.txt")):
yield
@patch("sagemaker_training.modules.prepare")
@patch("sagemaker_training.process.check_error", autospec=True)
def test_install_module(check_error, prepare, entry_point_type_module):
path = "c://sagemaker-pytorch-container"
entry_point.install("python_module.py", path)
cmd = [sys.executable, "-m", "pip", "install", "."]
check_error.assert_called_with(cmd, errors.InstallModuleError, capture_error=False, cwd=path)
with patch("os.path.exists", return_value=True):
entry_point.install("python_module.py", path)
check_error.assert_called_with(
cmd + ["-r", "requirements.txt"],
errors.InstallModuleError,
cwd=path,
capture_error=False,
)
@patch("sagemaker_training.modules.prepare")
@patch("sagemaker_training.process.check_error", autospec=True)
def test_install_script(check_error, prepare, entry_point_type_module, has_requirements):
path = "c://sagemaker-pytorch-container"
entry_point.install("train.py", path)
with patch("os.path.exists", return_value=True):
entry_point.install(path, "python_module.py")
@patch("sagemaker_training.modules.prepare")
@patch("sagemaker_training.process.check_error", autospec=True)
def test_install_fails(check_error, prepare, entry_point_type_module):
check_error.side_effect = errors.ClientError()
with pytest.raises(errors.ClientError):
entry_point.install("git://aws/container-support", "script")
@patch("sagemaker_training.modules.prepare")
@patch("sys.executable", None)
@patch("sagemaker_training.process.check_error", autospec=True)
def test_install_no_python_executable(
check_error, prepare, has_requirements, entry_point_type_module
):
with pytest.raises(RuntimeError) as e:
entry_point.install("train.py", "git://aws/container-support")
assert str(e.value) == "Failed to retrieve the real path for the Python executable binary"
@patch("os.chmod")
@patch("sagemaker_training.process.check_error", autospec=True)
@patch("socket.gethostbyname")
def test_script_entry_point_with_python_package(
gethostbyname, check_error, chmod, entry_point_type_module
):
runner_mock = MagicMock(spec=process.ProcessRunner)
entry_point.run(
uri="s3://dummy-uri",
user_entry_point="train.sh",
args=["dummy_arg"],
runner_type=runner_mock,
)
chmod.assert_called_with(os.path.join(environment.code_dir, "train.sh"), 511)
@patch("sagemaker_training.files.download_and_extract")
@patch("os.chmod")
@patch("sagemaker_training.process.check_error", autospec=True)
@patch("socket.gethostbyname")
def test_run_module_wait(gethostbyname, check_error, chmod, download_and_extract):
runner_mock = MagicMock(spec=process.ProcessRunner)
entry_point.run(
uri="s3://url",
user_entry_point="launcher.sh",
args=["42"],
capture_error=True,
runner_type=runner_mock,
)
download_and_extract.assert_called_with(uri="s3://url", path=environment.code_dir)
runner_mock.run.assert_called_with(True, True)
chmod.assert_called_with(os.path.join(environment.code_dir, "launcher.sh"), 511)
@patch("sagemaker_training.files.download_and_extract")
@patch("sagemaker_training.modules.install")
@patch.object(
environment.Environment, "hosts", return_value=["algo-1", "algo-2"], new_callable=PropertyMock
)
@patch("socket.gethostbyname")
def test_run_calls_hostname_resolution(gethostbyname, install, hosts, download_and_extract):
runner_mock = MagicMock(spec=process.ProcessRunner)
entry_point.run(
uri="s3://url", user_entry_point="launcher.py", args=["42"], runner_type=runner_mock
)
gethostbyname.assert_called_with("algo-2")
gethostbyname.assert_any_call("algo-1")
@patch("sagemaker_training.files.download_and_extract")
@patch("sagemaker_training.modules.install")
@patch.object(
environment.Environment, "hosts", return_value=["algo-1", "algo-2"], new_callable=PropertyMock
)
@patch("socket.gethostbyname")
def test_run_waits_hostname_resolution(gethostbyname, hosts, install, download_and_extract):
gethostbyname.side_effect = [ValueError(), ValueError(), True, True]
runner_mock = MagicMock(spec=process.ProcessRunner)
entry_point.run(
uri="s3://url", user_entry_point="launcher.py", args=["42"], runner_type=runner_mock
)
gethostbyname.assert_has_calls([call("algo-1"), call("algo-1"), call("algo-1"), call("algo-2")])
@patch("sagemaker_training.files.download_and_extract")
@patch("os.chmod")
@patch("socket.gethostbyname")
def test_run_module_no_wait(gethostbyname, chmod, download_and_extract):
runner_mock = MagicMock(spec=process.ProcessRunner)
module_name = "default_user_module_name"
entry_point.run(
uri="s3://url",
user_entry_point=module_name,
args=["42"],
wait=False,
runner_type=runner_mock,
)
runner_mock.run.assert_called_with(False, False)
@patch("sys.path")
@patch("sagemaker_training.runner.get")
@patch("sagemaker_training.files.download_and_extract")
@patch("os.chmod")
@patch("socket.gethostbyname")
def test_run_module_with_env_vars(gethostbyname, chmod, download_and_extract, get_runner, sys_path):
module_name = "default_user_module_name"
args = ["--some-arg", "42"]
entry_point.run(
uri="s3://url", user_entry_point=module_name, args=args, env_vars={"FOO": "BAR"}
)
expected_env_vars = {"FOO": "BAR", "PYTHONPATH": ""}
get_runner.assert_called_with(
runner.ProcessRunnerType, module_name, args, expected_env_vars, None
)
@patch("sys.path")
@patch("sagemaker_training.runner.get")
@patch("sagemaker_training.files.download_and_extract")
@patch("os.chmod")
@patch("socket.gethostbyname")
def test_run_module_with_extra_opts(
gethostbyname, chmod, download_and_extract, get_runner, sys_path
):
module_name = "default_user_module_name"
args = ["--some-arg", "42"]
extra_opts = {"foo": "bar"}
entry_point.run(uri="s3://url", user_entry_point=module_name, args=args, extra_opts=extra_opts)
get_runner.assert_called_with(runner.ProcessRunnerType, module_name, args, {}, extra_opts)
|
py | 1a531da8c69376894f6653c3f9d3f8ce8d8eb600 | from setuptools import setup
setup(
name='faceshifter',
version='1.0',
packages=[''],
url='https://github.com/flipflopbboi/faceshifter',
license='3-clause BSD license',
author='flipflopbboi',
author_email='[email protected]',
description='Forked from mindslab-ai/faceshifter'
)
|
py | 1a531e9990dad7b181d779f27a544bffd4fffa92 | from statistics import mean
import numpy as np
import matplotlib.pylab as plt
from matplotlib import style
style.use("fivethirtyeight")
#dtype important later
xs = np.array([1,2,3,4,5,6],dtype=np.float64)
ys = np.array([5,4,6,5,6,7],dtype=np.float64)
plt.scatter(xs,ys)
plt.show()
def best_fit_slope(xs,ys):
m = (mean(xs)*mean(ys)-mean(xs*ys))/(mean(xs)**2.-mean(xs**2.))
return m
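# Note: this is the ordinary least-squares slope; the expression above is
# algebraically equivalent to cov(x, y) / var(x).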
m=best_fit_slope(xs,ys)
print(m)
########################PEMDAS
plt.scatter(xs,ys)
plt.plot(xs,m*xs,color='C1')
plt.show()
b=mean(ys)-m*mean(xs)
plt.scatter(xs,ys)
plt.plot(xs,m*xs+b,color='C1')
plt.show()
def best_fit_slope_and_intercept(xs,ys):
m = (mean(xs)*mean(ys)-mean(xs*ys))/(mean(xs)**2.-mean(xs**2.))
b=mean(ys)-m*mean(xs)
return m,b
m,b=best_fit_slope_and_intercept(xs,ys)
plt.scatter(xs,ys)
plt.plot(xs,m*xs+b,color='C1')
plt.show()
regression_line=[(m*x)+b for x in xs]
print(m,b)
predicted_x=8
predicted_y=m*predicted_x+b
plt.scatter(xs,ys)
plt.plot(xs,m*xs+b,color='C1')
plt.scatter(predicted_x,predicted_y, color='C2')
plt.show()
|
py | 1a532224e75ee01463d61c91d4a52c38d23580ef | # -*- coding: utf-8 -*-
import re
from scrapy import Spider, Request
from dateutil import parser
from artbot_scraper.items import EventItem
from pytz import timezone
class AmbushSpider(Spider):
name = "Goodspace"
allowed_domains = ["goodspace.co"]
start_urls = ["http://goodspace.co/upcoming/"]
def parse(self, response):
for href in response.xpath('//a[contains(@class, "project")]/@href'):
url = response.urljoin(href.extract())
yield Request(url, callback=self.parse_event)
def parse_event(self, response):
item = EventItem()
item['url'] = response.url
item['venue'] = self.name
item['title'] = response.xpath('//h1/text()').extract_first().strip()
item['description'] = ''.join(response.xpath('//div[contains(@class, "event_details")]//text()').extract())
item['image'] = response.xpath('//figure[contains(@class, "amb_gal_img")]//img/@src').extract_first()
time = ''.join(response.xpath('//time//text()').extract())
match = re.match('(?P<start>[a-zA-Z]+\d+)(?P<end>[a-zA-Z]+\d+)', time)
if (match):
tz = timezone('Australia/Sydney')
item['start'] = tz.localize(parser.parse(match.group('start')))
item['end'] = tz.localize(parser.parse(match.group('end')))
yield item
|
py | 1a53234423d5709ee270d557b8bcff488db9afd9 | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 151750.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
|
py | 1a5323451d7f902ceea27095d473988c1e8f926b | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from threading import Event
from typing import IO, Any, Optional, Union
import libvirt # type: ignore
from . import libvirt_events_thread
# Reads serial console log from libvirt VM and writes it to a file.
class QemuConsoleLogger:
def __init__(self) -> None:
self._stream_completed = Event()
self._console_stream: Optional[libvirt.virStream] = None
self._console_stream_callback_started = False
self._console_stream_callback_added = False
self._log_file: Optional[IO[Any]] = None
# Attach logger to a libvirt VM.
def attach(
self,
qemu_conn: libvirt.virConnect,
domain: libvirt.virDomain,
log_file_path: str,
) -> None:
# Open the log file.
self._log_file = open(log_file_path, "ab")
# Open the libvirt console stream.
console_stream = qemu_conn.newStream(libvirt.VIR_STREAM_NONBLOCK)
domain.openConsole(
None,
console_stream,
libvirt.VIR_DOMAIN_CONSOLE_FORCE | libvirt.VIR_DOMAIN_CONSOLE_SAFE,
)
self._console_stream = console_stream
libvirt_events_thread.run_callback(self._register_console_callbacks)
self._console_stream_callback_started = True
# Close the logger.
def close(self) -> None:
# Check if attach() ran successfully.
if self._console_stream_callback_started:
# Close the stream on libvirt callbacks thread.
libvirt_events_thread.run_callback(self._close_stream, True)
self._stream_completed.wait()
else:
if self._console_stream:
self._console_stream.abort()
if self._log_file:
self._log_file.close()
# Wait until the stream closes.
# Typically used when gracefully shutting down a VM.
def wait_for_close(self) -> None:
if self._console_stream_callback_started:
self._stream_completed.wait()
# Register the console stream events.
# Threading: Must only be called on libvirt events thread.
def _register_console_callbacks(self) -> None:
# Attach callback for stream events.
assert self._console_stream
self._console_stream.eventAddCallback(
libvirt.VIR_STREAM_EVENT_READABLE
| libvirt.VIR_STREAM_EVENT_ERROR
| libvirt.VIR_STREAM_EVENT_HANGUP,
self._stream_event,
None,
)
self._console_stream_callback_added = True
# Handles events for the console stream.
# Threading: Must only be called on libvirt events thread.
def _stream_event(
self, stream: libvirt.virStream, events: Union[int, bytes], context: Any
) -> None:
if events & libvirt.VIR_STREAM_EVENT_READABLE:
# Data is available to be read.
while True:
data = stream.recv(libvirt.virStorageVol.streamBufSize)
if data == -2:
# No more data available at the moment.
break
if len(data) == 0:
# EOF reached.
self._close_stream(False)
break
assert self._log_file
self._log_file.write(data)
if (
events & libvirt.VIR_STREAM_EVENT_ERROR
or events & libvirt.VIR_STREAM_EVENT_HANGUP
):
# Stream is shutting down. So, close it.
self._close_stream(True)
# Close the stream resource.
# Threading: Must only be called on libvirt events thread.
def _close_stream(self, abort: bool) -> None:
if self._stream_completed.is_set():
# Already closed. Nothing to do.
return
try:
# Close the log file
assert self._log_file
self._log_file.close()
# Close the stream
assert self._console_stream
if self._console_stream_callback_added:
self._console_stream.eventRemoveCallback()
if abort:
self._console_stream.abort()
else:
self._console_stream.finish()
finally:
# Signal that the stream has closed.
self._stream_completed.set()
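# Minimal usage sketch (hypothetical names; assumes an open libvirt connection, a
# running domain, and that libvirt_events_thread has been started):
#
#   console_logger = QemuConsoleLogger()
#   console_logger.attach(qemu_conn, domain, "/tmp/vm-console.log")
#   ...  # run the VM workload
#   console_logger.close()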
|
py | 1a532512cc62e2bbeb6fdf8c9a3b597bb5e5ce77 | cont = ('zero', 'um', 'dois', 'três', 'quatro',
'cinco', 'seis', 'sete', 'oito', 'nove',
'dez', 'onze', 'doze', 'treze', 'catorze',
'quinze', 'dezesseis', 'dezessete', 'dezoito',
'dezenove', 'vinte')
while True:
núm = int(input('Digite um número entre 0 e 20:\n'))
if 0 <= núm <= 20:
break
print('Tente novamente.' , end='')
print(f'Você digitou o número {cont[núm]}')
|
py | 1a532aa24515e5b6f1c138a606929f973b8b5719 | """
IM inlining test
"""
l = []
l2 = []
if 5 % 2:
a = l.append
else:
a = l2.append
a(1)
print len(l), len(l2)
|
py | 1a532c798a5b5c92a668a1a7a9aa3d53485f0ea5 | import json
import shutil
import re
from os.path import exists,join, realpath
import os
file_name = 'compile_commands.json'
with open(file_name, 'r') as f:
data = json.load(f)
cnt = 0
CYBER_DIR = "/home/zhihaohe/cybertron"
PROTO_DIR = '/home/zhihaohe/cybertron'
container_dirs = ['/home/zhihaohe/container_cybertron/usr/local/include/',
'/home/zhihaohe/container_cybertron/tmp',
'/home/zhihaohe/container_cybertron/usr/include/',
'/home/zhihaohe/container_cybertron/pybind11/include/',
'/home/zhihaohe/container_cybertron/usr/local/apollo',
'/home/zhihaohe/container_cybertron/usr/local/src']
def extract_path_item(path, i):
return path.split('/')[i]
def insert_to_DB(db, fn, path):
m = db
items = path.split('/')
i = -1
while abs(i) < len(items) and items[i] in db:
if str is type(db[items[i]]):
old_path = db[items[i]]
stem = old_path.split('/')[i]
assert stem == items[i]
next_stem = old_path.split('/')[i-1]
db[items[i]] = {next_stem: old_path}
else:
assert dict is type(db[items[i]]), "Bug, should only be str or dict, but it is " + type(db[items[i]])
db = db[items[i]]
i -= 1
if abs(i) >= len(items):
raise Exception("insert path twice.%s"%path)
db[items[i]] = path
def build_filepath_db(directory, header_db):
insert_set = set()
for fn in os.listdir(directory):
path = join(directory, fn)
if path == '/home/zhihaohe/usr_local_cybertron/tmp/lz4-1.9.2/lib/lz4.h':
print(directory)
if os.path.isdir(path):
build_filepath_db(path, header_db)
elif path not in insert_set:
insert_set.add(path)
insert_to_DB(header_db, fn, path)
header_db = {}
for d in container_dirs:
header_db[d] = {}
build_filepath_db(d, header_db[d])
with open('header.db', 'w') as f:
json.dump(header_db, f, indent=2)
def change_file(f, new_dir):
items = f.split('/')
a = ''
for i in items:
a = i + '/' + a
if exists(join(new_dir, a)):
return a
return None
def search_db(path, db):
items = path.split('/')
for i in range(-1, -len(items), -1):
if items[i] not in db:
break
if type(db[items[i]]) is str:
return db[items[i]]
assert type(db[items[i]]) is dict
db = db[items[i]]
return None
def change_directory(f, d):
global header_db
if f.endswith('.pb.h') or f.endswith('.pb.cc'):
return f, PROTO_DIR
elif f.endswith('.so') or re.search(r'so\.\d*', f) or f.endswith('.txx') or f.endswith('.a'):
# TODO process lib properly
return f, d
elif exists(join(CYBER_DIR, f)):
return f, CYBER_DIR
else:
for container_dir, db in header_db.items():
fp = search_db(f, db)
if fp:
assert fp[:len(container_dir)] == container_dir
return fp[len(container_dir)+1:], container_dir
return None, None
def migrate_include(cmd, keyword, new_include_path):
tmpl = r'-isystem \S*%s\S*' % keyword
return re.sub(tmpl, '-isystem %s'%new_include_path, cmd)
def remove_include(cmd, keyword):
tmpl = r'-isystem \S*%s\S*' % keyword
return re.sub(tmpl, '', cmd)
def insert_include(cmd, path):
p = cmd.find('-isystem')
if -1 == p:
return cmd
return cmd[:p] + ' -isystem %s '%path + cmd[p:]
def process_command(cmd):
# remove not used compile flag
cmd = cmd.replace('-fno-canonical-system-headers', '')
cmd = insert_include(cmd, '/home/zhihaohe/container_cybertron/usr_local/include')
cmd = migrate_include(cmd, 'opencv', '/opt/ros/kinetic/include/opencv-3.3.1-dev/')
cmd = remove_include(cmd, 'boost')
return cmd
new_data = []
unfound_log = open('./not_founded_files.log', 'w')
for l in data:
l['command'] = process_command(l['command'])
# l['directory'] = CYBER_DIR
f, d = change_directory(l['file'], l['directory'])
if f and d:
l['file'] = f
l['directory'] = d
else:
unfound_log.write('%s, %s, %s\n' % (l['directory'], l['file'],
realpath(join(l['directory'],
l['file']))))
new_data.append(l)
shutil.move(file_name, file_name + '.backup')
with open('compile_commands.json', 'w') as f:
json.dump(new_data, f, indent=2)
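# Usage sketch (assumptions: the hard-coded cybertron/container paths above exist on
# the machine): run the script from the directory that contains compile_commands.json,
# e.g.
#
#   python fix_compile_commands.py   # hypothetical script name
#
# The original database is preserved as compile_commands.json.backup and entries that
# could not be relocated are listed in not_founded_files.log.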
|
py | 1a532d8e8858d2d0fee70e8925c79887c70a6401 | # Logging level must be set before importing any stretch_body class
import stretch_body.robot_params
stretch_body.robot_params.RobotParams.set_logging_level("DEBUG")
import unittest
import stretch_body.dynamixel_XL430
import logging
from concurrent.futures import ThreadPoolExecutor
class TestDynamixelXL430(unittest.TestCase):
def test_concurrent_access(self):
"""Verify zero comms errors in concurrent access.
"""
print('Testing Concurrent Access')
servo = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=12,
usb="/dev/hello-dynamixel-head",
logger=logging.getLogger("test_dynamixel"))
self.assertTrue(servo.startup())
def ping_n(n):
# servo.pretty_print() # causes many more servo communications
servo.do_ping()
ns = [1,2,3,4,5]
with ThreadPoolExecutor(max_workers = 2) as executor:
results = executor.map(ping_n, ns)
self.assertEqual(servo.comm_errors, 0)
self.assertTrue(servo.last_comm_success)
servo.stop()
def test_handle_comm_result(self):
"""Verify comm results correctly handled.
"""
print('Testing Handle Comm Result')
servo = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=12,
usb="/dev/hello-dynamixel-head",
logger=logging.getLogger("test_dynamixel"))
self.assertTrue(servo.startup())
ret = servo.handle_comm_result('DXL_TEST', 0, 0)
self.assertTrue(ret)
self.assertTrue(servo.last_comm_success)
self.assertEqual(servo.comm_errors, 0)
self.assertRaises(stretch_body.dynamixel_XL430.DynamixelCommError, servo.handle_comm_result, 'DXL_TEST', -1000, 0) # -1000 = PORT BUSY
self.assertFalse(servo.last_comm_success)
self.assertEqual(servo.comm_errors, 1)
self.assertRaises(stretch_body.dynamixel_XL430.DynamixelCommError, servo.handle_comm_result, 'DXL_TEST', -3002, 1) # -3002 = RX Corrupt
self.assertFalse(servo.last_comm_success)
self.assertEqual(servo.comm_errors, 2)
servo.stop()
def test_change_baud_rate(self, dxl_id=13, usb="/dev/hello-dynamixel-wrist"):
"""Verify can change baud rate.
TODO AE: Restarting a new connection to a just-changed baud rate does not always succeed. Need to close port?
"""
logger = logging.getLogger("test_dynamixel")
start_baud = stretch_body.dynamixel_XL430.DynamixelXL430.identify_baud_rate(dxl_id=dxl_id, usb=usb)
print('Testing changing baud rate from {0} to {1} and back'.format(start_baud, 115200 if start_baud != 115200 else 57600))
servo1 = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=dxl_id, usb=usb, baud=start_baud, logger=logger)
self.assertTrue(servo1.do_ping())
curr_baud = servo1.get_baud_rate()
self.assertEqual(curr_baud, start_baud)
self.assertTrue(servo1.do_ping())
# invalid baud goal
goal_baud = 9000
succeeded = servo1.set_baud_rate(goal_baud)
self.assertFalse(succeeded)
curr_baud = servo1.get_baud_rate()
self.assertNotEqual(curr_baud, goal_baud)
self.assertTrue(servo1.do_ping())
# change the baud
goal_baud = 115200 if start_baud != 115200 else 57600
succeeded = servo1.set_baud_rate(goal_baud)
servo1.stop()
self.assertTrue(succeeded)
servo2 = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=dxl_id, usb=usb, baud=goal_baud, logger=logger)
curr_baud = servo2.get_baud_rate()
self.assertEqual(curr_baud, goal_baud)
self.assertTrue(servo2.do_ping())
servo2.stop()
servo3 = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=dxl_id, usb=usb, baud=start_baud, logger=logger)
self.assertRaises(stretch_body.dynamixel_XL430.DynamixelCommError, servo3.get_baud_rate)
servo3.stop()
# reset baud to its starting baud
servo4 = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=dxl_id, usb=usb, baud=goal_baud, logger=logger)
self.assertTrue(servo4.do_ping())
succeeded = servo4.set_baud_rate(start_baud)
self.assertTrue(succeeded)
servo4.stop()
servo5 = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=dxl_id, usb=usb, baud=start_baud, logger=logger)
curr_baud = servo5.get_baud_rate()
self.assertEqual(curr_baud, start_baud)
self.assertTrue(servo5.do_ping())
servo5.stop()
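# Note: these tests talk to real Dynamixel servos on the /dev/hello-dynamixel-* ports of
# a Stretch robot. A typical invocation from the directory containing this file might be:
#
#   python -m unittest test_dynamixel_XL430 -v   # module name is an assumption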
|
py | 1a532e17dd77afe826f39b016d7c9c3fcb6baf42 | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/lty/catkin_ws/src/driver/depth_camera/iai_kinect2-master/kinect2_bridge/include".split(';') if "/home/lty/catkin_ws/src/driver/depth_camera/iai_kinect2-master/kinect2_bridge/include" != "" else []
PROJECT_CATKIN_DEPENDS = "kinect2_registration".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "kinect2_bridge"
PROJECT_SPACE_DIR = "/home/lty/catkin_ws/devel"
PROJECT_VERSION = "0.0.1"
|
py | 1a532ea37007679bfd0a09e3b49ae0d6806169bd | import cv2
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
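# Note: haarcascade_frontalface_default.xml must be present in the working directory;
# recent OpenCV builds also ship it under cv2.data.haarcascades, so an alternative is
# cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').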
def face_extractor(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray, 1.3, 5)
if len(faces) == 0:
return
for (x, y, w, h) in faces:
cropped_faces = img[y:y + h, x:x + w]
return cropped_faces
cap = cv2.VideoCapture(0)
count = 0
while True:
ret, frame = cap.read()
face = face_extractor(frame)  # run detection only once per frame
if face is not None:
count += 1
face = cv2.resize(face, (200, 200))
face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
file_name_path = '/home/abhishek/Documents/Face/users'+str(count) + '.jpg'
cv2.imwrite(file_name_path, face)
cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
cv2.imshow('Face Cropper', face)
else:
print("Face not found")
pass
if cv2.waitKey(1) & 0xFF == ord('q') or count == 500:
break
cap.release()
cv2.destroyAllWindows()
print("Collecting Samples Complete!!")
|
py | 1a532f27876a30a4c3d53e297b36bc79c5edd264 | from tqdm import tqdm
import numpy as np
import torch
from typing import Callable, Optional, Union
from alibi_detect.cd.base_online import BaseDriftOnline
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.pytorch import zero_diag, quantile
class MMDDriftOnlineTorch(BaseDriftOnline):
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_size: int,
preprocess_fn: Optional[Callable] = None,
kernel: Callable = GaussianRBF,
sigma: Optional[np.ndarray] = None,
n_bootstraps: int = 1000,
device: Optional[str] = None,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Online maximum Mean Discrepancy (MMD) data drift detector using preconfigured thresholds.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift.
window_size
The size of the sliding test-window used to compute the test-statistic.
Smaller windows focus on responding quickly to severe drift, larger windows focus on
ability to detect slight drift.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
kernel
Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
sigma
Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
The kernel evaluation is then averaged over those bandwidths. If `sigma` is not specified, the 'median
heuristic' is adopted whereby `sigma` is set as the median pairwise distance between reference samples.
n_bootstraps
The number of bootstrap simulations used to configure the thresholds. The larger this is the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ERT.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
ert=ert,
window_size=window_size,
preprocess_fn=preprocess_fn,
n_bootstraps=n_bootstraps,
verbose=verbose,
input_shape=input_shape,
data_type=data_type
)
self.meta.update({'backend': 'pytorch'})
# set backend
if device is None or device.lower() in ['gpu', 'cuda']:
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if self.device.type == 'cpu':
print('No GPU detected, fall back on CPU.')
else:
self.device = torch.device('cpu')
# initialize kernel
sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma, np.ndarray) else None
self.kernel = kernel(sigma) if kernel == GaussianRBF else kernel
# compute kernel matrix for the reference data
self.x_ref = torch.from_numpy(self.x_ref).to(self.device)
self.k_xx = self.kernel(self.x_ref, self.x_ref, infer_sigma=(sigma is None))
self._configure_thresholds()
self._initialise()
def _configure_ref_subset(self):
etw_size = 2*self.window_size-1 # etw = extended test window
rw_size = self.n - etw_size # rw = ref-window
# Make split and ensure it doesn't cause an initial detection
mmd_init = None
while mmd_init is None or mmd_init >= self.get_threshold(0):
# Make split
perm = torch.randperm(self.n)
self.ref_inds, self.init_test_inds = perm[:rw_size], perm[-self.window_size:]
self.test_window = self.x_ref[self.init_test_inds]
# Compute initial mmd to check for initial detection
self.k_xx_sub = self.k_xx[self.ref_inds][:, self.ref_inds]
self.k_xx_sub_sum = zero_diag(self.k_xx_sub).sum()/(rw_size*(rw_size-1))
self.k_xy = self.kernel(self.x_ref[self.ref_inds], self.test_window)
k_yy = self.kernel(self.test_window, self.test_window)
mmd_init = (
self.k_xx_sub_sum +
zero_diag(k_yy).sum()/(self.window_size*(self.window_size-1)) -
2*self.k_xy.mean()
)
def _configure_thresholds(self):
# Each bootstrap sample splits the reference samples into a sub-reference sample (x)
# and an extended test window (y). The extended test window will be treated as W overlapping
# test windows of size W (so 2W-1 test samples in total)
w_size = self.window_size
etw_size = 2*w_size-1 # etw = extended test window
rw_size = self.n - etw_size # rw = sub-ref window
perms = [torch.randperm(self.n) for _ in range(self.n_bootstraps)]
x_inds_all = [perm[:-etw_size] for perm in perms]
y_inds_all = [perm[-etw_size:] for perm in perms]
if self.verbose:
print("Generating permutations of kernel matrix..")
# Need to compute mmd for each bs for each of W overlapping windows
# Most of the computation can be done once however
# We avoid summing the rw_size^2 submatrix for each bootstrap sample by instead computing the full
# sum once and then subtracting the relevant parts (k_xx_sum = k_full_sum - 2*k_xy_sum - k_yy_sum).
# We also reduce computation of k_xy_sum from O(nW) to O(W) by caching column sums
k_full_sum = zero_diag(self.k_xx).sum()
k_xy_col_sums_all = [
self.k_xx[x_inds][:, y_inds].sum(0) for x_inds, y_inds in
(tqdm(zip(x_inds_all, y_inds_all), total=self.n_bootstraps) if self.verbose else
zip(x_inds_all, y_inds_all))
]
k_xx_sums_all = [(
k_full_sum - zero_diag(self.k_xx[y_inds][:, y_inds]).sum() - 2*k_xy_col_sums.sum()
)/(rw_size*(rw_size-1)) for y_inds, k_xy_col_sums in zip(y_inds_all, k_xy_col_sums_all)]
k_xy_col_sums_all = [k_xy_col_sums/(rw_size*w_size) for k_xy_col_sums in k_xy_col_sums_all]
# Now to iterate through the W overlapping windows
thresholds = []
p_bar = tqdm(range(w_size), "Computing thresholds") if self.verbose else range(w_size)
for w in p_bar:
y_inds_all_w = [y_inds[w:w+w_size] for y_inds in y_inds_all] # test windows of size w_size
mmds = [(
k_xx_sum +
zero_diag(self.k_xx[y_inds_w][:, y_inds_w]).sum()/(w_size*(w_size-1)) -
2*k_xy_col_sums[w:w+w_size].sum()
) for k_xx_sum, y_inds_w, k_xy_col_sums in zip(k_xx_sums_all, y_inds_all_w, k_xy_col_sums_all)
]
mmds = torch.tensor(mmds) # an mmd for each bootstrap sample
# Now we discard all bootstrap samples for which mmd is in top (1/ert)% and record the thresholds
thresholds.append(quantile(mmds, 1-self.fpr))
y_inds_all = [y_inds_all[i] for i in range(len(y_inds_all)) if mmds[i] < thresholds[-1]]
k_xx_sums_all = [
k_xx_sums_all[i] for i in range(len(k_xx_sums_all)) if mmds[i] < thresholds[-1]
]
k_xy_col_sums_all = [
k_xy_col_sums_all[i] for i in range(len(k_xy_col_sums_all)) if mmds[i] < thresholds[-1]
]
self.thresholds = thresholds
def score(self, x_t: np.ndarray) -> Union[float, None]:
"""
Compute the test-statistic (squared MMD) between the reference window and test window.
If the test-window is not yet full then a test-statistic of None is returned.
Parameters
----------
x_t
A single instance.
Returns
-------
Squared MMD estimate between reference window and test window
"""
x_t = torch.from_numpy(x_t[None, :]).to(self.device)
kernel_col = self.kernel(self.x_ref[self.ref_inds], x_t)
self.test_window = torch.cat([self.test_window[(1-self.window_size):], x_t], 0)
self.k_xy = torch.cat([self.k_xy[:, (1-self.window_size):], kernel_col], 1)
k_yy = self.kernel(self.test_window, self.test_window)
mmd = (
self.k_xx_sub_sum +
zero_diag(k_yy).sum()/(self.window_size*(self.window_size-1)) -
2*self.k_xy.mean()
)
return float(mmd.detach().cpu())
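# Minimal usage sketch (hypothetical data; assumes x_ref is an (n, d) numpy array of
# reference instances and that test instances arrive one at a time as 1-d arrays):
#
#   cd = MMDDriftOnlineTorch(x_ref, ert=150., window_size=10)
#   for x_t in stream:            # x_t: np.ndarray of shape (d,)
#       stat = cd.score(x_t)      # squared-MMD estimate for the current test window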
|
py | 1a532f9154e6dafee5f4fd3125c36ba65afc2e6b | from utils import *
import pickle
import numpy_indexed
import copy
# version of dfs that builds the graph as it traverses it
def dfs_2(start, goals, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r, max_depth = 11):
'''
Parameters:
start : str
name of the root node, e.g. 'ctg1'
goals : list of str
list of goal nodes, e.g. goals = ['ctg1', 'ctg2']
side : str
side to which we try to build path, right or left
if side == 'right', continuing read is building path on the right side of target node
grouped_c_r : numpy indexed object
object that contains info about overlaps
e.g. grouped_c_r[0] gives info about continuing reads for contig1, grouped_c_r[1] for contig2, etc.
grouped_r_r : numpy indexed object
overlaps between reads, similar to grouped_c_r
keys_c_r : list of str
a list of keys for grouped_r_r object, e.g. ['ctg1', 'ctg2']
keys_r_r are used to get the index of contig in grouped_c_r object based on its name
keys_r_r : list of str
similair to keys_c_r
Returns:
paths_to_goals: list of lists of str
e.g. [['ctg1', 'read1', 'ctg2'], ['ctg1', 'read2', 'ctg2']]
a list of paths, where a path is a list of strings
'''
graph = dict()
graph[start] = set(monte_carlo_extending_for_contig(start, side, grouped_c_r, keys_c_r))
paths_to_goals = []
stack = [(start, [start])]
while stack:
# print(stack)
(vertex, path) = stack.pop()
if len(path) <= max_depth:
if vertex in graph:
for next in graph[vertex] - set(path):
if next in goals:
paths_to_goals.append(path + [next])
# print(paths_to_goals)
else:
stack.append((next, path + [next]))
# find new connecting reads for this node if they exist
# if next in graph:
graph[next] = set(monte_carlo_extending_for_read(next, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r))
return paths_to_goals
# def get_n_best_connecting_reads_for_contig(contig, grouped, keys, side, n = 2, first_approach = True):
# group = grouped[keys.index(contig)]
# connecting_reads = group[np.where(group['extension_side'] == side)]
# if first_approach:
# ind = np.lexsort((connecting_reads['SI'], connecting_reads['OS']))
# else:
# ind = np.lexsort((connecting_reads['SI'], connecting_reads['ES']))
# return connecting_reads[ind][-n:]['query_name']
# def get_n_best_connecting_reads_for_read(read, side, grouped_r_r, keys_r_r, grouped_c_r, keys_c_r, n = 2, first_approach = True):
# if side == 'right':
# other_side = 'left'
# else:
# other_side = 'right'
# second_group = []
# for i in range(len(keys_c_r)):
# group = grouped_c_r[i]
# for j in group:
# if j['query_name'] == read and j['extension_side'] == other_side:
# new_row = copy.deepcopy(j)
# new_row['query_name'] = keys_c_r[i]
# second_group.append(new_row)
# # print(type(second_group))
# # print(second_group)
# if read in keys_r_r:
# group = grouped_r_r[keys_r_r.index(read)]
# connecting_reads = group[np.where(group['extension_side'] == side)]
# final_group = connecting_reads
# # final_group = []
# if second_group != []:
# final_group = np.append(connecting_reads, second_group)
# # if second_group != []:
# # final_group.append(second_group)
# if first_approach:
# ind = np.lexsort((final_group['SI'], final_group['OS']))
# else:
# ind = np.lexsort((final_group['SI'], final_group['ES']))
# return final_group[ind][-n:]['query_name']
# else:
# return []
def monte_carlo_extending_for_contig(contig, side, grouped_c_r, keys_c_r,):
'''
Parameters:
contig : str
name of the starting contig, e.g. 'ctg1'
side : str
side to which we try to build path, right or left
if side == 'right', continuing read is building path on the right side of target node
grouped_c_r : numpy indexed object
object that contains info about overlaps
e.g. grouped_c_r[0] gives info about continuing reads for contig1, grouped_c_r[1] for contig2, etc.
keys_c_r : list of str
a list of keys for grouped_r_r object, e.g. ['ctg1', 'ctg2']
keys_r_r are used to get the index of contig in grouped_c_r object based on its name
Returns:
chosen_read : list of one str
read that the monte carlo approach found as a continuing read for a given contig
in a list because the dfs_2 method expects a list of continuing reads; dfs_2 was written that way
because of the first two approaches
In case it doesn't find one, the method returns an empty list so that dfs_2 doesn't crash
subject to change ???!!!
Karlo, feel free to change this so that it returns a string, and change dfs_2 so that it accepts a string
if that suits you
'''
group = grouped_c_r[keys_c_r.index(contig)]
group = group[np.where(group['extension_side'] == side)]
group = group[np.where(group['ES'] >= 0)]
if group.size != 0:
reads_ES = group['ES']
sum_ES = np.sum(reads_ES)
probabilities = [x / sum_ES for x in reads_ES]
chosen_read = np.random.choice(a = group, p = probabilities)
chosen_read = [chosen_read['query_name']]
return chosen_read
else:
return []
def monte_carlo_extending_for_read(read, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r):
'''
Parameters:
read : str
name of the read for which we want to find continuing read, e.g. 'read00291'
side : str
side to which we try to build path, right or left
if side == 'right', continuing read is building path on the right side of target node
grouped_c_r/grouped_r_r/keys_c_r/keys_r_r
look at previous comments in other methods
Returns:
chosen_read : list of one str
read that the monte carlo approach found as a continuing read for a given read
in a list because the dfs_2 method expects a list of continuing reads; dfs_2 was written that way
because of the first two approaches
In case it doesn't find one, the method returns an empty list so that dfs_2 doesn't crash
subject to change ???
Karlo, feel free to change this so that it returns a string, and change dfs_2 so that it accepts a string
if that suits you
'''
if side == 'right':
other_side = 'left'
else:
other_side = 'right'
second_group = []
for i in range(len(keys_c_r)):
group = grouped_c_r[i]
for j in group:
if j['query_name'] == read and j['extension_side'] == other_side and j['ES'] > 0:
new_row = copy.deepcopy(j)
new_row['query_name'] = keys_c_r[i]
second_group.append(new_row)
if read in keys_r_r:
group = grouped_r_r[keys_r_r.index(read)]
group = group[np.where(group['extension_side'] == side)]
group = group[np.where(group['ES'] >= 0)]
if second_group != []:
group = np.append(group, second_group)
if group.size != 0:
reads_ES = group['ES']
sum_ES = np.sum(reads_ES)
probabilities = [x / sum_ES for x in reads_ES]
chosen_read = np.random.choice(a = group, p = probabilities)
chosen_read = [chosen_read['query_name']]
return chosen_read
else:
return []
else:
return []
def try_monte_carlo(start, goals, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r, max_depth=50, n_times = 100):
'''
Parameters:
start : str
name of the root node, e.g. 'ctg1'
goals : list of str
list of goal nodes, e.g. goals = ['ctg1', 'ctg2']
side : str
side to which we try to build path, right or left
if side == 'right', continuing read is building path on the right side of target node
grouped_c_r : numpy indexed object
object that contains info about overlaps
e.g. grouped_c_r[0] gives info about continuing reads for contig1, grouped_c_r[1] for contig2, etc.
grouped_r_r : numpy indexed object
overlaps between reads, similar to grouped_c_r
keys_c_r : list of str
a list of keys for grouped_r_r object, e.g. ['ctg1', 'ctg2']
keys_r_r are used to get the index of contig in grouped_c_r object based on its name
keys_r_r : list of str
similar to keys_c_r
max_depth : int
maximum number of nodes in a path
n_times : int
number of times that we try to get a path with monte carlo approach
given a 1000 tries, we usually find around 5-10 paths
Returns:
all_paths: list of lists of paths
e.g. [[['ctg1', 'read1', 'ctg2']], [['ctg1', 'read2', 'ctg2']]]
probably one level of unnecessary complication, built that way because of first two approaches
where we expected to find more than one path per dfs search, SUBJECT TO CHANGE, you could
change this so that all_paths is a list of paths, not a list inside list
'''
all_paths = []
for i in range(n_times):
paths = dfs_2(start, goals, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r, max_depth=max_depth)
if paths != []:
all_paths.append(paths)
return all_paths
if __name__ == '__main__':
# data = {'A': set(['r1', 'r2', 'r3']),
# 'r1': set(['r4', 'r5']),
# 'r2': set(['r6', 'r7']),
# 'r3': set(['r8', 'r9']),
# 'r4': set(['A1', 'r10']),
# 'r5': set(['r11', 'r12']),
# 'r9': set(['r13', 'r14']),
# 'r11': set(['A2', 'r16']),
# 'r10': set(['r4']),
# 'r13': set(['A3', 'r15']),
# 'r15': set(['r17', 'r18'])}
# start = 'A'
# goals = ['A1', 'A2', 'A3']
# paths = dfs_2(data, start, goals)
# print(paths)
# testing on fake data
# grouped_c_r = [np.array([('read1', 1, 2, 3, 'right')], dtype=(np.record, [('query_name', '<U250'), ('SI', '<f8'), ('OS', '<f8'), ('ES', '<f8'), ('extension_side', '<U25')])), np.array([('read3', 1, 2, 3, 'left')], dtype=(np.record, [('query_name', '<U250'), ('SI', '<f8'), ('OS', '<f8'), ('ES', '<f8'), ('extension_side', '<U25')]))]
# keys_c_r = ['ctg1', 'ctg2']
# grouped_r_r = [np.array([('read2', 1, 2, 3, 'right')], dtype=(np.record, [('query_name', '<U250'), ('SI', '<f8'), ('OS', '<f8'), ('ES', '<f8'), ('extension_side', '<U25')])), np.array([('read3', 1, 2, 3, 'right')], dtype=(np.record, [('query_name', '<U250'), ('SI', '<f8'), ('OS', '<f8'), ('ES', '<f8'), ('extension_side', '<U25')])), np.array([], dtype=(np.record, [('query_name', '<U250'), ('SI', '<f8'), ('OS', '<f8'), ('ES', '<f8'), ('extension_side', '<U25')]))]
# keys_r_r = ['read1', 'read2', 'read3']
with open('grouped_data_c_r', 'rb') as grouped_c_r_file:
grouped_c_r = pickle.load(grouped_c_r_file)
with open('grouped_data_r_r', 'rb') as grouped_r_r_file:
grouped_r_r = pickle.load(grouped_r_r_file)
with open('keys_c_r', 'rb') as keys_c_r_file:
keys_c_r = pickle.load(keys_c_r_file).tolist()
with open('keys_r_r', 'rb') as keys_r_r_file:
keys_r_r = pickle.load(keys_r_r_file).tolist()
start = keys_c_r[1]
goals = keys_c_r
side = 'left'
paths = try_monte_carlo(start, goals, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r, max_depth=30, n_times = 500)
# with open('mc_ctg3_right', 'wb') as paths_right_side_file:
# pickle.dump(paths, paths_right_side_file)
print(paths) |
py | 1a532fb7e090da7e09d7bcc04443971a1971d546 | """ILI9341 demo (Scrolling Marquee)."""
from ili9341 import Display, color565
from time import sleep
from sys import implementation
def test():
"""Scrolling Marquee."""
try:
# Implementation dependant pin and SPI configuration
if implementation.name == 'circuitpython':
import board
from busio import SPI
from digitalio import DigitalInOut
cs_pin = DigitalInOut(board.P0_15)
dc_pin = DigitalInOut(board.P0_17)
rst_pin = DigitalInOut(board.P0_20)
spi = SPI(clock=board.P0_24, MOSI=board.P0_22)
else:
from machine import Pin, SPI
cs_pin = Pin(16)
dc_pin = Pin(4)
rst_pin = Pin(17)
# Baud rate of 40000000 seems about the max
spi = SPI(1, baudrate=40000000, sck=Pin(14), mosi=Pin(13))
# Create the ILI9341 display:
display = Display(spi, dc=dc_pin, cs=cs_pin, rst=rst_pin)
display.clear()
# Draw non-moving circles
display.fill_rectangle(0, 0, 239, 99, color565(27, 72, 156))
display.fill_rectangle(0, 168, 239, 151, color565(220, 27, 72))
# Load Marquee image
display.draw_image('images/Rototron128x26.raw', 56, 120, 128, 26)
# Set up scrolling
display.set_scroll(top=152, bottom=100)
spectrum = list(range(152, 221)) + list(reversed(range(152, 220)))
while True:
for y in spectrum:
display.scroll(y)
sleep(.1)
except KeyboardInterrupt:
display.cleanup()
test()
|
py | 1a532fe0970853cd5d28ae5079fbe15bb480fb76 | # AICToolbox
# Author: Robin BARKAS
from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QGridLayout, QVBoxLayout, QHBoxLayout, QLineEdit, QLabel, QMessageBox, QGroupBox, QFrame
import utils
# Defines a form, tied to a scenario, used to enter that scenario's parameters
# e.g. username, password...
class FormScenario(QFrame):
def __init__(self, fields):
super().__init__()
self.fields = []
self.setUI(fields)
def setUI(self, fields):
self.hbox = QHBoxLayout()
for field in fields:
lineEdit = utils.LineEditWithPlaceholder(field)
if (field.startswith("Mot de passe")):
lineEdit.setEchoMode(QLineEdit.Password)
self.fields.append(lineEdit)
self.hbox.addWidget(lineEdit)
self.setLayout(self.hbox)
def add_line_edit(self, placeholder=None):
line_edit = QLineEdit()
if placeholder is not None:
line_edit.setPlaceholderText(placeholder)
return line_edit
def get_values(self):
values = []
for field in self.fields:
values.append(field.text())
return values
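# Minimal usage sketch (assumes a running QApplication; the field labels are examples
# matching the "Mot de passe" check above):
#
#   app = QApplication([])
#   form = FormScenario(["Nom d'utilisateur", "Mot de passe"])
#   form.show()
#   app.exec_()
#   print(form.get_values())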
|
py | 1a53304cad9310197104ed182a7415ddaa77346a | from regression_tests import *
class Testx64MSVCDetection(Test):
settings = TestSettings(
tool='fileinfo',
input='ackermann.exe'
)
def test_detected_autoit(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output.contains(r'Microsoft Linker \(14\.1\)')
assert self.fileinfo.output.contains(r'MSVC \(15\.0\) Visual Studio 2017')
|
py | 1a5330bd3e3e510d1d25b42e749d586f4d0d9a2a | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_extract_key_phrases_async.py
DESCRIPTION:
This sample demonstrates how to extract key talking points from a batch of documents.
USAGE:
python sample_extract_key_phrases_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_TEXT_ANALYTICS_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_TEXT_ANALYTICS_KEY - your Text Analytics subscription key
"""
import os
import asyncio
class ExtractKeyPhrasesSampleAsync(object):
endpoint = os.getenv("AZURE_TEXT_ANALYTICS_ENDPOINT")
key = os.getenv("AZURE_TEXT_ANALYTICS_KEY")
async def extract_key_phrases_async(self):
# [START batch_extract_key_phrases_async]
from azure.ai.textanalytics.aio import TextAnalyticsClient
from azure.ai.textanalytics import TextAnalyticsApiKeyCredential
text_analytics_client = TextAnalyticsClient(endpoint=self.endpoint, credential=TextAnalyticsApiKeyCredential(self.key))
documents = [
"Redmond is a city in King County, Washington, United States, located 15 miles east of Seattle.",
"I need to take my cat to the veterinarian.",
"I will travel to South America in the summer.",
]
async with text_analytics_client:
result = await text_analytics_client.extract_key_phrases(documents)
for doc in result:
if not doc.is_error:
print(doc.key_phrases)
if doc.is_error:
print(doc.id, doc.error)
# [END batch_extract_key_phrases_async]
async def main():
sample = ExtractKeyPhrasesSampleAsync()
await sample.extract_key_phrases_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
py | 1a5331d9a854254876a95bd2882f0fc2b1c7cb56 | import os
from indico import IndicoClient, IndicoConfig
# Will connect to https://app.indico.io
client = IndicoClient()
# Environment variables override defaults
os.environ["INDICO_HOST"] = "foo.bar.com"
# Will connect to https://foo.bar.com
client = IndicoClient()
# IndicoConfig will override environment variables and defaults
my_config = IndicoConfig(
host="indico.my-company.com", # Overrides environment variable
api_token_path="../path/to/custom_api_token.txt",
)
# Will connect to https://indico.my-company.com
client = IndicoClient(config=my_config)
|
py | 1a533228de74e37ad23c00de6c4440a7f2b092f9 | #!/usr/bin/env python3
import common_test_lib
import os
import subprocess
import argparse
#######################################################################################
# edit test parameters into these lists to run different workloads
ibof_root = os.path.dirname(os.path.abspath(__file__)) + "/../../../"
#######################################################################################
stdout_type = subprocess.STDOUT
unittest_path = ["src/bio/ubio_error_test", "src/device/unvme/mdts_detach_test"]
def build_ibofos_library_option():
current_path = os.getcwd()
os.chdir(ibof_root)
subprocess.call(["./configure", \
"--with-library-build"])
ret = subprocess.call(["make", "-j4"])
if(ret != 0):
print("\tBuild Failed !!")
exit(-1)
os.chdir(current_path)
def build_and_test(fabric_ip):
current_path = os.getcwd()
for test_path in unittest_path:
common_test_lib.print_start(test_path);
os.chdir(ibof_root)
os.chdir(ibof_root+test_path)
ret = subprocess.call(["make"])
if(ret != 0):
print("\tMake failed for %s" % (test_path))
exit(-1)
test_name = test_path.split('/')[-1]
common_test_lib.kill_and_wait([test_name, "poseidonos", "fio"])
ret = subprocess.call(["./" + test_name, "-a", fabric_ip])
if (ret != 0 and ret != -9): #Sigkill is correct.
print("\tTest failed for %s, ret : %d" % (test_path, ret))
exit(-1)
os.chdir(current_path)
default_target_ip = "10.100.11.9"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='IO Unit Test')
parser.add_argument('-f', '--fabric_ip', default=default_target_ip,\
help='Set target IP, default: ' + default_target_ip)
args = parser.parse_args()
if(args.fabric_ip != None):
default_target_ip = args.fabric_ip
build_ibofos_library_option()
print (default_target_ip)
build_and_test(default_target_ip)
|
py | 1a5333d07fb53d78dcfdcd3c2c4c4050e3afa1d0 | import gevent
import io
import logging
import re
from datetime import datetime
from gevent import Greenlet, sleep
from gtts import gTTS
from .blob import blobs
from .hashtag import HashtagModel
from .twitter import get
from .tweet import TweetModel
logger = logging.getLogger('umahuesla')
class Crawler(Greenlet):
def run(self):
while True:
gevent.joinall([gevent.spawn(self.do_it)])
sleep(120)
def do_it(self):
hashtags = HashtagModel.query().filter(
HashtagModel.active == True # noqa
).all()
for hashtag in hashtags:
kwargs = {
'term': f'#{hashtag.tag} -filter:nativeretweets'
}
res = get(**kwargs)
for item in res:
self.store(item, hashtag.tag)
def store(self, item, hashtag):
tweet = TweetModel.get(item.id_str)
if not tweet:
text = re.sub(r'https?:\/\/.*[\r\n]*', '', item.full_text)
tts = gTTS(text.replace('#', '').replace('@', ''), lang='de')
stream = io.BytesIO()
tts.write_to_fp(stream)
res = blobs.create(stream)
alexa_text = text.replace('#', '<phoneme alphabet="ipa" ph="ˈhæʃtæɡ">#</phoneme>')
video_url = None
if item.media:
for i in item.media:
if i.type == 'video' and i.video_info:
bitrate = 0
for var in i.video_info.get('variants', []):
if var.get('bitrate', 0) > bitrate:
bitrate = var['bitrate']
video_url = var.get('url')
try:
logger.info(f"{item.user.name} - {text}")
tweet = TweetModel(
uid=item.id_str,
update_date=datetime.fromtimestamp(item.created_at_in_seconds),
title_text=item.user.name,
main_text=alexa_text,
stream_id=res,
video_url=video_url,
hashtags=[hashtag]
)
tweet.add_to_session()
tweet.session.flush()
except Exception as e:
                logger.error(e)
elif hashtag not in tweet.hashtags:
tweet.hashtags.append(hashtag)
tweet.session.flush()
|
py | 1a5336147029c244ab279baf8c7a0d3c47914b05 | import logging
from ..redislist import RedisDropboxIndexList
from .solrupdater import DropboxSolrUpdater
log = logging.getLogger('dropbox')
class DropboxIndexer:
"""
Read all Dropbox entries stored in Redis for a `bearertoken_id` and send them to Solr.
Parameters:
bearertoken_id -- a `models.BearerToken.id`.
access_token -- a `models.BearerToken.access_token`.
"""
def __init__(self, bearertoken_id, access_token):
self.bearertoken_id = bearertoken_id
self.access_token = access_token
def run(self):
redis = RedisDropboxIndexList(self.bearertoken_id)
solr_updater = DropboxSolrUpdater(self.bearertoken_id)
for redis_entry in redis.iterate():
# `redis_entry` is a `RedisDropboxEntry` instance.
# If:
# - `redis_entry.is_del()`: delete the file from Sorl
# - `redis_entry.is_reset()`: delete the entire index from Solr
# - `redis_entry.is_add()`: add the file to Solr (the file has already
# been downloaded locally)
#
# Bear in mind that:
            # - entries with `redis_entry.is_add()` are only files (no dirs, because they
            #   have already been filtered out)
            # - entries with `redis_entry.is_del()`: we don't know if they are files or dirs,
# but we don't care since during indexing we ask Solr to delete: name and name/*
# And a sanity check is run when creating a `RedisDropboxEntry` instance.
if redis_entry.is_del():
log.debug('Solr DEL: {}'.format(redis_entry.remote_path))
solr_updater.delete(redis_entry)
if redis_entry.is_reset():
log.debug('Solr RESET')
solr_updater.reset()
if redis_entry.is_add():
log.debug('Solr ADD: {}'.format(redis_entry.remote_path))
solr_updater.add(redis_entry)
solr_updater.commit() |
py | 1a5337157d6ec1f54cd3274b6778a83b6843671a | from .ifd import *
from .rib import *
|
py | 1a533749aefedac5e8623201df9edf8457e0c039 | from math import sqrt
import pytest
import torch
from torch_geometric.testing import withPackage
from torch_geometric.utils import geodesic_distance
@withPackage('gdist')
@pytest.mark.skip(reason="No way of currently testing this")
def test_geodesic_distance():
pos = torch.Tensor([[0, 0, 0], [2, 0, 0], [0, 2, 0], [2, 2, 0]])
face = torch.tensor([[0, 1, 3], [0, 2, 3]]).t()
out = geodesic_distance(pos, face)
expected = [
[0, 1, 1, sqrt(2)],
[1, 0, sqrt(2), 1],
[1, sqrt(2), 0, 1],
[sqrt(2), 1, 1, 0],
]
assert torch.allclose(out, torch.tensor(expected))
assert torch.allclose(out, geodesic_distance(pos, face, num_workers=-1))
out = geodesic_distance(pos, face, norm=False)
expected = [
[0, 2, 2, 2 * sqrt(2)],
[2, 0, 2 * sqrt(2), 2],
[2, 2 * sqrt(2), 0, 2],
[2 * sqrt(2), 2, 2, 0],
]
assert torch.allclose(out, torch.tensor(expected))
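    # The unnormalized distances are exactly twice the default-norm values above;
    # sqrt(mesh area) = sqrt(4) = 2 for this 2x2 square, which appears to be the
    # scaling applied when norm=True (assumption inferred from these expectations).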
src = torch.tensor([0, 0, 0, 0])
dest = torch.tensor([0, 1, 2, 3])
out = geodesic_distance(pos, face, src=src, dest=dest)
expected = [0, 1, 1, sqrt(2)]
assert torch.allclose(out, torch.tensor(expected))
out = geodesic_distance(pos, face, dest=dest)
expected = [0, 0, 0, 0]
assert torch.allclose(out, torch.Tensor(expected))
|
py | 1a533760163b1dbfd2425419a81a8078bdf9c42f | #!/usr/bin/env python3
"""Evaluate an Equation.
Create a function that evaluates an equation.
Source:
https://edabit.com/challenge/QM6ZgHxvQCDX9Tzoa
"""
def eq(equation: str) -> int:
"""Evaluate the equation."""
code = compile(equation, "<string>", "eval")
return eval(code)
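# Hedged alternative: eval() runs arbitrary code, so untrusted input is unsafe.
# Minimal sketch (assumes Python 3.8+ and only +, -, *, /, ** with parentheses),
# walking the AST instead of calling eval(); safe_eq is a hypothetical helper,
# not part of the original challenge.
import ast
import operator as _op
_ALLOWED_OPS = {
    ast.Add: _op.add, ast.Sub: _op.sub, ast.Mult: _op.mul,
    ast.Div: _op.truediv, ast.Pow: _op.pow, ast.USub: _op.neg,
}
def safe_eq(equation: str) -> float:
    """Evaluate an arithmetic expression without eval()."""
    def _walk(node):
        if isinstance(node, ast.Expression):
            return _walk(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _ALLOWED_OPS:
            return _ALLOWED_OPS[type(node.op)](_walk(node.left), _walk(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _ALLOWED_OPS:
            return _ALLOWED_OPS[type(node.op)](_walk(node.operand))
        raise ValueError("unsupported expression: %r" % equation)
    return _walk(ast.parse(equation, mode="eval"))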
def main():
"""Run sample functions. Do not import."""
assert eq("1+2") == 3
assert eq("6/(9-7)") == 3
assert eq("3+2-4") == 1
assert eq("3*4+1") == 13
assert eq("5*8-4*9") == 4
assert eq("3**7") == 2187
assert eq("(6**3)+3") == 219
print('Passed.')
if __name__ == "__main__":
main() |
py | 1a533766111487df0b515d1f847f051b45c24794 | import unittest
from iching.hex_generator import Hexagram
class TestTrigram(unittest.TestCase):
def test_trigram(self):
first, second, third = Hexagram.trigram()
# Test the generation of trigrams
self.assertLessEqual(first, 1, "Must be less than 2")
self.assertLessEqual(second, 1, "Must be less than 2")
self.assertLessEqual(third, 1, "Must be less than 2")
self.assertIsNotNone(Hexagram.trigram())
class TestHexagram(unittest.TestCase):
def test_hexagram(self):
hex1, hex2 = Hexagram.create_hexagram()
assert tuple(hex1)
assert tuple(hex2)
|
py | 1a53393344fda59b86a3505f690125359cd0c42b | #!/usr/bin/env python3
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
from builtins import object
from typing import Any, List
import click
from openr.cli.commands import decision
from openr.cli.utils.utils import parse_nodes
class DecisionCli(object):
def __init__(self):
self.decision.add_command(PathCli().path)
self.decision.add_command(DecisionAdjCli().adj)
self.decision.add_command(DecisionPrefixesCli().prefixes)
self.decision.add_command(
DecisionRoutesComputedCli().routes, name="routes-computed"
)
self.decision.add_command(
DecisionRoutesUnInstallableCli().routes, name="routes-uninstallable"
)
# for TG backward compatibility. Deprecated.
self.decision.add_command(DecisionRoutesComputedCli().routes, name="routes")
self.decision.add_command(DecisionValidateCli().validate)
@click.group()
@click.pass_context
def decision(ctx): # noqa: B902
""" CLI tool to peek into Decision module. """
pass
class PathCli(object):
@click.command()
@click.option(
"--src", default="", help="source node, " "default will be the current host"
)
@click.option(
"--dst",
default="",
help="destination node or prefix, " "default will be the current host",
)
@click.option("--max-hop", default=256, help="max hop count")
@click.option("--area", default=None, help="area identifier")
@click.pass_obj
def path(cli_opts, src, dst, max_hop, area): # noqa: B902
""" path from src to dst """
decision.PathCmd(cli_opts).run(src, dst, max_hop, area)
class DecisionRoutesComputedCli(object):
@click.command()
@click.option(
"--nodes",
default="",
help="Get routes for a list of nodes. Default will get "
"host's routes. Get routes for all nodes if 'all' is given.",
)
@click.option(
"--prefixes",
"-p",
default="",
multiple=True,
help="Get route for specific IPs or Prefixes.",
)
@click.option(
"--labels",
"-l",
type=click.INT,
multiple=True,
help="Get route for specific labels.",
)
@click.option("--json/--no-json", default=False, help="Dump in JSON format")
@click.pass_obj
def routes(cli_opts, nodes, prefixes, labels, json): # noqa: B902
""" Request the routing table from Decision module """
nodes = parse_nodes(cli_opts, nodes)
decision.DecisionRoutesComputedCmd(cli_opts).run(nodes, prefixes, labels, json)
class DecisionRoutesUnInstallableCli(object):
@click.command()
@click.option(
"--prefixes",
"-p",
default="",
multiple=True,
help="Get route for specific IPs or Prefixes.",
)
@click.option(
"--labels",
"-l",
type=click.INT,
multiple=True,
help="Get route for specific labels.",
)
@click.option("--json/--no-json", default=False, help="Dump in JSON format")
@click.pass_obj
def routes(cli_opts, prefixes, labels, json): # noqa: B902
""" Request un installable routing table of the current host """
decision.DecisionRoutesUnInstallableCmd(cli_opts).run(prefixes, labels, json)
class DecisionPrefixesCli(object):
@click.command()
@click.option(
"--nodes",
default="",
help="Dump prefixes for a list of nodes. Default will dump host's "
"prefixes. Dump prefixes for all nodes if 'all' is given.",
)
@click.option("--json/--no-json", default=False, help="Dump in JSON format")
@click.option("--prefix", "-p", default="", help="Prefix filter. Exact match")
@click.option(
"--client-type",
"-c",
default="",
help="Client type filter. Provide name e.g. loopback, bgp",
)
@click.pass_obj
def prefixes(
cli_opts: Any, # noqa: B902
nodes: List[str],
json: bool,
prefix: str,
client_type: str,
) -> None:
""" show the prefixes from Decision module """
nodes = parse_nodes(cli_opts, nodes)
decision.DecisionPrefixesCmd(cli_opts).run(nodes, json, prefix, client_type)
class DecisionAdjCli(object):
@click.command()
@click.option(
"--nodes",
default="",
help="Dump adjacencies for a list of nodes. Default will dump "
"host's adjs. Dump adjs for all nodes if 'all' is given",
)
@click.option("--bidir/--no-bidir", default=True, help="Only bidir adjacencies")
@click.option("--json/--no-json", default=False, help="Dump in JSON format")
@click.pass_obj
def adj(cli_opts, nodes, bidir, json): # noqa: B902
""" dump the link-state adjacencies from Decision module """
nodes = parse_nodes(cli_opts, nodes)
decision.DecisionAdjCmd(cli_opts).run(nodes, bidir, json)
class DecisionValidateCli(object):
@click.command()
@click.option("--json/--no-json", default=False, help="Dump in JSON format")
@click.option("--area", default=None, help="area identifier")
@click.pass_obj
def validate(cli_opts, json, area): # noqa: B902
"""
Check all prefix & adj dbs in Decision against that in KvStore
If --json is provided, returns database diffs in the following format.
"neighbor_down" is a list of nodes not in the inspected node's dump that were expected,
"neighbor_up" is a list of unexpected nodes in inspected node's dump,
"neighbor_update" is a list of expected nodes whose metadata are unexpected.
{
"neighbor_down": [
{
"new_adj": null,
"old_adj": $inconsistent_node
}
],
"neighbor_up": [
{
"new_adj": $inconsistent_node
"old_adj": null
}
],
"neighbor_update": [
{
"new_adj": $inconsistent_node
"old_adj": $inconsistent_node
}
]
}
"""
return_code = decision.DecisionValidateCmd(cli_opts).run(json, area)
sys.exit(return_code)
|
py | 1a533950635051d8d7d9fb1cf7f3dd0eef80b02b | #
# voter business logic: put commits here
#
import config
from model import Voter
db = config.db
import voter_dao
import user
def insert_voters_array(votation_id, ar):
"""returns number of inserted rows"""
count = 0
for user_name in ar:
u = user.load_user_by_username(user_name)
if u:
n = db.session.query(Voter).filter(Voter.user_id == u.user_id,Voter.votation_id==votation_id).count()
if n == 0:
o = Voter(votation_id = votation_id, user_id = u.user_id, voted = 0)
if voter_dao.insert_dto(o):
count += 1
if count > 0:
db.session.commit()
return count
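# Hedged usage sketch (the votation id and usernames are made-up examples):
#   inserted = insert_voters_array(42, ['alice', 'bob'])
#   # returns how many Voter rows were actually created; usernames that do not
#   # resolve to a user, or are already registered for the votation, are skipped.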
|
py | 1a533961f31edb614262dcebabd6728c6eba2653 |
import os, pathlib, PIL
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras import Model
from ResNet18 import ResNet18
from ResNet18V2 import ResNet18V2
from tensorflow.keras.applications.resnet import ResNet50
from tensorflow.keras.applications.resnet import ResNet101
from tensorflow.keras.applications.resnet import ResNet152
from tensorflow.keras.applications.resnet_v2 import ResNet50V2
from tensorflow.keras.applications.resnet_v2 import ResNet101V2
from tensorflow.keras.applications.resnet_v2 import ResNet152V2
class ResNet(Model):
def __init__(self, data_shape=(224, 224, 3), resnet_version=1, resnet_layer_number=50, num_classes=1000):
super(ResNet, self).__init__()
weights = None
if num_classes == 1000 and data_shape == (224, 224, 3):
weights = 'imagenet'
self.resnet_version = resnet_version
self.data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip(
"horizontal",
input_shape=data_shape),
layers.experimental.preprocessing.RandomRotation(0.1),
layers.experimental.preprocessing.RandomZoom(0.1),
]
)
self.rescaling = layers.experimental.preprocessing.Rescaling(1./255)
def preprocess_input(x, data_format=None):
from tensorflow.keras.applications import imagenet_utils
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode='tf')
#return x
self.preprocess_input = preprocess_input
if resnet_layer_number == 18:
if resnet_version == 1:
self.resnet = ResNet18(category_num=num_classes)
else:
self.resnet = ResNet18V2(category_num=num_classes)
elif resnet_layer_number == 50:
if resnet_version == 1:
self.resnet = ResNet50(weights=weights, input_shape=data_shape, classes=num_classes)
else:
self.resnet = ResNet50V2(weights=weights, input_shape=data_shape, classes=num_classes)
elif resnet_layer_number == 101:
if resnet_version == 1:
self.resnet = ResNet101(weights=weights, input_shape=data_shape, classes=num_classes)
else:
self.resnet = ResNet101V2(weights=weights, input_shape=data_shape, classes=num_classes)
elif resnet_layer_number == 152:
if resnet_version == 1:
self.resnet = ResNet152(weights=weights, input_shape=data_shape, classes=num_classes)
else:
self.resnet = ResNet152V2(weights=weights, input_shape=data_shape, classes=num_classes)
self.build((None,) + data_shape)
def call(self, x):
x = self.data_augmentation(x)
x = self.rescaling(x)
x = self.preprocess_input(x)
x = tf.keras.applications.mobilenet.preprocess_input(x)
x = self.resnet(x)
return x
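# Hedged usage sketch of the wrapper above (shapes/arguments taken from this file only):
#   model = ResNet(data_shape=(224, 224, 3), resnet_version=1,
#                  resnet_layer_number=50, num_classes=1000)
#   logits = model(tf.zeros((1, 224, 224, 3)))  # augmentation -> rescale -> preprocess -> backbone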
class ResNetWork():
def __init__(self, args):
# dataset
train_data_dir = pathlib.Path(args.train_dataset_path)
test_data_dir = pathlib.Path(args.test_dataset_path)
self.image_height = args.image_height
self.image_width = args.image_width
data_shape = (args.image_height, args.image_width, 3)
batch_size = args.batchsize
pretrain_model_path_or_dir = args.pre_train_model_path_dir
# create model
self.model = ResNet(
data_shape = data_shape,
resnet_version=args.resnet_version,
resnet_layer_number=args.resnet_layer_number,
num_classes=args.classes)
if os.path.exists(pretrain_model_path_or_dir):
if args.use_whole_network_model:
dir = pretrain_model_path_or_dir
self.model = keras.models.load_model(dir)
print("Whole network load from {} dir".format(dir))
else:
path = pretrain_model_path_or_dir
self.model.load_weights(path)
print("Network model load from {}".format(path))
# Optimization
self.learning_rate = args.lr
self.epochs = args.epochs
if args.opt_type == 'Adam':
self.optimizer = tf.keras.optimizers.Adam(
learning_rate=args.lr)
elif args.opt_type == 'SGD':
self.optimizer = tf.keras.optimizers.SGD(
learning_rate=args.lr,
momentum=args.momentum)
elif args.opt_type == 'Adadelta':
self.optimizer = tf.keras.optimizers.Adadelta(
learning_rate=args.lr)
elif args.opt_type == 'Adamax':
self.optimizer = tf.keras.optimizers.Adamax(
learning_rate=args.lr)
elif args.opt_type == 'Ftrl':
self.optimizer = tf.keras.optimizers.Ftrl(
learning_rate=args.lr)
elif args.opt_type == 'Nadam':
self.optimizer = tf.keras.optimizers.Nadam(
learning_rate=args.lr)
else:
self.optimizer = tf.keras.optimizers.RMSprop(
learning_rate=args.lr)
self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# get the data set
train_image_count = 0
train_image_count += len(list(train_data_dir.glob('*/*.jpg')))
train_image_count += len(list(train_data_dir.glob('*/*.JPEG')))
print("train image number:", train_image_count)
test_image_count = 0
test_image_count += len(list(test_data_dir.glob('*/*.jpg')))
test_image_count += len(list(test_data_dir.glob('*/*.JPEG')))
print("Test image number:", test_image_count)
# train dataset
self.train_ds = tf.keras.preprocessing.image_dataset_from_directory(
train_data_dir,
#subset="training",
seed=123,
image_size=(args.image_height, args.image_width),
batch_size=batch_size)
self.class_names = self.train_ds.class_names
self.train_ds = self.train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
# valid/test dataset
self.test_ds = tf.keras.preprocessing.image_dataset_from_directory(
test_data_dir,
#subset="validation",
seed=123,
image_size=(args.image_height, args.image_width),
batch_size=batch_size)
self.test_ds = self.test_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
self.train_loss = tf.keras.metrics.Mean(name='train_loss')
self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
self.test_loss = tf.keras.metrics.Mean(name='valid_loss')
self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='vaild_accuracy')
@tf.function
def train_step(self, images, labels):
with tf.GradientTape() as tape:
predictions = self.model(images)
loss = self.loss_object(labels, predictions)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
self.train_loss(loss)
self.train_accuracy(labels, predictions)
# [end train_step]
@tf.function
def test_step(self, images, labels):
predictions = self.model(images)
t_loss = self.loss_object(labels, predictions)
self.test_loss(t_loss)
self.test_accuracy(labels, predictions)
# [end test_step]
def train(self):
# Model summary
self.model.summary()
for epoch in range(self.epochs):
self.train_loss.reset_states()
self.train_accuracy.reset_states()
self.test_loss.reset_states()
self.test_accuracy.reset_states()
try:
with tqdm(self.train_ds, ncols=80) as t:
for images, labels in t:
self.train_step(images, labels)
template = '[Train\t Epoch {}] Loss: {:.4f}, Accuracy: {:.4f}'
template = template.format(epoch+1, self.train_loss.result(), self.train_accuracy.result()*100)
t.set_description(desc=template)
except KeyboardInterrupt:
t.close()
raise
try:
with tqdm(self.test_ds, ncols=80) as t:
for test_images, test_labels in t:
self.test_step(test_images, test_labels)
template = '[Test\t Epoch {}] Loss: {:.4f}, Accuracy: {:.4f}'
template = template.format(epoch+1, self.test_loss.result(), self.test_accuracy.result()*100)
t.set_description(desc=template)
except KeyboardInterrupt:
t.close()
raise
# [end train]
def saveModel(self, path_or_dir, mode='save_weight'):
if mode == 'save_weight':
path = path_or_dir
self.model.save_weights(path)
print("Network model save to {}".format(path))
elif mode == 'whole_network':
dir = path_or_dir
self.model.save(dir)
print("Whole network save to {} dir".format(dir))
# [end saveModel]
def test(self, args):
if not os.path.exists(args.test_image):
return
image_path = args.test_image
img = keras.preprocessing.image.load_img(
image_path, target_size=(
args.image_height,
args.image_width)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
predictions = self.model.predict(img_array)
score = tf.nn.softmax(predictions[0])
import numpy as np
print("{} most likely belongs to {} with a {:.2f} percent confidence.".format(image_path, self.class_names[np.argmax(score)], 100 * np.max(score)))
# [end test]
|
py | 1a533a9cf9ea9c0f8ba1fda83f830c81d85c5194 | ###########
# testing #
###########
from exhibitionist.isubscriber import ISubscriber
from exhibitionist.pubsubdispatch import PubSubDispatch
import unittest
import time
import threading
class IOLoopMock(object):
def add_callback(self, callback):
# import random
# time.sleep(random.random()*0.05)
callback()
pass
def running(self):
return True
class Testpubsubdispatch(unittest.TestCase):
def setUp(self):
self.pubsubdispatch = PubSubDispatch(IOLoopMock())
def tearDown(self):
pass
@staticmethod
def wait_for_predicate(pred, timeout, interval=None):
interval = interval or timeout
waited = 0
while waited <= timeout:
if pred():
return True
time.sleep(interval)
waited += interval
return False
def test_message_rx_tx(self):
l = []
class A(ISubscriber):
def notify(self,channel, payload):
l.append(payload)
self.pubsubdispatch.subscribe(A(), "ch1")
self.assertEqual(len(l), 0)
self.pubsubdispatch.publish(channel="ch1", payload="the payload")
self.wait_for_predicate(lambda: len(l), 1, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), "the payload")
# and again
self.pubsubdispatch.publish(channel="ch1", payload="the payload2")
self.wait_for_predicate(lambda: len(l), 1, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), "the payload2")
# two receivers
self.assertEqual(len(l), 0)
self.pubsubdispatch.subscribe(A(), "ch1")
self.pubsubdispatch.publish(channel="ch1", payload="the payload3")
self.wait_for_predicate(lambda: len(l) >= 2, 1, 0.001)
self.assertEqual(len(l), 2)
self.assertEqual(l.pop(), "the payload3")
self.assertEqual(l.pop(), "the payload3")
# just the registered channels get the messages for a channel
self.assertEqual(len(l), 0)
self.pubsubdispatch.subscribe(A(), "ch2")
self.pubsubdispatch.publish(channel="ch1", payload="the payload4")
self.wait_for_predicate(lambda: len(l) >= 2, 1, 0.001)
self.assertEqual(len(l), 2)
self.assertEqual(l.pop(), "the payload4")
self.assertEqual(l.pop(), "the payload4")
self.assertEqual(len(l), 0)
self.pubsubdispatch.publish(channel="ch2", payload="the payload5")
self.wait_for_predicate(lambda: len(l) >= 1, 1, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), "the payload5")
def test_make_sure_we_dont_recieve_our_own_message(self):
l = []
class A(ISubscriber):
def notify(self,channel, payload):
l.append(payload)
a=A()
#do
self.pubsubdispatch.subscribe(a, "ch1")
self.assertEqual(len(l), 0)
self.pubsubdispatch.publish(channel="ch1", payload="the payload")
self.wait_for_predicate(lambda: len(l), 0.2, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), "the payload")
#don't
self.pubsubdispatch.publish(channel="ch1", payload="the payload",exclude=a)
self.wait_for_predicate(lambda: len(l), 0.2, 0.001)
self.assertEqual(len(l), 0)
def test_make_sure_we_dont_recieve_our_own_message_multiple_subs(self):
# make sure the other subscriber does get it, no matter the subscribe order
l = []
class A(ISubscriber):
def notify(self,channel, payload):
l.append(self)
a=A()
b=A()
#do
self.pubsubdispatch.subscribe(a, "ch1")
self.pubsubdispatch.subscribe(b, "ch1")
self.assertEqual(len(l), 0)
self.pubsubdispatch.publish(channel="ch1", payload="the payload",exclude=a)
self.wait_for_predicate(lambda: len(l), 0.2, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), b)
self.pubsubdispatch.publish(channel="ch1", payload="the payload",exclude=b)
self.wait_for_predicate(lambda: len(l), 0.2, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), a) |
py | 1a533acc1dd444c775c0ab8e93b1b20309936ef5 | with open('python.txt') as file_object:
contents = file_object.read()
print(contents.replace('python', 'java'))  # use the replace() method to replace any word in a string with a different word.
#print(contents)
#Reading line by line.
filename = 'python.txt'
with open(filename) as object:
for line in object:
print(line.rstrip())
#Making a list of lines from a file
filename = 'python.txt'
with open (filename) as object:
lines = object.readlines()
"""counting total letters"""
string = ' '
for line in lines:
string += line.strip()
print(line.rstrip())
print(len(line))
|
py | 1a533b0a0070caecf4438554293f03f810e5bc72 | import cv2
import numpy as np
cap = cv2.VideoCapture('video.mp4')
while(1):
# Take each frame
frame = cap.read()
print(frame)
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
print(hsv)
# define range of blue color in HSV
lower_blue = np.array([110, 50, 50])
upper_blue = np.array([130, 255, 255])
# Threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv, lower_blue, upper_blue)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame, frame, mask=mask)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.imshow('res', res)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
|
py | 1a533ba621531acafe24a60fa70f31697b7a676e | from __future__ import with_statement, absolute_import
import time
from contextlib import closing
import psycopg2
from . import print_row_progress, status_logger
from .postgres_writer import PostgresWriter
class PostgresDbWriter(PostgresWriter):
"""Class used to stream DDL and/or data
from a MySQL server to a PostgreSQL.
:Parameters:
- `db_options`: :py:obj:`dict` containing connection specific variables
- `verbose`: whether or not to log progress to :py:obj:`stdout`
"""
class FileObjFaker(object):
"""A file-like class to support streaming
        table data directly to :py:meth:`psycopg2.copy_from`.
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `data`:
- `processor`:
- `verbose`: whether or not to log progress to :py:obj:`stdout`
"""
def __init__(self, table, data, processor, verbose=False):
self.data = iter(data)
self.table = table
self.processor = processor
self.verbose = verbose
if verbose:
self.idx = 1
self.start_time = time.time()
self.prev_val_len = 0
self.prev_idx = 0
def readline(self, *args, **kwargs):
try:
                row = list(next(self.data))
except StopIteration:
if self.verbose:
print('')
return ''
else:
self.processor(self.table, row)
try:
return '%s\n' % ('\t'.join(row))
except UnicodeDecodeError:
return '%s\n' % ('\t'.join(r.decode('utf8') for r in row))
finally:
if self.verbose:
if (self.idx % 20000) == 0:
now = time.time()
elapsed = now - self.start_time
val = '%.2f rows/sec [%s] ' % ((self.idx - self.prev_idx) / elapsed, self.idx)
print_row_progress('%s%s' % (("\b" * self.prev_val_len), val)),
self.prev_val_len = len(val) + 3
self.start_time = now
self.prev_idx = self.idx + 0
self.idx += 1
def read(self, *args, **kwargs):
return self.readline(*args, **kwargs)
def __init__(self, db_options, verbose=False, *args, **kwargs):
super(PostgresDbWriter, self).__init__(*args, **kwargs)
self.execute_error_log = ''
self.verbose = verbose
self.db_options = {
'host': str(db_options['hostname']),
'port': db_options.get('port', 5432),
'database': str(db_options['database']),
            'password': str(db_options.get('password') or ''),
'user': str(db_options['username']),
}
if ':' in str(db_options['database']):
self.db_options['database'], self.schema = self.db_options['database'].split(':')
else:
self.schema = None
self.open()
def open(self):
self.conn = psycopg2.connect(**self.db_options)
with closing(self.conn.cursor()) as cur:
if self.schema:
cur.execute('SET search_path TO %s' % self.schema)
cur.execute('SET client_encoding = \'UTF8\'')
if self.conn.server_version >= 80200:
cur.execute('SET standard_conforming_strings = off')
cur.execute('SET check_function_bodies = false')
cur.execute('SET client_min_messages = warning')
def query(self, sql, args=(), one=False):
with closing(self.conn.cursor()) as cur:
cur.execute(sql, args)
return cur.fetchone() if one else cur
def execute(self, sql, args=(), many=False):
with closing(self.conn.cursor()) as cur:
try:
if many:
cur.executemany(sql, args)
else:
cur.execute(sql, args)
except Exception as e:
self.execute_error_log += '\n######POSTGRES SCRIPTS:######\n '+sql+'\n######ERROR:######\n '+str(e)
print('ERROR: '+str(e))
self.conn.commit()
def copy_from(self, file_obj, table_name, columns):
with closing(self.conn.cursor()) as cur:
cur.copy_from(file_obj,
table=table_name,
columns=columns
)
self.conn.commit()
def close(self):
"""Closes connection to the PostgreSQL server"""
self.conn.close()
def exists(self, relname):
        rc = self.query('SELECT COUNT(*) FROM pg_class WHERE relname = %s', (relname, ), one=True)
return rc and int(rc[0]) == 1
@status_logger
def truncate(self, table):
"""Send DDL to truncate the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
truncate_sql, serial_key_sql = super(PostgresDbWriter, self).truncate(table)
self.execute(truncate_sql)
if serial_key_sql:
self.execute(serial_key_sql)
@status_logger
def write_table(self, table):
"""Send DDL to create the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
table_sql, serial_key_sql, table_comment_sql = super(PostgresDbWriter, self).write_table(table)
for sql in serial_key_sql + table_sql:
self.execute(sql)
"""Execute comment with the error encoding(sometimes):
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe7 in position 94: ordinal not in range(128)
"""
for sql in table_comment_sql:
self.execute(sql)
@status_logger
def write_indexes(self, table):
"""Send DDL to create the specified `table` indexes
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
index_sql = super(PostgresDbWriter, self).write_indexes(table)
for sql in index_sql:
self.execute(sql)
@status_logger
def write_triggers(self, table):
"""Send DDL to create the specified `table` triggers
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
index_sql = super(PostgresDbWriter, self).write_triggers(table)
for sql in index_sql:
self.execute(sql)
@status_logger
def write_constraints(self, table):
"""Send DDL to create the specified `table` constraints
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
constraint_sql = super(PostgresDbWriter, self).write_constraints(table)
for sql in constraint_sql:
self.execute(sql)
@status_logger
def write_contents(self, table, reader):
"""Write the contents of `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source.
Returns None
"""
f = self.FileObjFaker(table, reader.read(table), self.process_row, self.verbose)
self.copy_from(f, '"%s"' % table.name, ['"%s"' % c['name'] for c in table.columns])
|
py | 1a533db63c060d155cc9e9abd5348fbeb648d553 | """Test MQTT fans."""
import copy
from unittest.mock import patch
import pytest
from voluptuous.error import MultipleInvalid
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_OSCILLATING,
ATTR_PERCENTAGE,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
NotValidPresetModeError,
)
from homeassistant.components.mqtt.fan import (
CONF_OSCILLATION_COMMAND_TOPIC,
CONF_OSCILLATION_STATE_TOPIC,
CONF_PERCENTAGE_COMMAND_TOPIC,
CONF_PERCENTAGE_STATE_TOPIC,
CONF_PRESET_MODE_COMMAND_TOPIC,
CONF_PRESET_MODE_STATE_TOPIC,
MQTT_FAN_ATTRIBUTES_BLOCKED,
)
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_encoding_subscribable_topics,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_publishing_with_custom_encoding,
help_test_reloadable,
help_test_reloadable_late,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_setup_manual_entity_from_yaml,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
from tests.components.fan import common
DEFAULT_CONFIG = {
fan.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
}
}
async def test_fail_setup_if_no_command_topic(hass, mqtt_mock):
"""Test if command fails with command topic."""
assert await async_setup_component(
hass, fan.DOMAIN, {fan.DOMAIN: {"platform": "mqtt", "name": "test"}}
)
await hass.async_block_till_done()
assert hass.states.get("fan.test") is None
async def test_controlling_state_via_topic(hass, mqtt_mock, caplog):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_off": "StAtE_OfF",
"payload_on": "StAtE_On",
"oscillation_state_topic": "oscillation-state-topic",
"oscillation_command_topic": "oscillation-command-topic",
"payload_oscillation_off": "OsC_OfF",
"payload_oscillation_on": "OsC_On",
"percentage_state_topic": "percentage-state-topic",
"percentage_command_topic": "percentage-command-topic",
"preset_mode_state_topic": "preset-mode-state-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": [
"auto",
"smart",
"whoosh",
"eco",
"breeze",
"silent",
],
"speed_range_min": 1,
"speed_range_max": 200,
"payload_reset_percentage": "rEset_percentage",
"payload_reset_preset_mode": "rEset_preset_mode",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "StAtE_On")
state = hass.states.get("fan.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "state-topic", "StAtE_OfF")
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get("oscillating") is False
async_fire_mqtt_message(hass, "oscillation-state-topic", "OsC_On")
state = hass.states.get("fan.test")
assert state.attributes.get("oscillating") is True
async_fire_mqtt_message(hass, "oscillation-state-topic", "OsC_OfF")
state = hass.states.get("fan.test")
assert state.attributes.get("oscillating") is False
assert state.attributes.get("percentage_step") == 1.0
async_fire_mqtt_message(hass, "percentage-state-topic", "0")
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 0
async_fire_mqtt_message(hass, "percentage-state-topic", "50")
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 25
async_fire_mqtt_message(hass, "percentage-state-topic", "100")
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 50
async_fire_mqtt_message(hass, "percentage-state-topic", "200")
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 100
async_fire_mqtt_message(hass, "percentage-state-topic", "202")
assert "not a valid speed within the speed range" in caplog.text
caplog.clear()
async_fire_mqtt_message(hass, "percentage-state-topic", "invalid")
assert "not a valid speed within the speed range" in caplog.text
caplog.clear()
async_fire_mqtt_message(hass, "preset-mode-state-topic", "low")
assert "not a valid preset mode" in caplog.text
caplog.clear()
async_fire_mqtt_message(hass, "preset-mode-state-topic", "auto")
state = hass.states.get("fan.test")
assert state.attributes.get("preset_mode") == "auto"
async_fire_mqtt_message(hass, "preset-mode-state-topic", "eco")
state = hass.states.get("fan.test")
assert state.attributes.get("preset_mode") == "eco"
async_fire_mqtt_message(hass, "preset-mode-state-topic", "silent")
state = hass.states.get("fan.test")
assert state.attributes.get("preset_mode") == "silent"
async_fire_mqtt_message(hass, "preset-mode-state-topic", "rEset_preset_mode")
state = hass.states.get("fan.test")
assert state.attributes.get("preset_mode") is None
async_fire_mqtt_message(hass, "preset-mode-state-topic", "ModeUnknown")
assert "not a valid preset mode" in caplog.text
caplog.clear()
async_fire_mqtt_message(hass, "percentage-state-topic", "rEset_percentage")
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) is None
async_fire_mqtt_message(hass, "state-topic", "None")
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
async def test_controlling_state_via_topic_with_different_speed_range(
hass, mqtt_mock, caplog
):
"""Test the controlling state via topic using an alternate speed range."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: [
{
"platform": "mqtt",
"name": "test1",
"command_topic": "command-topic",
"percentage_state_topic": "percentage-state-topic1",
"percentage_command_topic": "percentage-command-topic1",
"speed_range_min": 1,
"speed_range_max": 100,
},
{
"platform": "mqtt",
"name": "test2",
"command_topic": "command-topic",
"percentage_state_topic": "percentage-state-topic2",
"percentage_command_topic": "percentage-command-topic2",
"speed_range_min": 1,
"speed_range_max": 200,
},
{
"platform": "mqtt",
"name": "test3",
"command_topic": "command-topic",
"percentage_state_topic": "percentage-state-topic3",
"percentage_command_topic": "percentage-command-topic3",
"speed_range_min": 81,
"speed_range_max": 1023,
},
]
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "percentage-state-topic1", "100")
state = hass.states.get("fan.test1")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 100
async_fire_mqtt_message(hass, "percentage-state-topic2", "100")
state = hass.states.get("fan.test2")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 50
async_fire_mqtt_message(hass, "percentage-state-topic3", "1023")
state = hass.states.get("fan.test3")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 100
async_fire_mqtt_message(hass, "percentage-state-topic3", "80")
state = hass.states.get("fan.test3")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 0
state = hass.states.get("fan.test3")
async_fire_mqtt_message(hass, "percentage-state-topic3", "79")
assert "not a valid speed within the speed range" in caplog.text
caplog.clear()
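# Worked example of the speed-range scaling the assertions above and below rely on
# (hedged; assumed to mirror the homeassistant.util.percentage helpers):
#   states_in_range = speed_range_max - speed_range_min + 1
#   percentage      = (value - speed_range_min + 1) * 100 // states_in_range
# e.g. range (1, 200):   value 100  -> 100 * 100 // 200 = 50
#      range (81, 1023): value 1023 -> 943 * 100 // 943 = 100, value 80 -> 0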
async def test_controlling_state_via_topic_no_percentage_topics(
hass, mqtt_mock, caplog
):
"""Test the controlling state via topic without percentage topics."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"preset_mode_state_topic": "preset-mode-state-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": [
"auto",
"smart",
"whoosh",
"eco",
"breeze",
],
}
},
)
await hass.async_block_till_done()
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "preset-mode-state-topic", "smart")
state = hass.states.get("fan.test")
assert state.attributes.get("preset_mode") == "smart"
assert state.attributes.get(fan.ATTR_PERCENTAGE) is None
async_fire_mqtt_message(hass, "preset-mode-state-topic", "auto")
state = hass.states.get("fan.test")
assert state.attributes.get("preset_mode") == "auto"
assert state.attributes.get(fan.ATTR_PERCENTAGE) is None
async_fire_mqtt_message(hass, "preset-mode-state-topic", "whoosh")
state = hass.states.get("fan.test")
assert state.attributes.get("preset_mode") == "whoosh"
assert state.attributes.get(fan.ATTR_PERCENTAGE) is None
async_fire_mqtt_message(hass, "preset-mode-state-topic", "medium")
assert "not a valid preset mode" in caplog.text
caplog.clear()
async_fire_mqtt_message(hass, "preset-mode-state-topic", "low")
assert "not a valid preset mode" in caplog.text
caplog.clear()
async def test_controlling_state_via_topic_and_json_message(hass, mqtt_mock, caplog):
"""Test the controlling state via topic and JSON message (percentage mode)."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"oscillation_state_topic": "oscillation-state-topic",
"oscillation_command_topic": "oscillation-command-topic",
"percentage_state_topic": "percentage-state-topic",
"percentage_command_topic": "percentage-command-topic",
"preset_mode_state_topic": "preset-mode-state-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": [
"auto",
"smart",
"whoosh",
"eco",
"breeze",
"silent",
],
"state_value_template": "{{ value_json.val }}",
"oscillation_value_template": "{{ value_json.val }}",
"percentage_value_template": "{{ value_json.val }}",
"preset_mode_value_template": "{{ value_json.val }}",
"speed_range_min": 1,
"speed_range_max": 100,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", '{"val":"ON"}')
state = hass.states.get("fan.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "state-topic", '{"val": null}')
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
async_fire_mqtt_message(hass, "state-topic", '{"val":"OFF"}')
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get("oscillating") is False
async_fire_mqtt_message(hass, "oscillation-state-topic", '{"val":"oscillate_on"}')
state = hass.states.get("fan.test")
assert state.attributes.get("oscillating") is True
async_fire_mqtt_message(hass, "oscillation-state-topic", '{"val":"oscillate_off"}')
state = hass.states.get("fan.test")
assert state.attributes.get("oscillating") is False
async_fire_mqtt_message(hass, "percentage-state-topic", '{"val": 1}')
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 1
async_fire_mqtt_message(hass, "percentage-state-topic", '{"val": 100}')
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 100
async_fire_mqtt_message(hass, "percentage-state-topic", '{"val": "None"}')
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) is None
async_fire_mqtt_message(hass, "percentage-state-topic", '{"otherval": 100}')
assert "Ignoring empty speed from" in caplog.text
caplog.clear()
async_fire_mqtt_message(hass, "preset-mode-state-topic", '{"val": "low"}')
assert "not a valid preset mode" in caplog.text
caplog.clear()
async_fire_mqtt_message(hass, "preset-mode-state-topic", '{"val": "auto"}')
state = hass.states.get("fan.test")
assert state.attributes.get("preset_mode") == "auto"
async_fire_mqtt_message(hass, "preset-mode-state-topic", '{"val": "breeze"}')
state = hass.states.get("fan.test")
assert state.attributes.get("preset_mode") == "breeze"
async_fire_mqtt_message(hass, "preset-mode-state-topic", '{"val": "silent"}')
state = hass.states.get("fan.test")
assert state.attributes.get("preset_mode") == "silent"
async_fire_mqtt_message(hass, "preset-mode-state-topic", '{"val": "None"}')
state = hass.states.get("fan.test")
assert state.attributes.get("preset_mode") is None
async_fire_mqtt_message(hass, "preset-mode-state-topic", '{"otherval": 100}')
assert "Ignoring empty preset_mode from" in caplog.text
caplog.clear()
async def test_controlling_state_via_topic_and_json_message_shared_topic(
hass, mqtt_mock, caplog
):
"""Test the controlling state via topic and JSON message using a shared topic."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "shared-state-topic",
"command_topic": "command-topic",
"oscillation_state_topic": "shared-state-topic",
"oscillation_command_topic": "oscillation-command-topic",
"percentage_state_topic": "shared-state-topic",
"percentage_command_topic": "percentage-command-topic",
"preset_mode_state_topic": "shared-state-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": [
"auto",
"smart",
"whoosh",
"eco",
"breeze",
"silent",
],
"state_value_template": "{{ value_json.state }}",
"oscillation_value_template": "{{ value_json.oscillation }}",
"percentage_value_template": "{{ value_json.percentage }}",
"preset_mode_value_template": "{{ value_json.preset_mode }}",
"speed_range_min": 1,
"speed_range_max": 100,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(
hass,
"shared-state-topic",
'{"state":"ON","preset_mode":"eco","oscillation":"oscillate_on","percentage": 50}',
)
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get("oscillating") is True
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 50
assert state.attributes.get("preset_mode") == "eco"
async_fire_mqtt_message(
hass,
"shared-state-topic",
'{"state":"ON","preset_mode":"auto","oscillation":"oscillate_off","percentage": 10}',
)
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get("oscillating") is False
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 10
assert state.attributes.get("preset_mode") == "auto"
async_fire_mqtt_message(
hass,
"shared-state-topic",
'{"state":"OFF","preset_mode":"auto","oscillation":"oscillate_off","percentage": 0}',
)
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get("oscillating") is False
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 0
assert state.attributes.get("preset_mode") == "auto"
async_fire_mqtt_message(
hass,
"shared-state-topic",
'{"percentage": 100}',
)
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 100
assert state.attributes.get("preset_mode") == "auto"
assert "Ignoring empty preset_mode from" in caplog.text
assert "Ignoring empty state from" in caplog.text
assert "Ignoring empty oscillation from" in caplog.text
caplog.clear()
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock, caplog):
"""Test optimistic mode without state topic."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"payload_off": "StAtE_OfF",
"payload_on": "StAtE_On",
"oscillation_command_topic": "oscillation-command-topic",
"payload_oscillation_off": "OsC_OfF",
"payload_oscillation_on": "OsC_On",
"percentage_command_topic": "percentage-command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": [
"whoosh",
"breeze",
"silent",
],
}
},
)
await hass.async_block_till_done()
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test")
mqtt_mock.async_publish.assert_called_once_with(
"command-topic", "StAtE_On", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "fan.test")
mqtt_mock.async_publish.assert_called_once_with(
"command-topic", "StAtE_OfF", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_oscillate(hass, "fan.test", True)
mqtt_mock.async_publish.assert_called_once_with(
"oscillation-command-topic", "OsC_On", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_oscillate(hass, "fan.test", False)
mqtt_mock.async_publish.assert_called_once_with(
"oscillation-command-topic", "OsC_OfF", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
with pytest.raises(MultipleInvalid):
await common.async_set_percentage(hass, "fan.test", -1)
with pytest.raises(MultipleInvalid):
await common.async_set_percentage(hass, "fan.test", 101)
await common.async_set_percentage(hass, "fan.test", 100)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic", "100", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 100
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test", 0)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic", "0", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 0
assert state.attributes.get(ATTR_ASSUMED_STATE)
with pytest.raises(NotValidPresetModeError):
await common.async_set_preset_mode(hass, "fan.test", "low")
await common.async_set_preset_mode(hass, "fan.test", "whoosh")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "whoosh", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) == "whoosh"
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_preset_mode(hass, "fan.test", "breeze")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "breeze", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) == "breeze"
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_preset_mode(hass, "fan.test", "silent")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "silent", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) == "silent"
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_sending_mqtt_commands_with_alternate_speed_range(hass, mqtt_mock):
"""Test the controlling state via topic using an alternate speed range."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: [
{
"platform": "mqtt",
"name": "test1",
"command_topic": "command-topic",
"percentage_state_topic": "percentage-state-topic1",
"percentage_command_topic": "percentage-command-topic1",
"speed_range_min": 1,
"speed_range_max": 3,
},
{
"platform": "mqtt",
"name": "test2",
"command_topic": "command-topic",
"percentage_state_topic": "percentage-state-topic2",
"percentage_command_topic": "percentage-command-topic2",
"speed_range_min": 1,
"speed_range_max": 200,
},
{
"platform": "mqtt",
"name": "test3",
"command_topic": "command-topic",
"percentage_state_topic": "percentage-state-topic3",
"percentage_command_topic": "percentage-command-topic3",
"speed_range_min": 81,
"speed_range_max": 1023,
},
]
},
)
await hass.async_block_till_done()
await common.async_set_percentage(hass, "fan.test1", 0)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic1", "0", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test1")
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test1", 33)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic1", "1", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test1")
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test1", 66)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic1", "2", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test1")
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test1", 100)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic1", "3", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test1")
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test2", 0)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic2", "0", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test2")
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test2", 100)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic2", "200", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test2")
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test3", 0)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic3", "80", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test3")
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test3", 100)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic3", "1023", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test3")
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_sending_mqtt_commands_and_optimistic_no_legacy(hass, mqtt_mock, caplog):
"""Test optimistic mode without state topic without legacy speed command topic."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"percentage_command_topic": "percentage-command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": [
"whoosh",
"breeze",
"silent",
],
}
},
)
await hass.async_block_till_done()
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test")
mqtt_mock.async_publish.assert_called_once_with("command-topic", "ON", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "fan.test")
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
with pytest.raises(MultipleInvalid):
await common.async_set_percentage(hass, "fan.test", -1)
with pytest.raises(MultipleInvalid):
await common.async_set_percentage(hass, "fan.test", 101)
await common.async_set_percentage(hass, "fan.test", 100)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic", "100", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 100
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test", 0)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic", "0", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 0
assert state.attributes.get(ATTR_ASSUMED_STATE)
with pytest.raises(NotValidPresetModeError):
await common.async_set_preset_mode(hass, "fan.test", "low")
with pytest.raises(NotValidPresetModeError):
await common.async_set_preset_mode(hass, "fan.test", "auto")
await common.async_set_preset_mode(hass, "fan.test", "whoosh")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "whoosh", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) == "whoosh"
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_preset_mode(hass, "fan.test", "breeze")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "breeze", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) == "breeze"
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_preset_mode(hass, "fan.test", "silent")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "silent", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) == "silent"
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test", percentage=25)
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("command-topic", "ON", 0, False)
mqtt_mock.async_publish.assert_any_call("percentage-command-topic", "25", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "fan.test")
mqtt_mock.async_publish.assert_any_call("command-topic", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test", preset_mode="whoosh")
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("command-topic", "ON", 0, False)
mqtt_mock.async_publish.assert_any_call(
"preset-mode-command-topic", "whoosh", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
with pytest.raises(NotValidPresetModeError):
await common.async_turn_on(hass, "fan.test", preset_mode="freaking-high")
async def test_sending_mqtt_command_templates_(hass, mqtt_mock, caplog):
"""Test optimistic mode without state topic without legacy speed command topic."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"command_template": "state: {{ value }}",
"oscillation_command_topic": "oscillation-command-topic",
"oscillation_command_template": "oscillation: {{ value }}",
"percentage_command_topic": "percentage-command-topic",
"percentage_command_template": "percentage: {{ value }}",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_mode_command_template": "preset_mode: {{ value }}",
"preset_modes": [
"whoosh",
"breeze",
"silent",
],
}
},
)
await hass.async_block_till_done()
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test")
mqtt_mock.async_publish.assert_called_once_with(
"command-topic", "state: ON", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "fan.test")
mqtt_mock.async_publish.assert_called_once_with(
"command-topic", "state: OFF", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
with pytest.raises(MultipleInvalid):
await common.async_set_percentage(hass, "fan.test", -1)
with pytest.raises(MultipleInvalid):
await common.async_set_percentage(hass, "fan.test", 101)
await common.async_set_percentage(hass, "fan.test", 100)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic", "percentage: 100", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 100
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test", 0)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic", "percentage: 0", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PERCENTAGE) == 0
assert state.attributes.get(ATTR_ASSUMED_STATE)
with pytest.raises(NotValidPresetModeError):
await common.async_set_preset_mode(hass, "fan.test", "low")
with pytest.raises(NotValidPresetModeError):
await common.async_set_preset_mode(hass, "fan.test", "medium")
await common.async_set_preset_mode(hass, "fan.test", "whoosh")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "preset_mode: whoosh", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) == "whoosh"
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_preset_mode(hass, "fan.test", "breeze")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "preset_mode: breeze", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) == "breeze"
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_preset_mode(hass, "fan.test", "silent")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "preset_mode: silent", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) == "silent"
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test", percentage=25)
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("command-topic", "state: ON", 0, False)
mqtt_mock.async_publish.assert_any_call(
"percentage-command-topic", "percentage: 25", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "fan.test")
mqtt_mock.async_publish.assert_any_call("command-topic", "state: OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test", preset_mode="whoosh")
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("command-topic", "state: ON", 0, False)
mqtt_mock.async_publish.assert_any_call(
"preset-mode-command-topic", "preset_mode: whoosh", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
with pytest.raises(NotValidPresetModeError):
await common.async_turn_on(hass, "fan.test", preset_mode="low")
async def test_sending_mqtt_commands_and_optimistic_no_percentage_topic(
hass, mqtt_mock, caplog
):
"""Test optimistic mode without state topic without percentage command topic."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_mode_state_topic": "preset-mode-state-topic",
"preset_modes": [
"whoosh",
"breeze",
"silent",
"high",
],
}
},
)
await hass.async_block_till_done()
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ASSUMED_STATE)
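    # A preset_mode_state_topic is configured here, so preset mode is not optimistic:
    # the ATTR_PRESET_MODE attribute stays None until a state message is received.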
with pytest.raises(NotValidPresetModeError):
await common.async_set_preset_mode(hass, "fan.test", "medium")
await common.async_set_preset_mode(hass, "fan.test", "whoosh")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "whoosh", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) is None
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_preset_mode(hass, "fan.test", "breeze")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "breeze", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) is None
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_preset_mode(hass, "fan.test", "silent")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "silent", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.attributes.get(fan.ATTR_PRESET_MODE) is None
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_sending_mqtt_commands_and_explicit_optimistic(hass, mqtt_mock, caplog):
"""Test optimistic mode with state topic and turn on attributes."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"oscillation_state_topic": "oscillation-state-topic",
"oscillation_command_topic": "oscillation-command-topic",
"percentage_state_topic": "percentage-state-topic",
"percentage_command_topic": "percentage-command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_mode_state_topic": "preset-mode-state-topic",
"preset_modes": [
"whoosh",
"breeze",
"silent",
],
"optimistic": True,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test")
mqtt_mock.async_publish.assert_called_once_with("command-topic", "ON", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "fan.test")
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test", percentage=25)
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("command-topic", "ON", 0, False)
mqtt_mock.async_publish.assert_any_call("percentage-command-topic", "25", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "fan.test")
mqtt_mock.async_publish.assert_any_call("command-topic", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
with pytest.raises(NotValidPresetModeError):
await common.async_turn_on(hass, "fan.test", preset_mode="auto")
assert mqtt_mock.async_publish.call_count == 1
# We can turn on, but the invalid preset mode will raise
mqtt_mock.async_publish.assert_any_call("command-topic", "ON", 0, False)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "fan.test", preset_mode="whoosh")
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("command-topic", "ON", 0, False)
mqtt_mock.async_publish.assert_any_call(
"preset-mode-command-topic", "whoosh", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "fan.test")
mqtt_mock.async_publish.assert_any_call("command-topic", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test", preset_mode="silent")
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("command-topic", "ON", 0, False)
mqtt_mock.async_publish.assert_any_call(
"preset-mode-command-topic", "silent", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "fan.test")
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test", preset_mode="silent")
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("command-topic", "ON", 0, False)
mqtt_mock.async_publish.assert_any_call(
"preset-mode-command-topic", "silent", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "fan.test")
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_oscillate(hass, "fan.test", True)
mqtt_mock.async_publish.assert_called_once_with(
"oscillation-command-topic", "oscillate_on", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "fan.test", percentage=50)
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("command-topic", "ON", 0, False)
mqtt_mock.async_publish.assert_any_call("percentage-command-topic", "50", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "fan.test")
mqtt_mock.async_publish.assert_any_call("command-topic", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_oscillate(hass, "fan.test", False)
mqtt_mock.async_publish.assert_called_once_with(
"oscillation-command-topic", "oscillate_off", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test", 33)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic", "33", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test", 50)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic", "50", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test", 100)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic", "100", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_percentage(hass, "fan.test", 0)
mqtt_mock.async_publish.assert_called_once_with(
"percentage-command-topic", "0", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
with pytest.raises(MultipleInvalid):
await common.async_set_percentage(hass, "fan.test", 101)
with pytest.raises(NotValidPresetModeError):
await common.async_set_preset_mode(hass, "fan.test", "low")
with pytest.raises(NotValidPresetModeError):
await common.async_set_preset_mode(hass, "fan.test", "medium")
await common.async_set_preset_mode(hass, "fan.test", "whoosh")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "whoosh", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_set_preset_mode(hass, "fan.test", "silent")
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-command-topic", "silent", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
with pytest.raises(NotValidPresetModeError):
await common.async_set_preset_mode(hass, "fan.test", "freaking-high")
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
@pytest.mark.parametrize(
"topic,value,attribute,attribute_value",
[
("state_topic", "ON", None, "on"),
(CONF_PRESET_MODE_STATE_TOPIC, "auto", ATTR_PRESET_MODE, "auto"),
(CONF_PERCENTAGE_STATE_TOPIC, "60", ATTR_PERCENTAGE, 60),
(
CONF_OSCILLATION_STATE_TOPIC,
"oscillate_on",
ATTR_OSCILLATING,
True,
),
],
)
async def test_encoding_subscribable_topics(
hass, mqtt_mock, caplog, topic, value, attribute, attribute_value
):
"""Test handling of incoming encoded payload."""
config = copy.deepcopy(DEFAULT_CONFIG[fan.DOMAIN])
config[ATTR_PRESET_MODES] = ["eco", "auto"]
config[CONF_PRESET_MODE_COMMAND_TOPIC] = "fan/some_preset_mode_command_topic"
config[CONF_PERCENTAGE_COMMAND_TOPIC] = "fan/some_percentage_command_topic"
config[CONF_OSCILLATION_COMMAND_TOPIC] = "fan/some_oscillation_command_topic"
await help_test_encoding_subscribable_topics(
hass,
mqtt_mock,
caplog,
fan.DOMAIN,
config,
topic,
value,
attribute,
attribute_value,
)
async def test_attributes(hass, mqtt_mock, caplog):
"""Test attributes."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"oscillation_command_topic": "oscillation-command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"percentage_command_topic": "percentage-command-topic",
"preset_modes": [
"breeze",
"silent",
],
}
},
)
await hass.async_block_till_done()
state = hass.states.get("fan.test")
assert state.state == STATE_UNKNOWN
await common.async_turn_on(hass, "fan.test")
state = hass.states.get("fan.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
assert state.attributes.get(fan.ATTR_OSCILLATING) is None
await common.async_turn_off(hass, "fan.test")
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
assert state.attributes.get(fan.ATTR_OSCILLATING) is None
await common.async_oscillate(hass, "fan.test", True)
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
assert state.attributes.get(fan.ATTR_OSCILLATING) is True
await common.async_oscillate(hass, "fan.test", False)
state = hass.states.get("fan.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ASSUMED_STATE)
assert state.attributes.get(fan.ATTR_OSCILLATING) is False
async def test_supported_features(hass, mqtt_mock):
"""Test optimistic mode without state topic."""
assert await async_setup_component(
hass,
fan.DOMAIN,
{
fan.DOMAIN: [
{
"platform": "mqtt",
"name": "test1",
"command_topic": "command-topic",
},
{
"platform": "mqtt",
"name": "test2",
"command_topic": "command-topic",
"oscillation_command_topic": "oscillation-command-topic",
},
{
"platform": "mqtt",
"name": "test3b",
"command_topic": "command-topic",
"percentage_command_topic": "percentage-command-topic",
},
{
"platform": "mqtt",
"name": "test3c1",
"command_topic": "command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
},
{
"platform": "mqtt",
"name": "test3c2",
"command_topic": "command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": ["eco", "auto"],
},
{
"platform": "mqtt",
"name": "test3c3",
"command_topic": "command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": ["eco", "smart", "auto"],
},
{
"platform": "mqtt",
"name": "test4pcta",
"command_topic": "command-topic",
"percentage_command_topic": "percentage-command-topic",
},
{
"platform": "mqtt",
"name": "test4pctb",
"command_topic": "command-topic",
"oscillation_command_topic": "oscillation-command-topic",
"percentage_command_topic": "percentage-command-topic",
},
{
"platform": "mqtt",
"name": "test5pr_ma",
"command_topic": "command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": ["Mode1", "Mode2", "Mode3"],
},
{
"platform": "mqtt",
"name": "test5pr_mb",
"command_topic": "command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": ["whoosh", "silent", "auto"],
},
{
"platform": "mqtt",
"name": "test5pr_mc",
"command_topic": "command-topic",
"oscillation_command_topic": "oscillation-command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": ["Mode1", "Mode2", "Mode3"],
},
{
"platform": "mqtt",
"name": "test6spd_range_a",
"command_topic": "command-topic",
"percentage_command_topic": "percentage-command-topic",
"speed_range_min": 1,
"speed_range_max": 40,
},
{
"platform": "mqtt",
"name": "test6spd_range_b",
"command_topic": "command-topic",
"percentage_command_topic": "percentage-command-topic",
"speed_range_min": 50,
"speed_range_max": 40,
},
{
"platform": "mqtt",
"name": "test6spd_range_c",
"command_topic": "command-topic",
"percentage_command_topic": "percentage-command-topic",
"speed_range_min": 0,
"speed_range_max": 40,
},
{
"platform": "mqtt",
"name": "test7reset_payload_in_preset_modes_a",
"command_topic": "command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": ["auto", "smart", "normal", "None"],
},
{
"platform": "mqtt",
"name": "test7reset_payload_in_preset_modes_b",
"command_topic": "command-topic",
"preset_mode_command_topic": "preset-mode-command-topic",
"preset_modes": ["whoosh", "silent", "auto", "None"],
"payload_reset_preset_mode": "normal",
},
]
},
)
await hass.async_block_till_done()
state = hass.states.get("fan.test1")
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 0
state = hass.states.get("fan.test2")
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == fan.SUPPORT_OSCILLATE
state = hass.states.get("fan.test3b")
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == fan.SUPPORT_SET_SPEED
state = hass.states.get("fan.test3c1")
assert state is None
state = hass.states.get("fan.test3c2")
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == fan.SUPPORT_PRESET_MODE
state = hass.states.get("fan.test3c3")
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == fan.SUPPORT_PRESET_MODE
state = hass.states.get("fan.test4pcta")
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == fan.SUPPORT_SET_SPEED
state = hass.states.get("fan.test4pctb")
assert (
state.attributes.get(ATTR_SUPPORTED_FEATURES)
== fan.SUPPORT_OSCILLATE | fan.SUPPORT_SET_SPEED
)
state = hass.states.get("fan.test5pr_ma")
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == fan.SUPPORT_PRESET_MODE
state = hass.states.get("fan.test5pr_mb")
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == fan.SUPPORT_PRESET_MODE
state = hass.states.get("fan.test5pr_mc")
assert (
state.attributes.get(ATTR_SUPPORTED_FEATURES)
== fan.SUPPORT_OSCILLATE | fan.SUPPORT_PRESET_MODE
)
state = hass.states.get("fan.test6spd_range_a")
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == fan.SUPPORT_SET_SPEED
assert state.attributes.get("percentage_step") == 2.5
state = hass.states.get("fan.test6spd_range_b")
assert state is None
state = hass.states.get("fan.test6spd_range_c")
assert state is None
state = hass.states.get("fan.test7reset_payload_in_preset_modes_a")
assert state is None
state = hass.states.get("fan.test7reset_payload_in_preset_modes_b")
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == fan.SUPPORT_PRESET_MODE
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG, True, "state-topic", "1"
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG, True, "state-topic", "1"
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG, MQTT_FAN_ATTRIBUTES_BLOCKED
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_json(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique_id option only creates one fan per id."""
config = {
fan.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, fan.DOMAIN, config)
async def test_discovery_removal_fan(hass, mqtt_mock, caplog):
"""Test removal of discovered fan."""
data = '{ "name": "test", "command_topic": "test_topic" }'
await help_test_discovery_removal(hass, mqtt_mock, caplog, fan.DOMAIN, data)
async def test_discovery_update_fan(hass, mqtt_mock, caplog):
"""Test update of discovered fan."""
config1 = {"name": "Beer", "command_topic": "test_topic"}
config2 = {"name": "Milk", "command_topic": "test_topic"}
await help_test_discovery_update(
hass, mqtt_mock, caplog, fan.DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_fan(hass, mqtt_mock, caplog):
"""Test update of discovered fan."""
data1 = '{ "name": "Beer", "command_topic": "test_topic" }'
with patch(
"homeassistant.components.mqtt.fan.MqttFan.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, fan.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = '{ "name": "Milk", "command_topic": "test_topic" }'
await help_test_discovery_broken(hass, mqtt_mock, caplog, fan.DOMAIN, data1, data2)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT fan device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT fan device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG, fan.SERVICE_TURN_ON
)
@pytest.mark.parametrize(
"service,topic,parameters,payload,template",
[
(
fan.SERVICE_TURN_ON,
"command_topic",
None,
"ON",
None,
),
(
fan.SERVICE_TURN_OFF,
"command_topic",
None,
"OFF",
None,
),
(
fan.SERVICE_SET_PRESET_MODE,
"preset_mode_command_topic",
{fan.ATTR_PRESET_MODE: "eco"},
"eco",
"preset_mode_command_template",
),
(
fan.SERVICE_SET_PERCENTAGE,
"percentage_command_topic",
{fan.ATTR_PERCENTAGE: "45"},
45,
"percentage_command_template",
),
(
fan.SERVICE_OSCILLATE,
"oscillation_command_topic",
{fan.ATTR_OSCILLATING: "on"},
"oscillate_on",
"oscillation_command_template",
),
],
)
async def test_publishing_with_custom_encoding(
hass,
mqtt_mock,
caplog,
service,
topic,
parameters,
payload,
template,
):
"""Test publishing MQTT payload with different encoding."""
domain = fan.DOMAIN
config = copy.deepcopy(DEFAULT_CONFIG[domain])
if topic == "preset_mode_command_topic":
config["preset_modes"] = ["auto", "eco"]
await help_test_publishing_with_custom_encoding(
hass,
mqtt_mock,
caplog,
domain,
config,
service,
topic,
parameters,
payload,
template,
)
async def test_reloadable(hass, mqtt_mock, caplog, tmp_path):
"""Test reloading the MQTT platform."""
domain = fan.DOMAIN
config = DEFAULT_CONFIG[domain]
await help_test_reloadable(hass, mqtt_mock, caplog, tmp_path, domain, config)
async def test_reloadable_late(hass, mqtt_client_mock, caplog, tmp_path):
"""Test reloading the MQTT platform with late entry setup."""
domain = fan.DOMAIN
config = DEFAULT_CONFIG[domain]
await help_test_reloadable_late(hass, caplog, tmp_path, domain, config)
async def test_setup_manual_entity_from_yaml(hass, caplog, tmp_path):
"""Test setup manual configured MQTT entity."""
platform = fan.DOMAIN
config = copy.deepcopy(DEFAULT_CONFIG[platform])
config["name"] = "test"
del config["platform"]
await help_test_setup_manual_entity_from_yaml(
hass, caplog, tmp_path, platform, config
)
assert hass.states.get(f"{platform}.test") is not None
|
py | 1a533db8602b4f59c1d87aa2df75195ec7b29ce6 | # Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
from .common import CMakeException, CMakeTarget, TargetOptions, CMakeConfiguration, language_map, check_cmake_args
from .client import CMakeClient, RequestCMakeInputs, RequestConfigure, RequestCompute, RequestCodeModel, ReplyCMakeInputs, ReplyCodeModel
from .fileapi import CMakeFileAPI
from .executor import CMakeExecutor
from .toolchain import CMakeToolchain, CMakeExecScope
from .traceparser import CMakeTraceParser, CMakeGeneratorTarget
from .. import mlog, mesonlib
from ..mesonlib import MachineChoice, OrderedSet, version_compare, path_is_in_root, relative_to_if_possible, OptionKey
from ..mesondata import mesondata
from ..compilers.compilers import lang_suffixes, header_suffixes, obj_suffixes, lib_suffixes, is_header
from enum import Enum
from functools import lru_cache
from pathlib import Path
import typing as T
import re
from os import environ
from ..mparser import (
Token,
BaseNode,
CodeBlockNode,
FunctionNode,
ArrayNode,
ArgumentNode,
AssignmentNode,
BooleanNode,
StringNode,
IdNode,
IndexNode,
MethodNode,
NumberNode,
)
if T.TYPE_CHECKING:
from ..build import Build
from ..backend.backends import Backend
from ..environment import Environment
TYPE_mixed = T.Union[str, int, bool, Path, BaseNode]
TYPE_mixed_list = T.Union[TYPE_mixed, T.Sequence[TYPE_mixed]]
TYPE_mixed_kwargs = T.Dict[str, TYPE_mixed_list]
# Disable all warnings automatically enabled with --trace and friends
# See https://cmake.org/cmake/help/latest/variable/CMAKE_POLICY_WARNING_CMPNNNN.html
disable_policy_warnings = [
'CMP0025',
'CMP0047',
'CMP0056',
'CMP0060',
'CMP0065',
'CMP0066',
'CMP0067',
'CMP0082',
'CMP0089',
'CMP0102',
]
backend_generator_map = {
'ninja': 'Ninja',
'xcode': 'Xcode',
'vs2010': 'Visual Studio 10 2010',
'vs2015': 'Visual Studio 15 2017',
'vs2017': 'Visual Studio 15 2017',
'vs2019': 'Visual Studio 16 2019',
}
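# Maps CMake target types to the Meson functions used to declare them (see meson_func()).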
target_type_map = {
'STATIC_LIBRARY': 'static_library',
'MODULE_LIBRARY': 'shared_module',
'SHARED_LIBRARY': 'shared_library',
'EXECUTABLE': 'executable',
'OBJECT_LIBRARY': 'static_library',
'INTERFACE_LIBRARY': 'header_only'
}
skip_targets = ['UTILITY']
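# The flag lists below are filtered out of imported CMake targets; warning levels,
# optimisation and debug-info settings are assumed to be handled by Meson's own options.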
blacklist_compiler_flags = [
'-Wall', '-Wextra', '-Weverything', '-Werror', '-Wpedantic', '-pedantic', '-w',
'/W1', '/W2', '/W3', '/W4', '/Wall', '/WX', '/w',
'/O1', '/O2', '/Ob', '/Od', '/Og', '/Oi', '/Os', '/Ot', '/Ox', '/Oy', '/Ob0',
'/RTC1', '/RTCc', '/RTCs', '/RTCu',
'/Z7', '/Zi', '/ZI',
]
blacklist_link_flags = [
'/machine:x64', '/machine:x86', '/machine:arm', '/machine:ebc',
'/debug', '/debug:fastlink', '/debug:full', '/debug:none',
'/incremental',
]
blacklist_clang_cl_link_flags = ['/GR', '/EHsc', '/MDd', '/Zi', '/RTC1']
blacklist_link_libs = [
'kernel32.lib',
'user32.lib',
'gdi32.lib',
'winspool.lib',
'shell32.lib',
'ole32.lib',
'oleaut32.lib',
'uuid.lib',
'comdlg32.lib',
'advapi32.lib'
]
transfer_dependencies_from = ['header_only']
_cmake_name_regex = re.compile(r'[^_a-zA-Z0-9]')
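# Replace every character outside [_a-zA-Z0-9] with '_' and prefix 'cm_' so the result is a
# valid Meson target name, e.g. 'zlib-ng' becomes 'cm_zlib_ng'.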
def _sanitize_cmake_name(name: str) -> str:
name = _cmake_name_regex.sub('_', name)
return 'cm_' + name
class OutputTargetMap:
rm_so_version = re.compile(r'(\.[0-9]+)+$')
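    # Strips trailing shared-library version digits, e.g. 'libfoo.so.1.2.3' -> 'libfoo.so'.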
def __init__(self, build_dir: Path):
self.tgt_map = {} # type: T.Dict[str, T.Union['ConverterTarget', 'ConverterCustomTarget']]
self.build_dir = build_dir
def add(self, tgt: T.Union['ConverterTarget', 'ConverterCustomTarget']) -> None:
def assign_keys(keys: T.List[str]) -> None:
for i in [x for x in keys if x]:
self.tgt_map[i] = tgt
keys = [self._target_key(tgt.cmake_name)]
if isinstance(tgt, ConverterTarget):
keys += [tgt.full_name]
keys += [self._rel_artifact_key(x) for x in tgt.artifacts]
keys += [self._base_artifact_key(x) for x in tgt.artifacts]
if isinstance(tgt, ConverterCustomTarget):
keys += [self._rel_generated_file_key(x) for x in tgt.original_outputs]
keys += [self._base_generated_file_key(x) for x in tgt.original_outputs]
assign_keys(keys)
def _return_first_valid_key(self, keys: T.List[str]) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
for i in keys:
if i and i in self.tgt_map:
return self.tgt_map[i]
return None
def target(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
return self._return_first_valid_key([self._target_key(name)])
def executable(self, name: str) -> T.Optional['ConverterTarget']:
tgt = self.target(name)
if tgt is None or not isinstance(tgt, ConverterTarget):
return None
if tgt.meson_func() != 'executable':
return None
return tgt
def artifact(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
keys = []
candidates = [name, OutputTargetMap.rm_so_version.sub('', name)]
for i in lib_suffixes:
if not name.endswith('.' + i):
continue
new_name = name[:-len(i) - 1]
new_name = OutputTargetMap.rm_so_version.sub('', new_name)
candidates += ['{}.{}'.format(new_name, i)]
for i in candidates:
keys += [self._rel_artifact_key(Path(i)), Path(i).name, self._base_artifact_key(Path(i))]
return self._return_first_valid_key(keys)
def generated(self, name: Path) -> T.Optional['ConverterCustomTarget']:
res = self._return_first_valid_key([self._rel_generated_file_key(name), self._base_generated_file_key(name)])
assert res is None or isinstance(res, ConverterCustomTarget)
return res
# Utility functions to generate local keys
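    # Key formats: '__tgt_<name>__' for targets, '__relgen_<relpath>__' / '__gen_<filename>__'
    # for generated files and '__relart_<relpath>__' / '__art_<filename>__' for artifacts.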
def _rel_path(self, fname: Path) -> T.Optional[Path]:
try:
return fname.resolve().relative_to(self.build_dir)
except ValueError:
pass
return None
def _target_key(self, tgt_name: str) -> str:
return '__tgt_{}__'.format(tgt_name)
def _rel_generated_file_key(self, fname: Path) -> T.Optional[str]:
path = self._rel_path(fname)
return '__relgen_{}__'.format(path.as_posix()) if path else None
def _base_generated_file_key(self, fname: Path) -> str:
return '__gen_{}__'.format(fname.name)
def _rel_artifact_key(self, fname: Path) -> T.Optional[str]:
path = self._rel_path(fname)
return '__relart_{}__'.format(path.as_posix()) if path else None
def _base_artifact_key(self, fname: Path) -> str:
return '__art_{}__'.format(fname.name)
class ConverterTarget:
def __init__(self, target: CMakeTarget, env: 'Environment', for_machine: MachineChoice) -> None:
self.env = env
self.for_machine = for_machine
self.artifacts = target.artifacts
self.src_dir = target.src_dir
self.build_dir = target.build_dir
self.name = target.name
self.cmake_name = target.name
self.full_name = target.full_name
self.type = target.type
self.install = target.install
self.install_dir = None # type: T.Optional[Path]
self.link_libraries = target.link_libraries
self.link_flags = target.link_flags + target.link_lang_flags
self.depends_raw = [] # type: T.List[str]
self.depends = [] # type: T.List[T.Union[ConverterTarget, ConverterCustomTarget]]
if target.install_paths:
self.install_dir = target.install_paths[0]
self.languages = set() # type: T.Set[str]
self.sources = [] # type: T.List[Path]
self.generated = [] # type: T.List[Path]
self.generated_ctgt = [] # type: T.List[CustomTargetReference]
self.includes = [] # type: T.List[Path]
self.sys_includes = [] # type: T.List[Path]
self.link_with = [] # type: T.List[T.Union[ConverterTarget, ConverterCustomTarget]]
self.object_libs = [] # type: T.List[ConverterTarget]
self.compile_opts = {} # type: T.Dict[str, T.List[str]]
self.public_compile_opts = [] # type: T.List[str]
self.pie = False
# Project default override options (c_std, cpp_std, etc.)
self.override_options = [] # type: T.List[str]
# Convert the target name to a valid meson target name
self.name = _sanitize_cmake_name(self.name)
self.generated_raw = [] # type: T.List[Path]
for i in target.files:
languages = set() # type: T.Set[str]
src_suffixes = set() # type: T.Set[str]
# Insert suffixes
for j in i.sources:
if not j.suffix:
continue
src_suffixes.add(j.suffix[1:])
# Determine the meson language(s)
# Extract the default language from the explicit CMake field
lang_cmake_to_meson = {val.lower(): key for key, val in language_map.items()}
languages.add(lang_cmake_to_meson.get(i.language.lower(), 'c'))
# Determine missing languages from the source suffixes
for sfx in src_suffixes:
for key, val in lang_suffixes.items():
if sfx in val:
languages.add(key)
break
# Register the new languages and initialize the compile opts array
for lang in languages:
self.languages.add(lang)
if lang not in self.compile_opts:
self.compile_opts[lang] = []
# Add arguments, but avoid duplicates
args = i.flags
args += ['-D{}'.format(x) for x in i.defines]
for lang in languages:
self.compile_opts[lang] += [x for x in args if x not in self.compile_opts[lang]]
# Handle include directories
self.includes += [x.path for x in i.includes if x.path not in self.includes and not x.isSystem]
self.sys_includes += [x.path for x in i.includes if x.path not in self.sys_includes and x.isSystem]
# Add sources to the right array
if i.is_generated:
self.generated_raw += i.sources
else:
self.sources += i.sources
def __repr__(self) -> str:
return '<{}: {}>'.format(self.__class__.__name__, self.name)
std_regex = re.compile(r'([-]{1,2}std=|/std:v?|[-]{1,2}std:)(.*)')
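    # Matches standard-selection flags such as '-std=c++14', '--std=gnu99' or '/std:c++latest';
    # group 2 captures the standard itself.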
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: Path, subdir: Path, install_prefix: Path, trace: CMakeTraceParser) -> None:
# Detect setting the C and C++ standard and do additional compiler args manipulation
for i in ['c', 'cpp']:
if i not in self.compile_opts:
continue
temp = []
for j in self.compile_opts[i]:
m = ConverterTarget.std_regex.match(j)
ctgt = output_target_map.generated(Path(j))
if m:
std = m.group(2)
supported = self._all_lang_stds(i)
if std not in supported:
mlog.warning(
'Unknown {0}_std "{1}" -> Ignoring. Try setting the project-'
'level {0}_std if build errors occur. Known '
'{0}_stds are: {2}'.format(i, std, ' '.join(supported)),
once=True
)
continue
self.override_options += ['{}_std={}'.format(i, std)]
elif j in ['-fPIC', '-fpic', '-fPIE', '-fpie']:
self.pie = True
elif isinstance(ctgt, ConverterCustomTarget):
# Sometimes projects pass generated source files as compiler
# flags. Add these as generated sources to ensure that the
                    # corresponding custom target is run.
self.generated_raw += [Path(j)]
temp += [j]
elif j in blacklist_compiler_flags:
pass
else:
temp += [j]
self.compile_opts[i] = temp
# Make sure to force enable -fPIC for OBJECT libraries
if self.type.upper() == 'OBJECT_LIBRARY':
self.pie = True
# Use the CMake trace, if required
tgt = trace.targets.get(self.cmake_name)
if tgt:
self.depends_raw = trace.targets[self.cmake_name].depends
# TODO refactor this copy paste from CMakeDependency for future releases
reg_is_lib = re.compile(r'^(-l[a-zA-Z0-9_]+|-l?pthread)$')
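            # Matches bare link flags such as '-lz' or '-pthread' that name libraries rather than files.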
to_process = [self.cmake_name]
processed = []
while len(to_process) > 0:
curr = to_process.pop(0)
if curr in processed or curr not in trace.targets:
continue
tgt = trace.targets[curr]
cfgs = []
cfg = ''
otherDeps = []
libraries = []
mlog.debug(str(tgt))
if 'INTERFACE_INCLUDE_DIRECTORIES' in tgt.properties:
self.includes += [Path(x) for x in tgt.properties['INTERFACE_INCLUDE_DIRECTORIES'] if x]
if 'INTERFACE_LINK_OPTIONS' in tgt.properties:
self.link_flags += [x for x in tgt.properties['INTERFACE_LINK_OPTIONS'] if x]
if 'INTERFACE_COMPILE_DEFINITIONS' in tgt.properties:
self.public_compile_opts += ['-D' + re.sub('^-D', '', x) for x in tgt.properties['INTERFACE_COMPILE_DEFINITIONS'] if x]
if 'INTERFACE_COMPILE_OPTIONS' in tgt.properties:
self.public_compile_opts += [x for x in tgt.properties['INTERFACE_COMPILE_OPTIONS'] if x]
if 'IMPORTED_CONFIGURATIONS' in tgt.properties:
cfgs += [x for x in tgt.properties['IMPORTED_CONFIGURATIONS'] if x]
cfg = cfgs[0]
if 'CONFIGURATIONS' in tgt.properties:
cfgs += [x for x in tgt.properties['CONFIGURATIONS'] if x]
cfg = cfgs[0]
                is_debug = self.env.coredata.get_option(OptionKey('debug'))
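                # Pick the imported configuration matching the build type: prefer DEBUG for
                # debug builds and fall back to RELEASE otherwise.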
if is_debug:
if 'DEBUG' in cfgs:
cfg = 'DEBUG'
elif 'RELEASE' in cfgs:
cfg = 'RELEASE'
else:
if 'RELEASE' in cfgs:
cfg = 'RELEASE'
if 'IMPORTED_IMPLIB_{}'.format(cfg) in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_IMPLIB_{}'.format(cfg)] if x]
elif 'IMPORTED_IMPLIB' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_IMPLIB'] if x]
elif 'IMPORTED_LOCATION_{}'.format(cfg) in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_LOCATION_{}'.format(cfg)] if x]
elif 'IMPORTED_LOCATION' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_LOCATION'] if x]
if 'LINK_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['LINK_LIBRARIES'] if x]
if 'INTERFACE_LINK_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['INTERFACE_LINK_LIBRARIES'] if x]
if 'IMPORTED_LINK_DEPENDENT_LIBRARIES_{}'.format(cfg) in tgt.properties:
otherDeps += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES_{}'.format(cfg)] if x]
elif 'IMPORTED_LINK_DEPENDENT_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES'] if x]
for j in otherDeps:
if j in trace.targets:
to_process += [j]
elif reg_is_lib.match(j) or Path(j).exists():
libraries += [j]
for j in libraries:
if j not in self.link_libraries:
self.link_libraries += [j]
processed += [curr]
elif self.type.upper() not in ['EXECUTABLE', 'OBJECT_LIBRARY']:
mlog.warning('CMake: Target', mlog.bold(self.cmake_name), 'not found in CMake trace. This can lead to build errors')
temp = []
for i in self.link_libraries:
# Let meson handle this arcane magic
if ',-rpath,' in i:
continue
if not Path(i).is_absolute():
link_with = output_target_map.artifact(i)
if link_with:
self.link_with += [link_with]
continue
temp += [i]
self.link_libraries = temp
# Filter out files that are not supported by the language
supported = list(header_suffixes) + list(obj_suffixes)
for i in self.languages:
supported += list(lang_suffixes[i])
supported = ['.{}'.format(x) for x in supported]
self.sources = [x for x in self.sources if any([x.name.endswith(y) for y in supported])]
self.generated_raw = [x for x in self.generated_raw if any([x.name.endswith(y) for y in supported])]
# Make paths relative
def rel_path(x: Path, is_header: bool, is_generated: bool) -> T.Optional[Path]:
if not x.is_absolute():
x = self.src_dir / x
x = x.resolve()
assert x.is_absolute()
if not x.exists() and not any([x.name.endswith(y) for y in obj_suffixes]) and not is_generated:
if path_is_in_root(x, Path(self.env.get_build_dir()), resolve=True):
x.mkdir(parents=True, exist_ok=True)
return x.relative_to(Path(self.env.get_build_dir()) / subdir)
else:
mlog.warning('CMake: path', mlog.bold(x.as_posix()), 'does not exist.')
mlog.warning(' --> Ignoring. This can lead to build errors.')
return None
if x in trace.explicit_headers:
return None
if (
path_is_in_root(x, Path(self.env.get_source_dir()))
and not (
path_is_in_root(x, root_src_dir) or
path_is_in_root(x, Path(self.env.get_build_dir()))
)
):
mlog.warning('CMake: path', mlog.bold(x.as_posix()), 'is inside the root project but', mlog.bold('not'), 'inside the subproject.')
mlog.warning(' --> Ignoring. This can lead to build errors.')
return None
if path_is_in_root(x, Path(self.env.get_build_dir())) and is_header:
return x.relative_to(Path(self.env.get_build_dir()) / subdir)
if path_is_in_root(x, root_src_dir):
return x.relative_to(root_src_dir)
return x
build_dir_rel = self.build_dir.relative_to(Path(self.env.get_build_dir()) / subdir)
self.generated_raw = [rel_path(x, False, True) for x in self.generated_raw]
self.includes = list(OrderedSet([rel_path(x, True, False) for x in OrderedSet(self.includes)] + [build_dir_rel]))
self.sys_includes = list(OrderedSet([rel_path(x, True, False) for x in OrderedSet(self.sys_includes)]))
self.sources = [rel_path(x, False, False) for x in self.sources]
# Resolve custom targets
for gen_file in self.generated_raw:
ctgt = output_target_map.generated(gen_file)
if ctgt:
assert isinstance(ctgt, ConverterCustomTarget)
ref = ctgt.get_ref(gen_file)
assert isinstance(ref, CustomTargetReference) and ref.valid()
self.generated_ctgt += [ref]
elif gen_file is not None:
self.generated += [gen_file]
# Remove delete entries
self.includes = [x for x in self.includes if x is not None]
self.sys_includes = [x for x in self.sys_includes if x is not None]
self.sources = [x for x in self.sources if x is not None]
# Make sure '.' is always in the include directories
if Path('.') not in self.includes:
self.includes += [Path('.')]
# make install dir relative to the install prefix
if self.install_dir and self.install_dir.is_absolute():
if path_is_in_root(self.install_dir, install_prefix):
self.install_dir = self.install_dir.relative_to(install_prefix)
# Remove blacklisted options and libs
def check_flag(flag: str) -> bool:
if flag.lower() in blacklist_link_flags or flag in blacklist_compiler_flags + blacklist_clang_cl_link_flags:
return False
if flag.startswith('/D'):
return False
return True
self.link_libraries = [x for x in self.link_libraries if x.lower() not in blacklist_link_libs]
self.link_flags = [x for x in self.link_flags if check_flag(x)]
# Handle OSX frameworks
def handle_frameworks(flags: T.List[str]) -> T.List[str]:
res: T.List[str] = []
for i in flags:
p = Path(i)
if not p.exists() or not p.name.endswith('.framework'):
res += [i]
continue
res += ['-framework', p.stem]
return res
self.link_libraries = handle_frameworks(self.link_libraries)
self.link_flags = handle_frameworks(self.link_flags)
# Handle explicit CMake add_dependency() calls
for i in self.depends_raw:
dep_tgt = output_target_map.target(i)
if dep_tgt:
self.depends.append(dep_tgt)
def process_object_libs(self, obj_target_list: T.List['ConverterTarget'], linker_workaround: bool) -> None:
# Try to detect the object library(s) from the generated input sources
temp = [x for x in self.generated if any([x.name.endswith('.' + y) for y in obj_suffixes])]
stem = [x.stem for x in temp]
exts = self._all_source_suffixes()
# Temp now stores the source filenames of the object files
for i in obj_target_list:
source_files = [x.name for x in i.sources + i.generated]
for j in stem:
# On some platforms (specifically looking at you Windows with vs20xy backend) CMake does
                # not produce object files with the format `foo.cpp.obj`, instead it skips the language
# suffix and just produces object files like `foo.obj`. Thus we have to do our best to
# undo this step and guess the correct language suffix of the object file. This is done
# by trying all language suffixes meson knows and checking if one of them fits.
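                # e.g. a source 'foo.cpp' may show up as 'foo.obj' instead of 'foo.cpp.obj',
                # so both spellings are added to the candidate list below.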
candidates = [j] # type: T.List[str]
if not any([j.endswith('.' + x) for x in exts]):
mlog.warning('Object files do not contain source file extensions, thus falling back to guessing them.', once=True)
candidates += ['{}.{}'.format(j, x) for x in exts]
if any([x in source_files for x in candidates]):
if linker_workaround:
self._append_objlib_sources(i)
else:
self.includes += i.includes
self.includes = list(OrderedSet(self.includes))
self.object_libs += [i]
break
# Filter out object files from the sources
self.generated = [x for x in self.generated if not any([x.name.endswith('.' + y) for y in obj_suffixes])]
def _append_objlib_sources(self, tgt: 'ConverterTarget') -> None:
self.includes += tgt.includes
self.sources += tgt.sources
self.generated += tgt.generated
self.generated_ctgt += tgt.generated_ctgt
self.includes = list(OrderedSet(self.includes))
self.sources = list(OrderedSet(self.sources))
self.generated = list(OrderedSet(self.generated))
self.generated_ctgt = list(OrderedSet(self.generated_ctgt))
# Inherit compiler arguments since they may be required for building
for lang, opts in tgt.compile_opts.items():
if lang not in self.compile_opts:
self.compile_opts[lang] = []
self.compile_opts[lang] += [x for x in opts if x not in self.compile_opts[lang]]
@lru_cache(maxsize=None)
def _all_source_suffixes(self) -> T.List[str]:
suffixes = [] # type: T.List[str]
for exts in lang_suffixes.values():
suffixes += [x for x in exts]
return suffixes
@lru_cache(maxsize=None)
def _all_lang_stds(self, lang: str) -> T.List[str]:
try:
res = self.env.coredata.options[OptionKey('std', machine=MachineChoice.BUILD, lang=lang)].choices
except KeyError:
return []
        # TODO: Get rid of this once we have proper typing for options
assert isinstance(res, list)
for i in res:
assert isinstance(i, str)
return res
def process_inter_target_dependencies(self) -> None:
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(OrderedSet(new_deps))
def cleanup_dependencies(self) -> None:
        # Clear the dependencies from targets that were moved from
if self.meson_func() in transfer_dependencies_from:
self.depends = []
def meson_func(self) -> str:
return target_type_map.get(self.type.upper())
def log(self) -> None:
mlog.log('Target', mlog.bold(self.name), '({})'.format(self.cmake_name))
mlog.log(' -- artifacts: ', mlog.bold(str(self.artifacts)))
mlog.log(' -- full_name: ', mlog.bold(self.full_name))
mlog.log(' -- type: ', mlog.bold(self.type))
mlog.log(' -- install: ', mlog.bold('true' if self.install else 'false'))
mlog.log(' -- install_dir: ', mlog.bold(self.install_dir.as_posix() if self.install_dir else ''))
mlog.log(' -- link_libraries: ', mlog.bold(str(self.link_libraries)))
mlog.log(' -- link_with: ', mlog.bold(str(self.link_with)))
mlog.log(' -- object_libs: ', mlog.bold(str(self.object_libs)))
mlog.log(' -- link_flags: ', mlog.bold(str(self.link_flags)))
mlog.log(' -- languages: ', mlog.bold(str(self.languages)))
mlog.log(' -- includes: ', mlog.bold(str(self.includes)))
mlog.log(' -- sys_includes: ', mlog.bold(str(self.sys_includes)))
mlog.log(' -- sources: ', mlog.bold(str(self.sources)))
mlog.log(' -- generated: ', mlog.bold(str(self.generated)))
mlog.log(' -- generated_ctgt: ', mlog.bold(str(self.generated_ctgt)))
mlog.log(' -- pie: ', mlog.bold('true' if self.pie else 'false'))
mlog.log(' -- override_opts: ', mlog.bold(str(self.override_options)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
mlog.log(' -- options:')
for key, val in self.compile_opts.items():
mlog.log(' -', key, '=', mlog.bold(str(val)))
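# A CustomTargetReference identifies one specific output file of a ConverterCustomTarget,
# addressed by its index into the outputs list.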
class CustomTargetReference:
def __init__(self, ctgt: 'ConverterCustomTarget', index: int) -> None:
self.ctgt = ctgt # type: ConverterCustomTarget
self.index = index # type: int
def __repr__(self) -> str:
if self.valid():
return '<{}: {} [{}]>'.format(self.__class__.__name__, self.ctgt.name, self.ctgt.outputs[self.index])
else:
return '<{}: INVALID REFERENCE>'.format(self.__class__.__name__)
def valid(self) -> bool:
return self.ctgt is not None and self.index >= 0
def filename(self) -> str:
return self.ctgt.outputs[self.index]
class ConverterCustomTarget:
tgt_counter = 0 # type: int
out_counter = 0 # type: int
def __init__(self, target: CMakeGeneratorTarget, env: 'Environment', for_machine: MachineChoice) -> None:
assert target.current_bin_dir is not None
assert target.current_src_dir is not None
self.name = target.name
if not self.name:
self.name = 'custom_tgt_{}'.format(ConverterCustomTarget.tgt_counter)
ConverterCustomTarget.tgt_counter += 1
self.cmake_name = str(self.name)
self.original_outputs = list(target.outputs)
self.outputs = [x.name for x in self.original_outputs]
self.conflict_map = {} # type: T.Dict[str, str]
self.command = [] # type: T.List[T.List[T.Union[str, ConverterTarget]]]
self.working_dir = target.working_dir
self.depends_raw = target.depends
self.inputs = [] # type: T.List[T.Union[str, CustomTargetReference]]
self.depends = [] # type: T.List[T.Union[ConverterTarget, ConverterCustomTarget]]
self.current_bin_dir = target.current_bin_dir # type: Path
self.current_src_dir = target.current_src_dir # type: Path
self.env = env
self.for_machine = for_machine
self._raw_target = target
# Convert the target name to a valid meson target name
self.name = _sanitize_cmake_name(self.name)
def __repr__(self) -> str:
return '<{}: {} {}>'.format(self.__class__.__name__, self.name, self.outputs)
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: Path, all_outputs: T.List[str], trace: CMakeTraceParser) -> None:
# Default the working directory to ${CMAKE_CURRENT_BINARY_DIR}
if self.working_dir is None:
self.working_dir = self.current_bin_dir
# relative paths in the working directory are always relative
# to ${CMAKE_CURRENT_BINARY_DIR}
if not self.working_dir.is_absolute():
self.working_dir = self.current_bin_dir / self.working_dir
# Modify the original outputs if they are relative. Again,
# relative paths are relative to ${CMAKE_CURRENT_BINARY_DIR}
def ensure_absolute(x: Path) -> Path:
if x.is_absolute():
return x
else:
return self.current_bin_dir / x
self.original_outputs = [ensure_absolute(x) for x in self.original_outputs]
# Ensure that there is no duplicate output in the project so
# that meson can handle cases where the same filename is
# generated in multiple directories
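        # e.g. a second 'version.h' coming from another directory would be renamed to
        # something like 'c0_version.h' and the rename recorded in conflict_map.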
temp_outputs = [] # type: T.List[str]
for i in self.outputs:
if i in all_outputs:
old = str(i)
i = 'c{}_{}'.format(ConverterCustomTarget.out_counter, i)
ConverterCustomTarget.out_counter += 1
self.conflict_map[old] = i
all_outputs += [i]
temp_outputs += [i]
self.outputs = temp_outputs
# Check if the command is a build target
commands = [] # type: T.List[T.List[T.Union[str, ConverterTarget]]]
for curr_cmd in self._raw_target.command:
            assert isinstance(curr_cmd, list)
cmd = [] # type: T.List[T.Union[str, ConverterTarget]]
for j in curr_cmd:
if not j:
continue
target = output_target_map.executable(j)
if target:
# When cross compiling, binaries have to be executed with an exe_wrapper (for instance wine for mingw-w64)
if self.env.exe_wrapper is not None and self.env.properties[self.for_machine].get_cmake_use_exe_wrapper():
from ..dependencies import ExternalProgram
assert isinstance(self.env.exe_wrapper, ExternalProgram)
cmd += self.env.exe_wrapper.get_command()
cmd += [target]
continue
elif j in trace.targets:
trace_tgt = trace.targets[j]
if trace_tgt.type == 'EXECUTABLE' and 'IMPORTED_LOCATION' in trace_tgt.properties:
cmd += trace_tgt.properties['IMPORTED_LOCATION']
continue
mlog.debug('CMake: Found invalid CMake target "{}" --> ignoring \n{}'.format(j, trace_tgt))
# Fallthrough on error
cmd += [j]
commands += [cmd]
self.command = commands
# If the custom target does not declare any output, create a dummy
# one that can be used as a dependency.
if not self.outputs:
self.outputs = [self.name + '.h']
# Check dependencies and input files
for i in self.depends_raw:
if not i:
continue
raw = Path(i)
art = output_target_map.artifact(i)
tgt = output_target_map.target(i)
gen = output_target_map.generated(raw)
rel_to_root = None
try:
rel_to_root = raw.relative_to(root_src_dir)
except ValueError:
rel_to_root = None
# First check for existing files. Only then check for existing
# targets, etc. This reduces the chance of misdetecting input files
# as outputs from other targets.
# See https://github.com/mesonbuild/meson/issues/6632
if not raw.is_absolute() and (self.current_src_dir / raw).exists():
self.inputs += [(self.current_src_dir / raw).relative_to(root_src_dir).as_posix()]
elif raw.is_absolute() and raw.exists() and rel_to_root is not None:
self.inputs += [rel_to_root.as_posix()]
elif art:
self.depends += [art]
elif tgt:
self.depends += [tgt]
elif gen:
ctgt_ref = gen.get_ref(raw)
assert ctgt_ref is not None
self.inputs += [ctgt_ref]
def process_inter_target_dependencies(self) -> None:
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(OrderedSet(new_deps))
def get_ref(self, fname: Path) -> T.Optional[CustomTargetReference]:
name = fname.name
try:
if name in self.conflict_map:
name = self.conflict_map[name]
idx = self.outputs.index(name)
return CustomTargetReference(self, idx)
except ValueError:
return None
def log(self) -> None:
mlog.log('Custom Target', mlog.bold(self.name), '({})'.format(self.cmake_name))
mlog.log(' -- command: ', mlog.bold(str(self.command)))
mlog.log(' -- outputs: ', mlog.bold(str(self.outputs)))
mlog.log(' -- conflict_map: ', mlog.bold(str(self.conflict_map)))
mlog.log(' -- working_dir: ', mlog.bold(str(self.working_dir)))
mlog.log(' -- depends_raw: ', mlog.bold(str(self.depends_raw)))
mlog.log(' -- inputs: ', mlog.bold(str(self.inputs)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
class CMakeAPI(Enum):
SERVER = 1
FILE = 2
class CMakeInterpreter:
def __init__(self, build: 'Build', subdir: Path, src_dir: Path, install_prefix: Path, env: 'Environment', backend: 'Backend'):
self.build = build
self.subdir = subdir
self.src_dir = src_dir
self.build_dir_rel = subdir / '__CMake_build'
self.build_dir = Path(env.get_build_dir()) / self.build_dir_rel
self.install_prefix = install_prefix
self.env = env
self.for_machine = MachineChoice.HOST # TODO make parameter
self.backend_name = backend.name
self.linkers = set() # type: T.Set[str]
self.cmake_api = CMakeAPI.SERVER
self.client = CMakeClient(self.env)
self.fileapi = CMakeFileAPI(self.build_dir)
# Raw CMake results
self.bs_files = [] # type: T.List[Path]
self.codemodel_configs = None # type: T.Optional[T.List[CMakeConfiguration]]
self.raw_trace = None # type: T.Optional[str]
# Analysed data
self.project_name = ''
self.languages = [] # type: T.List[str]
self.targets = [] # type: T.List[ConverterTarget]
self.custom_targets = [] # type: T.List[ConverterCustomTarget]
self.trace = CMakeTraceParser('', Path('.')) # Will be replaced in analyse
self.output_target_map = OutputTargetMap(self.build_dir)
# Generated meson data
self.generated_targets = {} # type: T.Dict[str, T.Dict[str, T.Optional[str]]]
self.internal_name_map = {} # type: T.Dict[str, str]
# Do some special handling for object libraries for certain configurations
self._object_lib_workaround = False
if self.backend_name.startswith('vs'):
for comp in self.env.coredata.compilers[self.for_machine].values():
if comp.get_linker_id() == 'link':
self._object_lib_workaround = True
break
def configure(self, extra_cmake_options: T.List[str]) -> CMakeExecutor:
# Find CMake
# TODO: Using MachineChoice.BUILD should always be correct here, but also evaluate the use of self.for_machine
cmake_exe = CMakeExecutor(self.env, '>=3.7', MachineChoice.BUILD)
if not cmake_exe.found():
raise CMakeException('Unable to find CMake')
self.trace = CMakeTraceParser(cmake_exe.version(), self.build_dir, permissive=True)
preload_file = mesondata['cmake/data/preload.cmake'].write_to_private(self.env)
toolchain = CMakeToolchain(self.env, self.for_machine, CMakeExecScope.SUBPROJECT, self.build_dir.parent, preload_file)
toolchain_file = toolchain.write()
# TODO: drop this check once the deprecated `cmake_args` kwarg is removed
extra_cmake_options = check_cmake_args(extra_cmake_options)
generator = backend_generator_map[self.backend_name]
cmake_args = []
cmake_args += ['-G', generator]
cmake_args += ['-DCMAKE_INSTALL_PREFIX={}'.format(self.install_prefix)]
cmake_args += extra_cmake_options
trace_args = self.trace.trace_args()
cmcmp_args = ['-DCMAKE_POLICY_WARNING_{}=OFF'.format(x) for x in disable_policy_warnings]
if version_compare(cmake_exe.version(), '>=3.14'):
self.cmake_api = CMakeAPI.FILE
self.fileapi.setup_request()
# Run CMake
mlog.log()
with mlog.nested():
mlog.log('Configuring the build directory with', mlog.bold('CMake'), 'version', mlog.cyan(cmake_exe.version()))
mlog.log(mlog.bold('Running CMake with:'), ' '.join(cmake_args))
mlog.log(mlog.bold(' - build directory: '), self.build_dir.as_posix())
mlog.log(mlog.bold(' - source directory: '), self.src_dir.as_posix())
mlog.log(mlog.bold(' - toolchain file: '), toolchain_file.as_posix())
mlog.log(mlog.bold(' - preload file: '), preload_file.as_posix())
mlog.log(mlog.bold(' - trace args: '), ' '.join(trace_args))
mlog.log(mlog.bold(' - disabled policy warnings:'), '[{}]'.format(', '.join(disable_policy_warnings)))
mlog.log()
self.build_dir.mkdir(parents=True, exist_ok=True)
os_env = environ.copy()
os_env['LC_ALL'] = 'C'
final_args = cmake_args + trace_args + cmcmp_args + toolchain.get_cmake_args() + [self.src_dir.as_posix()]
cmake_exe.set_exec_mode(print_cmout=True, always_capture_stderr=self.trace.requires_stderr())
rc, _, self.raw_trace = cmake_exe.call(final_args, self.build_dir, env=os_env, disable_cache=True)
mlog.log()
h = mlog.green('SUCCEEDED') if rc == 0 else mlog.red('FAILED')
mlog.log('CMake configuration:', h)
if rc != 0:
raise CMakeException('Failed to configure the CMake subproject')
return cmake_exe
def initialise(self, extra_cmake_options: T.List[str]) -> None:
# Run configure the old way because doing it
# with the server doesn't work for some reason
# Additionally, the File API requires a configure anyway
cmake_exe = self.configure(extra_cmake_options)
# Continue with the file API if supported
if self.cmake_api is CMakeAPI.FILE:
# Parse the result
self.fileapi.load_reply()
# Load the buildsystem file list
cmake_files = self.fileapi.get_cmake_sources()
self.bs_files = [x.file for x in cmake_files if not x.is_cmake and not x.is_temp]
self.bs_files = [relative_to_if_possible(x, Path(self.env.get_source_dir())) for x in self.bs_files]
self.bs_files = list(OrderedSet(self.bs_files))
# Load the codemodel configurations
self.codemodel_configs = self.fileapi.get_cmake_configurations()
return
with self.client.connect(cmake_exe):
generator = backend_generator_map[self.backend_name]
self.client.do_handshake(self.src_dir, self.build_dir, generator, 1)
# Do a second configure to initialise the server
self.client.query_checked(RequestConfigure(), 'CMake server configure')
# Generate the build system files
self.client.query_checked(RequestCompute(), 'Generating build system files')
# Get CMake build system files
bs_reply = self.client.query_checked(RequestCMakeInputs(), 'Querying build system files')
assert isinstance(bs_reply, ReplyCMakeInputs)
# Now get the CMake code model
cm_reply = self.client.query_checked(RequestCodeModel(), 'Querying the CMake code model')
assert isinstance(cm_reply, ReplyCodeModel)
src_dir = bs_reply.src_dir
self.bs_files = [x.file for x in bs_reply.build_files if not x.is_cmake and not x.is_temp]
self.bs_files = [relative_to_if_possible(src_dir / x, Path(self.env.get_source_dir()), resolve=True) for x in self.bs_files]
self.bs_files = list(OrderedSet(self.bs_files))
self.codemodel_configs = cm_reply.configs
def analyse(self) -> None:
if self.codemodel_configs is None:
raise CMakeException('CMakeInterpreter was not initialized')
# Clear analyser data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = []
# Parse the trace
self.trace.parse(self.raw_trace)
# Find all targets
added_target_names = [] # type: T.List[str]
for i_0 in self.codemodel_configs:
for j_0 in i_0.projects:
if not self.project_name:
self.project_name = j_0.name
for k_0 in j_0.targets:
# Avoid duplicate targets from different configurations and known
# dummy CMake internal target types
if k_0.type not in skip_targets and k_0.name not in added_target_names:
added_target_names += [k_0.name]
self.targets += [ConverterTarget(k_0, self.env, self.for_machine)]
# Add interface targets from trace, if not already present.
# This step is required because interface targets were removed from
# the CMake file API output.
api_target_name_list = [x.name for x in self.targets]
for i_1 in self.trace.targets.values():
if i_1.type != 'INTERFACE' or i_1.name in api_target_name_list or i_1.imported:
continue
dummy = CMakeTarget({
'name': i_1.name,
'type': 'INTERFACE_LIBRARY',
'sourceDirectory': self.src_dir,
'buildDirectory': self.build_dir,
})
self.targets += [ConverterTarget(dummy, self.env, self.for_machine)]
for i_2 in self.trace.custom_targets:
self.custom_targets += [ConverterCustomTarget(i_2, self.env, self.for_machine)]
# generate the output_target_map
for i_3 in [*self.targets, *self.custom_targets]:
assert isinstance(i_3, (ConverterTarget, ConverterCustomTarget))
self.output_target_map.add(i_3)
# First pass: Basic target cleanup
object_libs = []
custom_target_outputs = [] # type: T.List[str]
for ctgt in self.custom_targets:
ctgt.postprocess(self.output_target_map, self.src_dir, custom_target_outputs, self.trace)
for tgt in self.targets:
tgt.postprocess(self.output_target_map, self.src_dir, self.subdir, self.install_prefix, self.trace)
if tgt.type == 'OBJECT_LIBRARY':
object_libs += [tgt]
self.languages += [x for x in tgt.languages if x not in self.languages]
# Second pass: Detect object library dependencies
for tgt in self.targets:
tgt.process_object_libs(object_libs, self._object_lib_workaround)
# Third pass: Reassign dependencies to avoid some loops
for tgt in self.targets:
tgt.process_inter_target_dependencies()
for ctgt in self.custom_targets:
ctgt.process_inter_target_dependencies()
# Fourth pass: Remove reassigned dependencies
for tgt in self.targets:
tgt.cleanup_dependencies()
mlog.log('CMake project', mlog.bold(self.project_name), 'has', mlog.bold(str(len(self.targets) + len(self.custom_targets))), 'build targets.')
def pretend_to_be_meson(self, options: TargetOptions) -> CodeBlockNode:
if not self.project_name:
raise CMakeException('CMakeInterpreter was not analysed')
def token(tid: str = 'string', val: TYPE_mixed = '') -> Token:
return Token(tid, self.subdir.as_posix(), 0, 0, 0, None, val)
def string(value: str) -> StringNode:
return StringNode(token(val=value))
def id_node(value: str) -> IdNode:
return IdNode(token(val=value))
def number(value: int) -> NumberNode:
return NumberNode(token(val=value))
def nodeify(value: TYPE_mixed_list) -> BaseNode:
if isinstance(value, str):
return string(value)
if isinstance(value, Path):
return string(value.as_posix())
elif isinstance(value, bool):
return BooleanNode(token(val=value))
elif isinstance(value, int):
return number(value)
elif isinstance(value, list):
return array(value)
elif isinstance(value, BaseNode):
return value
raise RuntimeError('invalid type of value: {} ({})'.format(type(value).__name__, str(value)))
def indexed(node: BaseNode, index: int) -> IndexNode:
return IndexNode(node, nodeify(index))
def array(elements: TYPE_mixed_list) -> ArrayNode:
args = ArgumentNode(token())
if not isinstance(elements, list):
elements = [args]
args.arguments += [nodeify(x) for x in elements if x is not None]
return ArrayNode(args, 0, 0, 0, 0)
def function(name: str, args: T.Optional[TYPE_mixed_list] = None, kwargs: T.Optional[TYPE_mixed_kwargs] = None) -> FunctionNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
assert isinstance(args, (str, int, bool, Path, BaseNode))
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None}
func_n = FunctionNode(self.subdir.as_posix(), 0, 0, 0, 0, name, args_n)
return func_n
def method(obj: BaseNode, name: str, args: T.Optional[TYPE_mixed_list] = None, kwargs: T.Optional[TYPE_mixed_kwargs] = None) -> MethodNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
assert isinstance(args, (str, int, bool, Path, BaseNode))
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None}
return MethodNode(self.subdir.as_posix(), 0, 0, obj, name, args_n)
def assign(var_name: str, value: BaseNode) -> AssignmentNode:
return AssignmentNode(self.subdir.as_posix(), 0, 0, var_name, value)
# Generate the root code block and the project function call
root_cb = CodeBlockNode(token())
root_cb.lines += [function('project', [self.project_name] + self.languages)]
# Add the run script for custom commands
# Add the targets
processing = [] # type: T.List[str]
processed = {} # type: T.Dict[str, T.Dict[str, T.Optional[str]]]
name_map = {} # type: T.Dict[str, str]
def extract_tgt(tgt: T.Union[ConverterTarget, ConverterCustomTarget, CustomTargetReference]) -> IdNode:
tgt_name = None
if isinstance(tgt, (ConverterTarget, ConverterCustomTarget)):
tgt_name = tgt.name
elif isinstance(tgt, CustomTargetReference):
tgt_name = tgt.ctgt.name
assert(tgt_name is not None and tgt_name in processed)
res_var = processed[tgt_name]['tgt']
return id_node(res_var) if res_var else None
def detect_cycle(tgt: T.Union[ConverterTarget, ConverterCustomTarget]) -> None:
if tgt.name in processing:
raise CMakeException('Cycle in CMake inputs/dependencies detected')
processing.append(tgt.name)
def resolve_ctgt_ref(ref: CustomTargetReference) -> T.Union[IdNode, IndexNode]:
tgt_var = extract_tgt(ref)
if len(ref.ctgt.outputs) == 1:
return tgt_var
else:
return indexed(tgt_var, ref.index)
def process_target(tgt: ConverterTarget) -> None:
detect_cycle(tgt)
# First handle inter target dependencies
link_with = [] # type: T.List[IdNode]
objec_libs = [] # type: T.List[IdNode]
sources = [] # type: T.List[Path]
generated = [] # type: T.List[T.Union[IdNode, IndexNode]]
generated_filenames = [] # type: T.List[str]
custom_targets = [] # type: T.List[ConverterCustomTarget]
dependencies = [] # type: T.List[IdNode]
for i in tgt.link_with:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
link_with += [extract_tgt(i)]
for i in tgt.object_libs:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
objec_libs += [extract_tgt(i)]
for i in tgt.depends:
if not isinstance(i, ConverterCustomTarget):
continue
if i.name not in processed:
process_custom_target(i)
dependencies += [extract_tgt(i)]
# Generate the source list and handle generated sources
sources += tgt.sources
sources += tgt.generated
for ctgt_ref in tgt.generated_ctgt:
ctgt = ctgt_ref.ctgt
if ctgt.name not in processed:
process_custom_target(ctgt)
generated += [resolve_ctgt_ref(ctgt_ref)]
generated_filenames += [ctgt_ref.filename()]
if ctgt not in custom_targets:
custom_targets += [ctgt]
# Add all header files from all used custom targets. This
# ensures that all custom targets are built before any
# sources of the current target are compiled and thus all
# header files are present. This step is necessary because
# CMake always ensures that a custom target is executed
# before another target if at least one output is used.
for ctgt in custom_targets:
for j in ctgt.outputs:
if not is_header(j) or j in generated_filenames:
continue
generated += [resolve_ctgt_ref(ctgt.get_ref(Path(j)))]
generated_filenames += [j]
# Determine the meson function to use for the build target
tgt_func = tgt.meson_func()
if not tgt_func:
raise CMakeException('Unknown target type "{}"'.format(tgt.type))
# Determine the variable names
inc_var = '{}_inc'.format(tgt.name)
dir_var = '{}_dir'.format(tgt.name)
sys_var = '{}_sys'.format(tgt.name)
src_var = '{}_src'.format(tgt.name)
dep_var = '{}_dep'.format(tgt.name)
tgt_var = tgt.name
install_tgt = options.get_install(tgt.cmake_name, tgt.install)
# Generate target kwargs
tgt_kwargs = {
'build_by_default': install_tgt,
'link_args': options.get_link_args(tgt.cmake_name, tgt.link_flags + tgt.link_libraries),
'link_with': link_with,
'include_directories': id_node(inc_var),
'install': install_tgt,
'override_options': options.get_override_options(tgt.cmake_name, tgt.override_options),
'objects': [method(x, 'extract_all_objects') for x in objec_libs],
} # type: TYPE_mixed_kwargs
# Only set if installed and only override if it is set
if install_tgt and tgt.install_dir:
tgt_kwargs['install_dir'] = tgt.install_dir
# Handle compiler args
for key, val in tgt.compile_opts.items():
tgt_kwargs['{}_args'.format(key)] = options.get_compile_args(tgt.cmake_name, key, val)
# Handle -fPIC, etc
if tgt_func == 'executable':
tgt_kwargs['pie'] = tgt.pie
elif tgt_func == 'static_library':
tgt_kwargs['pic'] = tgt.pie
# declare_dependency kwargs
dep_kwargs = {
'link_args': tgt.link_flags + tgt.link_libraries,
'link_with': id_node(tgt_var),
'compile_args': tgt.public_compile_opts,
'include_directories': id_node(inc_var),
} # type: TYPE_mixed_kwargs
if dependencies:
generated += dependencies
# Generate the function nodes
dir_node = assign(dir_var, function('include_directories', tgt.includes))
sys_node = assign(sys_var, function('include_directories', tgt.sys_includes, {'is_system': True}))
inc_node = assign(inc_var, array([id_node(dir_var), id_node(sys_var)]))
node_list = [dir_node, sys_node, inc_node]
if tgt_func == 'header_only':
del dep_kwargs['link_with']
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
src_var = None
tgt_var = None
else:
src_node = assign(src_var, function('files', sources))
tgt_node = assign(tgt_var, function(tgt_func, [tgt_var, id_node(src_var), *generated], tgt_kwargs))
node_list += [src_node, tgt_node]
if tgt_func in ['static_library', 'shared_library']:
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
elif tgt_func in ['shared_module']:
del dep_kwargs['link_with']
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
else:
dep_var = None
# Add the nodes to the ast
root_cb.lines += node_list
processed[tgt.name] = {'inc': inc_var, 'src': src_var, 'dep': dep_var, 'tgt': tgt_var, 'func': tgt_func}
name_map[tgt.cmake_name] = tgt.name
def process_custom_target(tgt: ConverterCustomTarget) -> None:
# CMake allows specifying multiple commands in a custom target.
# To map this to meson, a helper script is used to execute all
# commands in order. This additionally allows setting the working
# directory.
detect_cycle(tgt)
tgt_var = tgt.name # type: str
def resolve_source(x: T.Union[str, ConverterTarget, ConverterCustomTarget, CustomTargetReference]) -> T.Union[str, IdNode, IndexNode]:
if isinstance(x, ConverterTarget):
if x.name not in processed:
process_target(x)
return extract_tgt(x)
if isinstance(x, ConverterCustomTarget):
if x.name not in processed:
process_custom_target(x)
return extract_tgt(x)
elif isinstance(x, CustomTargetReference):
if x.ctgt.name not in processed:
process_custom_target(x.ctgt)
return resolve_ctgt_ref(x)
else:
return x
# Generate the command list
command = [] # type: T.List[T.Union[str, IdNode, IndexNode]]
command += mesonlib.meson_command
command += ['--internal', 'cmake_run_ctgt']
command += ['-o', '@OUTPUT@']
if tgt.original_outputs:
command += ['-O'] + [x.as_posix() for x in tgt.original_outputs]
command += ['-d', tgt.working_dir.as_posix()]
# Generate the commands. Subcommands are separated by ';;;'
for cmd in tgt.command:
command += [resolve_source(x) for x in cmd] + [';;;']
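# Illustrative (assumed) shape of the final command list, not taken from the
# original source:
#   [<meson command>, '--internal', 'cmake_run_ctgt', '-o', '@OUTPUT@',
#    '-O', 'out1.txt', '-d', '<working dir>', '<subcommand ...>', ';;;', ...]
# with each CMake sub-command terminated by the ';;;' separator.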
tgt_kwargs = {
'input': [resolve_source(x) for x in tgt.inputs],
'output': tgt.outputs,
'command': command,
'depends': [resolve_source(x) for x in tgt.depends],
} # type: TYPE_mixed_kwargs
root_cb.lines += [assign(tgt_var, function('custom_target', [tgt.name], tgt_kwargs))]
processed[tgt.name] = {'inc': None, 'src': None, 'dep': None, 'tgt': tgt_var, 'func': 'custom_target'}
name_map[tgt.cmake_name] = tgt.name
# Now generate the target function calls
for ctgt in self.custom_targets:
if ctgt.name not in processed:
process_custom_target(ctgt)
for tgt in self.targets:
if tgt.name not in processed:
process_target(tgt)
self.generated_targets = processed
self.internal_name_map = name_map
return root_cb
def target_info(self, target: str) -> T.Optional[T.Dict[str, str]]:
# Try resolving the target name
# start by checking if there is a 100% match (excluding the name prefix)
prx_tgt = _sanitize_cmake_name(target)
if prx_tgt in self.generated_targets:
return self.generated_targets[prx_tgt]
# check if there exists a name mapping
if target in self.internal_name_map:
target = self.internal_name_map[target]
assert(target in self.generated_targets)
return self.generated_targets[target]
return None
def target_list(self) -> T.List[str]:
return list(self.internal_name_map.keys())
|
py | 1a533ddd9b349e1520545badc6625f9ddad01241 | #!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
from unittest import TestCase
import pytest
from tests.utils import assert_equal_dict
from polyaxon.schemas.polyflow.init import InitConfig
@pytest.mark.init_mark
class TestInitConfigs(TestCase):
def test_init_config(self):
config_dict = {}
config = InitConfig.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
# Add artifact_refs
config_dict["artifacts"] = [
{"name": "data2"},
{"name": "data3", "paths": ["/subpath1", "subpath2"]},
{"name": "artifact2", "paths": ["/subpath1", "subpath2"]},
]
config = InitConfig.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
# Add repos
config_dict["repos"] = [
{"name": "repo1"},
{"name": "repo1", "commit": "commit-hash"},
{"name": "repo2", "branch": "dev"},
]
config = InitConfig.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
# Add build context
config_dict = {
"build": {
"image": "tensorflow:1.3.0",
"run": ["pip install tensor2tensor"],
"env": [["LC_ALL", "en_US.UTF-8"]],
"shell": "foo",
"name": "foo.yaml",
"workdir": "/test",
}
}
config = InitConfig.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
|
py | 1a533e9fd26e55a38db4e9240d99f147ca544819 | from rqt_gui_py.plugin import Plugin
import python_qt_binding.QtGui as QtGui
from python_qt_binding.QtGui import (QAction, QIcon, QMenu, QWidget,
QPainter, QColor, QFont, QBrush,
QPen, QMessageBox, QSizePolicy,
QImage, QPixmap, qRgb, QComboBox,
QDialog, QPushButton)
from python_qt_binding.QtCore import (Qt, QTimer, qWarning, Slot,
QEvent, QSize, pyqtSignal,
pyqtSlot)
from threading import Lock, Thread
import rospy
import python_qt_binding.QtCore as QtCore
from std_msgs.msg import Bool, Time
import time
import math
from resource_retriever import get_filename
import yaml
import os, sys
import numpy as np
import cv2, cv
from cv_bridge import CvBridge, CvBridgeError
from image_view2.msg import MouseEvent
from sensor_msgs.msg import Image
class ComboBoxDialog(QDialog):
def __init__(self, parent=None):
super(ComboBoxDialog, self).__init__()
self.number = 0
vbox = QtGui.QVBoxLayout(self)
self.combo_box = QComboBox(self)
self.combo_box.activated.connect(self.onActivated)
vbox.addWidget(self.combo_box)
button = QPushButton()
button.setText("Done")
button.clicked.connect(self.buttonCallback)
vbox.addWidget(button)
self.setLayout(vbox)
def buttonCallback(self, event):
self.close()
def onActivated(self, number):
self.number = number
class ImageView2Plugin(Plugin):
"""
rqt wrapper for image_view2
"""
def __init__(self, context):
super(ImageView2Plugin, self).__init__(context)
self.setObjectName("ImageView2Plugin")
self._widget = ImageView2Widget()
context.add_widget(self._widget)
def save_settings(self, plugin_settings, instance_settings):
self._widget.save_settings(plugin_settings, instance_settings)
def restore_settings(self, plugin_settings, instance_settings):
self._widget.restore_settings(plugin_settings, instance_settings)
def trigger_configuration(self):
self._widget.trigger_configuration()
class ScaledLabel(QtGui.QLabel):
def __init__(self, *args, **kwargs):
QtGui.QLabel.__init__(self)
self._pixmap = QtGui.QPixmap(self.pixmap())
def resizeEvent(self, event):
self.setPixmap(self._pixmap.scaled(
self.width(), self.height(),
QtCore.Qt.KeepAspectRatio))
class ImageView2Widget(QWidget):
"""
Qt widget to communicate with image_view2
"""
cv_image = None
pixmap = None
repaint_trigger = pyqtSignal()
def __init__(self):
super(ImageView2Widget, self).__init__()
self.left_button_clicked = False
self.repaint_trigger.connect(self.redraw)
self.lock = Lock()
self.need_to_rewrite = False
self.bridge = CvBridge()
self.image_sub = None
self.event_pub = None
self.label = ScaledLabel()
self.label.setAlignment(Qt.AlignCenter)
self.label.setSizePolicy(QSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored))
#self.label.installEventFilter(self)
vbox = QtGui.QVBoxLayout(self)
vbox.addWidget(self.label)
self.setLayout(vbox)
self._image_topics = []
self._update_topic_thread = Thread(target=self.updateTopics)
self._update_topic_thread.start()
self._active_topic = None
self.setMouseTracking(True)
self.label.setMouseTracking(True)
self._dialog = ComboBoxDialog()
self.show()
def trigger_configuration(self):
self._dialog.exec_()
self.setupSubscriber(self._image_topics[self._dialog.number])
def setupSubscriber(self, topic):
if self.image_sub:
self.image_sub.unregister()
rospy.loginfo("Subscribing %s" % (topic + "/marked"))
self.image_sub = rospy.Subscriber(topic + "/marked",
Image,
self.imageCallback)
self.event_pub = rospy.Publisher(topic + "/event", MouseEvent)
self._active_topic = topic
def onActivated(self, number):
self.setupSubscriber(self._image_topics[number])
def imageCallback(self, msg):
with self.lock:
if msg.width == 0 or msg.height == 0:
rospy.logdebug("Looks input images is invalid")
return
cv_image = self.bridge.imgmsg_to_cv2(msg, msg.encoding)
if msg.encoding == "bgr8":
self.cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
elif msg.encoding == "rgb8":
self.cv_image = cv_image
self.numpy_image = np.asarray(self.cv_image)
self.need_to_rewrite = True
self.repaint_trigger.emit()
def updateTopics(self):
need_to_update = False
for (topic, topic_type) in rospy.get_published_topics():
if topic_type == "sensor_msgs/Image":
with self.lock:
if not topic in self._image_topics:
self._image_topics.append(topic)
need_to_update = True
if need_to_update:
with self.lock:
self._image_topics = sorted(self._image_topics)
self._dialog.combo_box.clear()
for topic in self._image_topics:
self._dialog.combo_box.addItem(topic)
if self._active_topic:
self._dialog.combo_box.setCurrentIndex(self._image_topics.index(self._active_topic))
time.sleep(1)
@pyqtSlot()
def redraw(self):
with self.lock:
if not self.need_to_rewrite:
return
if self.cv_image is not None:
size = self.cv_image.shape
img = QImage(self.cv_image.data,
size[1], size[0], size[2] * size[1],
QImage.Format_RGB888)
# convert to QPixmap
self.pixmap = QPixmap(size[1], size[0])
self.pixmap.convertFromImage(img)
self.label.setPixmap(self.pixmap.scaled(
self.label.width(), self.label.height(),
QtCore.Qt.KeepAspectRatio))
#self.label.setPixmap(self.pixmap)
def mousePosition(self, e):
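# Descriptive note: the pixmap is centered inside the label, so the event
# coordinates are shifted by the label offset plus half of the unused label
# space to obtain coordinates relative to the displayed image.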
label_x = self.label.x()
label_y = self.label.y()
label_width = self.label.width()
label_height = self.label.height()
pixmap_width = self.label.pixmap().width()
pixmap_height = self.label.pixmap().height()
x_offset = (label_width - pixmap_width) / 2.0 + label_x
y_offset = (label_height - pixmap_height) / 2.0 + label_y
return (e.x() - x_offset, e.y()- y_offset)
def mouseMoveEvent(self, e):
msg = MouseEvent()
msg.header.stamp = rospy.Time.now()
msg.type = MouseEvent.MOUSE_MOVE
msg.x, msg.y = self.mousePosition(e)
msg.width = self.label.pixmap().width()
msg.height = self.label.pixmap().height()
if self.event_pub:
self.event_pub.publish(msg)
def mousePressEvent(self, e):
msg = MouseEvent()
msg.header.stamp = rospy.Time.now()
if e.button() == Qt.LeftButton:
msg.type = MouseEvent.MOUSE_LEFT_DOWN
self.left_button_clicked = True
elif e.button() == Qt.RightButton:
msg.type = MouseEvent.MOUSE_RIGHT_DOWN
msg.width = self.label.pixmap().width()
msg.height = self.label.pixmap().height()
msg.x, msg.y = self.mousePosition(e)
if self.event_pub:
self.event_pub.publish(msg)
def mouseReleaseEvent(self, e):
if e.button() == Qt.LeftButton:
self.left_button_clicked = False
msg = MouseEvent()
msg.header.stamp = rospy.Time.now()
msg.width = self.label.pixmap().width()
msg.height = self.label.pixmap().height()
msg.type = MouseEvent.MOUSE_LEFT_UP
msg.x, msg.y = self.mousePosition(e)
if self.event_pub:
self.event_pub.publish(msg)
def save_settings(self, plugin_settings, instance_settings):
if self._active_topic:
instance_settings.set_value("active_topic", self._active_topic)
def restore_settings(self, plugin_settings, instance_settings):
if instance_settings.value("active_topic"):
topic = instance_settings.value("active_topic")
self._dialog.combo_box.addItem(topic)
self.setupSubscriber(topic)
|
py | 1a533eec60f5a950848a098db27bcd4a17ed4ed8 | #!/usr/bin/env python3.4
"""
wsgi.py
This is the file that is responsible for running the application and
other commands like the database migrations.
It shouldn't have any extensive logic.
"""
from townsquare import TownSquare
from flask_script import Manager
#from townsquare.api import apimanager
import townsquare.api
application = TownSquare.create_app()
manager = Manager(application)
townsquare.api.apimanager.init_app(application)
if __name__ == '__main__':
manager.run(default_command='runserver')
|
py | 1a53402022e0d5ce2f7e8942379207308cc3d749 | """
"""
# Created on 2014.10.05
#
# Author: Giovanni Cannata
#
# Copyright 2014 - 2019 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
import re
from ..utils.log import log, log_enabled, NETWORK
try:
from backports.ssl_match_hostname import match_hostname, CertificateError
except ImportError:
class CertificateError(ValueError): # fix for Python 2, code from Python 3.5 standard library
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Backported from Python 3.4.3 standard library
Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
if log_enabled(NETWORK):
log(NETWORK, "matching dn %s with hostname %s", dn, hostname)
pats = []
if not dn:
return False
pieces = dn.split(r'.')
leftmost = pieces[0]
remainder = pieces[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
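# Illustrative examples (not part of the original source):
#   _dnsname_match('*.example.com', 'www.example.com')  -> matches
#   _dnsname_match('*.example.com', 'a.b.example.com')  -> no match (the wildcard
#       covers only a single, non-empty label per RFC 6125, section 6.4.3)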
def match_hostname(cert, hostname):
"""Backported from Python 3.4.3 standard library.
Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
py | 1a53406c9debac0b1c163b124047a4a2bf65d287 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_less_equal_normal(test_case, device):
input1 = flow.tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device)
)
input2 = flow.tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device)
)
of_out = flow.le(input1, input2)
np_out = np.less_equal(input1.numpy(), input2.numpy())
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_less_equal_symbol(test_case, device):
input1 = flow.tensor(
np.array([1, 1, 4]).astype(np.float32),
dtype=flow.float32,
device=flow.device(device),
)
input2 = flow.tensor(
np.array([1, 2, 3]).astype(np.float32),
dtype=flow.float32,
device=flow.device(device),
)
of_out = input1 <= input2
np_out = np.less_equal(input1.numpy(), input2.numpy())
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_less_equal_int_scalar(test_case, device):
np_arr = np.random.randn(2, 3, 4, 5)
input1 = flow.tensor(np_arr, dtype=flow.float32, device=flow.device(device))
input2 = 1
of_out = input1 <= input2
np_out = np.less_equal(np_arr, input2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_less_equal_int_tensor_int_scalar(test_case, device):
np_arr = np.random.randint(2, size=(2, 3, 4, 5))
input1 = flow.tensor(np_arr, dtype=flow.int, device=flow.device(device))
input2 = 1
of_out = input1 <= input2
np_out = np.less_equal(np_arr, input2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_less_equal_float_scalar(test_case, device):
np_arr = np.random.randn(3, 2, 5, 7)
input1 = flow.tensor(np_arr, dtype=flow.float32, device=flow.device(device))
input2 = 2.3
of_out = input1 <= input2
np_out = np.less_equal(np_arr, input2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
@flow.unittest.skip_unless_1n1d()
class TestLessEqual(flow.unittest.TestCase):
def test_less_equal(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_less_equal_normal,
_test_less_equal_symbol,
_test_less_equal_int_scalar,
_test_less_equal_int_tensor_int_scalar,
_test_less_equal_float_scalar,
]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
if __name__ == "__main__":
unittest.main()
|
py | 1a5340a43fbbc539e7b2f41de6031477b1b03be6 | import tensorflow as tf
class MultiHeadSelfAttLayer(tf.keras.layers.Layer):
def __init__(self, n_heads, input_size, hidd_size, level):
super(MultiHeadSelfAttLayer, self).__init__()
self.hidd_size = hidd_size
self.n_heads = n_heads
self.w_output = tf.get_variable(name='w_output', shape=(hidd_size * n_heads, input_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
# regularizer=tf.keras.regularizers.l2(l2=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=level),
trainable=True)
self.layernorm0 = tf.keras.layers.LayerNormalization(axis=-1)
self.layernorm1 = tf.keras.layers.LayerNormalization(axis=-1)
self.output_layer = Ffnn(hidd_size * n_heads, input_size * 3, input_size, level)
self.layers = []
for n in range(n_heads):
with tf.variable_scope("self_att_layer_%d_%d" % (n, level)):
# Create sublayers for each layer.
self_attention_layer = SelfAttentionLayer(input_size, hidd_size, n)
self.layers.append(self_attention_layer)
def call(self, x, training):
att_heads_results = []
att_weights_results = []
# multi-head attention
for n, self_attention_layer in enumerate(self.layers):
with tf.variable_scope("self_att_layer_%d" % n):
interaction_weights, layer_out = self_attention_layer(x, training)
att_heads_results.append(layer_out)
att_weights_results.append(interaction_weights)
# concat
embedded_output = tf.stack(att_heads_results, axis=-1)
hidd_doc_repr = tf.reshape(embedded_output, (-1, tf.shape(embedded_output)[1], self.hidd_size * self.n_heads))
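# Descriptive note: hidd_doc_repr has shape (batch, seq, hidd_size * n_heads);
# the residual add below assumes hidd_size * n_heads equals the input size.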
# add and norm
hidd_doc_repr = self.layernorm0(hidd_doc_repr + x)
hidd_doc_repr = tf.layers.dropout(hidd_doc_repr, rate=0.5, training=training)
# position-ff
output = self.output_layer(hidd_doc_repr, training)
# add and norm
output = self.layernorm1(output + hidd_doc_repr)
output = tf.layers.dropout(output, rate=0.5, training=training)
return tf.stack(att_weights_results, axis=-1), output
class Ffnn(tf.keras.layers.Layer):
def __init__(self, input_size, w1_hidd_size, w2_hidd_size, seed):
super(Ffnn, self).__init__()
# self.bn = tf.keras.layers.BatchNormalization(axis=-1)
self.w1 = tf.get_variable(name='w1', shape=(input_size, w1_hidd_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed),
trainable=True)
self.b1 = tf.get_variable(name='b1', shape=w1_hidd_size,
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed),
trainable=True)
self.w2 = tf.get_variable(name='w2', shape=(w1_hidd_size, w2_hidd_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed),
trainable=True)
self.b2 = tf.get_variable(name='b2', shape=w2_hidd_size,
# regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed),
trainable=True)
def call(self, x, training):
p1 = tf.nn.leaky_relu(tf.einsum('bse, eo->bso', x, self.w1) + self.b1)
# print('add dropout in ffnn in between the layers')
# p1 = tf.layers.dropout(p1, training=training)
# print('replaced l2 norm in ffnn with bn layer')
p1 = tf.nn.l2_normalize(p1, axis=-1)
# p1 = self.bn(p1)
p2 = tf.einsum('bse, eo->bso', p1, self.w2) + self.b2
return p2
class SelfAttentionLayer(tf.keras.layers.Layer):
def __init__(self, input_data_size, proj_space_size, seed):
super(SelfAttentionLayer, self).__init__()
self.proj_space_size = proj_space_size
self.k = tf.get_variable(name='K', shape=(input_data_size, self.proj_space_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.q = tf.get_variable(name='Q', shape=(input_data_size, self.proj_space_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.v = tf.get_variable(name='V', shape=(input_data_size, self.proj_space_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
def call(self, embdedded_features_vectors, training):
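# Shape notes (descriptive, inferred from the einsum specs below):
#   embdedded_features_vectors: (batch, seq, input_data_size)
#   Q, K, V: (batch, seq, proj_space_size)
#   QK: (batch, seq, seq); the per-position weights are summed over the last
#   axis and passed through a softmax before scaling V.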
Q = tf.einsum('eo, bse->bso', self.q, embdedded_features_vectors)
K = tf.einsum('eo, bse->bso', self.k, embdedded_features_vectors)
V = tf.einsum('eo, bse->bso', self.v, embdedded_features_vectors)
QK = tf.matmul(Q, K, transpose_b=True)
QK = QK / tf.sqrt(tf.cast(self.proj_space_size, tf.float32))
interaction_weights = tf.reduce_sum(QK, axis=-1)
att_w = tf.nn.softmax(interaction_weights, axis=-1)
output = tf.layers.dropout(tf.einsum('bso,bs->bso', V, att_w), rate=0.5, training=training)
output = tf.nn.l2_normalize(output)
return att_w, output
class FFNetCombo(tf.keras.layers.Layer):
def __init__(self, input_size, output_size, seed, rate=0.5):
super(FFNetCombo, self).__init__()
self.proj_matrix = tf.get_variable(name='W_ffncombo', shape=(input_size, output_size), dtype=tf.float32,
initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.bias = tf.get_variable(name='b_ffncombo', shape=output_size, dtype=tf.float32,
initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.dropout = tf.keras.layers.Dropout(rate)
self.bn = tf.keras.layers.BatchNormalization(momentum=0.4, axis=-1)
# self.hidd_l = tfp.layers.DenseFlipout(1, activation=tf.nn.leaky_relu)
def call(self, inputs, **kwargs):
norm_inputs = self.bn(inputs)
output = tf.nn.leaky_relu(tf.einsum('bsf, fo->bso', norm_inputs, self.proj_matrix) + self.bias)
# output = self.hidd_l(norm_inputs)
output = self.dropout(output)
return output
class FCReluBN(tf.keras.layers.Layer):
def __init__(self, input_size, output_size, seed, rate=0.5):
super(FCReluBN, self).__init__()
self.proj_matrix = tf.get_variable(name='W_ffncombo', shape=(input_size, output_size), dtype=tf.float32,
initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.bias = tf.get_variable(name='b_ffncombo', shape=output_size, dtype=tf.float32,
initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.dropout = tf.keras.layers.Dropout(rate)
self.bn = tf.keras.layers.BatchNormalization(momentum=0.4, axis=-1)
# self.hidd_l = tfp.layers.DenseFlipout(1, activation=tf.nn.leaky_relu)
def call(self, inputs, **kwargs):
output = tf.nn.leaky_relu(tf.einsum('bsf, fo->bso', inputs, self.proj_matrix) + self.bias)
output = self.bn(output)
return output
|
py | 1a5340e248458e344e6ecf4b3b4f6b37f21db26f | __version_tuple__ = (0, 2, 0)
__version_tuple_js__ = (0, 2, 0)
__version__ = '0.2.0'
__version_js__ = '0.2.0'
version_info = __version_tuple__ # kept for backward compatibility |
py | 1a534160fa43dfaec79ebcdf0bb98c6887f0462a | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-usehd=0']] * self.num_nodes)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():500})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-5, "changeAddress must be a valid cadex address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_jsonrpc(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
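# Descriptive note: this slice rewrites the empty scriptSig into a one-byte
# 0x00 script, which the decode assertions below check for.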
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
utx2 = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
utx2 = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
        #compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 KDX to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 2)
stop_node(self.nodes[2], 3)
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-usehd=0']] * self.num_nodes)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
        assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[1].fundrawtransaction, rawTx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
assert_raises_jsonrpc(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 12)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
        # make sure funds are received at node0
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
        #empty node1 into node0, then send some small coins from node0 back to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
        #again, empty node1 into node0, then send some small coins from node0 back to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
#############################
# Test address reuse option #
#############################
result3 = self.nodes[3].fundrawtransaction(rawtx, {"reserveChangeKey": False})
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
        # fundrawtransaction should not have removed the key from the keypool
assert(changeaddress == nextaddr)
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
# Disable BIP69 sorting of inputs and outputs
self.nodes[3].setbip69enabled(False)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
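        # each funded tx has exactly two outputs, so index (1 - changepos) is the
        # payment output and index changepos is the change output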
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
keys = list(outputs.keys())
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
# Reenable BIP69 sorting of inputs and outputs
self.nodes[3].setbip69enabled(True)
if __name__ == '__main__':
RawTransactionsTest().main()
|
py | 1a53417a793fe53375bd9c4e54195cb2916eedfc | # Copyright (c) 2014 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import plugin_utils as pu
from sahara.plugins.cdh.v5 import config_helper as c_helper
from sahara.plugins.cdh.v5 import db_helper
class PluginUtilsV5(pu.AbstractPluginUtils):
def __init__(self):
self.c_helper = c_helper
self.db_helper = db_helper
def configure_spark(self, cluster):
spark = self.get_spark_historyserver(cluster)
with spark.remote() as r:
r.execute_command(
'sudo su - -c "hdfs dfs -mkdir -p '
'/user/spark/applicationHistory" hdfs')
r.execute_command(
'sudo su - -c "hdfs dfs -mkdir -p '
'/user/spark/share/lib" hdfs')
r.execute_command(
'sudo su - -c "hdfs dfs -put /usr/lib/spark/assembly/lib/'
'spark-assembly-hadoop* '
'/user/spark/share/lib/spark-assembly.jar" hdfs')
r.execute_command(
'sudo su - -c "hdfs dfs -chown -R '
'spark:spark /user/spark" hdfs')
r.execute_command(
'sudo su - -c "hdfs dfs -chmod 0751 /user/spark" hdfs')
r.execute_command(
'sudo su - -c "hdfs dfs -chmod 1777 /user/spark/'
'applicationHistory" hdfs')
def create_hive_hive_directory(self, cluster):
# Hive requires /tmp/hive-hive directory
namenode = self.get_namenode(cluster)
with namenode.remote() as r:
r.execute_command(
'sudo su - -c "hadoop fs -mkdir -p /tmp/hive-hive" hdfs')
r.execute_command(
'sudo su - -c "hadoop fs -chown hive /tmp/hive-hive" hdfs')
def start_cloudera_manager(self, cluster):
self._start_cloudera_manager(
cluster, c_helper.AWAIT_MANAGER_STARTING_TIMEOUT)
def get_config_value(self, service, name, cluster=None):
configs = c_helper.get_plugin_configs()
return self._get_config_value(service, name, configs, cluster)
|
py | 1a5341a3a979e8aedf5fe74a3c575a8709452d5e | import re
str = "小明年龄18岁,工资10k"
# re.search finds the first match anywhere in the string
res = re.search(r"\d+", str).group()
print("search result", res)
res = re.findall(r"\d+", str)
print("findall result", res)
# re.match only matches at the beginning of the string; str starts with "小明"
res = re.match("小明", str).group()
print("match result", res)
# str does not start with a digit, so re.match returns None
# (calling .group() on the result here would raise AttributeError)
res = re.match(r"\d+", str)
print("no match: result is None without group()", res)
# str does not start with "工资" (salary) either, so this match is also None
res = re.match("工资", str)
print("match result", res)
|
py | 1a5342ef3aacb91441368099f1e9519a8ecc504b | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
import viztracer
import subprocess
import os
import time
import sys
import multiprocessing
from viztracer import VizTracer, ignore_function
from .cmdline_tmpl import CmdlineTmpl
from .base_tmpl import BaseTmpl
class TestIssue1(BaseTmpl):
def test_datetime(self):
tracer = viztracer.VizTracer()
tracer.start()
from datetime import timedelta
timedelta(hours=5)
tracer.stop()
tracer.parse()
tracer.save(output_file="tmp.json")
tracer = viztracer.VizTracer()
tracer.start()
from datetime import timedelta
timedelta(hours=5)
tracer.stop()
tracer.parse()
tracer.save(output_file="tmp.json")
os.remove("tmp.json")
class TestStackOptimization(BaseTmpl):
# There's an order issue in tracefunc to skip the FEE log
# If the stack is empty(stack_top is NULL), and we entered
# into an ignored function, ignore_stack_depth will increment.
# However, when its corresponding exit comes, ignore_stack_depth
    # won't be decremented because the function is skipped when
    # the stack is empty and it's a return function
def test_instant(self):
def s():
return 0
tracer = VizTracer()
tracer.start()
# This is a library function which will be ignored, but
        # this could trick the system into an ignoring status
tracer.add_instant("name", {"a": 1})
s()
s()
s()
tracer.stop()
entries = tracer.parse()
tracer.save()
self.assertEqual(entries, 4)
class TestSegFaultRegression(BaseTmpl):
# Without parsing, cleanup of C function had caused segfault
def test_cleanup(self):
tracer = VizTracer()
tracer.start()
_ = len([1, 2, 3])
_ = sum([2, 3, 4])
try:
raise Exception("lol")
except Exception:
pass
tracer.stop()
tracer.cleanup()
class TestFunctionArg(BaseTmpl):
def test_functionarg(self):
def f(n):
tracer.add_func_args("input", n)
if n < 2:
return 1
return f(n - 1) + f(n - 2)
tracer = VizTracer()
tracer.start()
f(5)
tracer.stop()
tracer.parse()
inputs = set()
for d in tracer.data["traceEvents"]:
if d["ph"] == "X":
inputs.add(d["args"]["input"])
self.assertEqual(inputs, set([0, 1, 2, 3, 4, 5]))
issue21_code = \
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--script_option", action="store_true")
parser.add_argument("-o", action="store_true")
options = parser.parse_args()
print(options)
if not options.script_option:
exit(1)
"""
class TestIssue21(CmdlineTmpl):
# viztracer --run my_script --script_option
# is not parsed correctly because the program gets confused
# about --script_option
def test_issue21(self):
self.template(["viztracer", "--include_files", "/", "--run", "cmdline_test.py", "--script_option"], script=issue21_code)
self.template(["viztracer", "--include_files", "/", "--", "cmdline_test.py", "--script_option"], script=issue21_code)
self.template(["viztracer", "cmdline_test.py", "--script_option"], script=issue21_code)
self.template(["viztracer", "--run", "cmdline_test.py", "-o", "--script_option"], script=issue21_code)
self.template(["viztracer", "--", "cmdline_test.py", "-o", "--script_option"], script=issue21_code)
self.template(["viztracer", "--run"], script=issue21_code, success=False, expected_output_file=None)
self.template(["viztracer", "--"], script=issue21_code, success=False, expected_output_file=None)
term_code = \
"""
import time
a = []
a.append(1)
for i in range(10):
time.sleep(1)
"""
class TestTermCaught(CmdlineTmpl):
def test_term(self):
if sys.platform == "win32":
return
self.build_script(term_code)
cmd = ["viztracer", "-o", "term.json", "cmdline_test.py"]
if os.getenv("COVERAGE_RUN"):
cmd = ["coverage", "run", "--parallel-mode", "--pylib", "-m"] + cmd
p = subprocess.Popen(cmd)
time.sleep(0.5)
p.terminate()
p.wait(timeout=10)
self.assertTrue(os.path.exists("term.json"))
self.cleanup(output_file="term.json")
class TestIssue42(BaseTmpl):
def test_issue42(self):
@ignore_function
def f():
lst = []
lst.append(1)
tracer = VizTracer()
tracer.start()
f()
tracer.stop()
tracer.parse()
self.assertEventNumber(tracer.data, 0)
issue47_code = \
"""
import sys
import gc
class C:
def __init__(self):
self.data = bytearray()
def change(self):
b = memoryview(self.data).tobytes()
self.data += b"123123"
del self.data[:1]
c = C()
c.change()
"""
class TestIssue47(CmdlineTmpl):
def test_issue47(self):
self.template(["viztracer", "cmdline_test.py", "-o", "result.json"], script=issue47_code, expected_output_file="result.json", expected_entries=7)
class TestIssue58(CmdlineTmpl):
def test_issue58(self):
if multiprocessing.get_start_method() == "fork":
self.template(["viztracer", "--log_multiprocess", "-m", "tests.modules.issue58"], expected_output_file="result.html")
|
py | 1a5343375f93e233ab761419ca76240214a1f687 | import setuptools
VERSION = '0.1'
setuptools.setup(
name='TTWeb',
version=VERSION,
description='Web framework for The Tale',
long_description='Web framework for The Tale',
url='https://github.com/Tiendil/the-tale',
author='Aleksey Yeletsky <Tiendil>',
author_email='[email protected]',
license='BSD',
packages=setuptools.find_packages(),
install_requires=['aiohttp==3.7.4',
'cchardet==2.1.5',
'aiodns==2.0.0',
'aiopg==1.0.0',
'Django==3.0.11',
'yarl==1.4.2',
'protobuf==3.11.1'],
entry_points={'console_scripts': ['tt_service=tt_web.commands.tt_service:main']},
include_package_data=True,
test_suite='tests')
|
py | 1a53436e370518f375194985adc144d816310cbd | # Self Driving Car
# Importing the libraries
import numpy as np
from random import random, randint
import matplotlib.pyplot as plt
import time
# Importing the Kivy packages
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.graphics import Color, Ellipse, Line
from kivy.config import Config
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
# Importing the Dqn object from our AI in ai.py
from ai import Dqn
# Adding this line if we don't want the right click to put a red point
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
# Introducing last_x and last_y, used to keep the last point in memory when we draw the sand on the map
# Sand is our punishment
last_x = 0
last_y = 0
n_points = 0 # the total number of points in the last drawing
length = 0 # the length of the last drawing
# Getting our AI which contains NN that represents Q-function
brain = Dqn(5,3,0.9) # 5 sensor states, 3 actions, gamma = 0.9
action2rotation = [0,20,-20]
last_reward = 0
scores = []
# Initializing the map
first_update = True # To initialize the map only once
def init():
global sand # sand is an array that has as many cells as our graphic interface has pixels. Each cell has a one if there is sand, 0 otherwise
global goal_x # x-coordinate of the goal (where the car has to go, that is the airport or the downtown)
global goal_y # y-coordinate of the goal (where the car has to go, that is the airport or the downtown)
global first_update
sand = np.zeros((longueur,largeur)) # initializing the sand array with zeros
goal_x = 20 # the goal is at the upper left of the map (the x-coordinate is 20 and not 0 because the car gets bad reward if it touches the wall)
goal_y = largeur - 20 # the goal is at the upper left of the map (y-coordinate)
first_update = False # initialize the map only once
# Initializing the last distance
last_distance = 0
# Creating the car class
class Car(Widget):
angle = NumericProperty(0) # initializing the angle of the car (angle between the x-axis of the map and the axis of the car)
rotation = NumericProperty(0) # initializing the last rotation of the car (after playing the action, the car does a rotation of 0, 20 or -20 degrees)
velocity_x = NumericProperty(0) # initializing the x-coordinate of the velocity vector
velocity_y = NumericProperty(0) # initializing the y-coordinate of the velocity vector
velocity = ReferenceListProperty(velocity_x, velocity_y) # velocity vector
sensor1_x = NumericProperty(0) # initializing the x-coordinate of the first sensor (the one that looks forward)
sensor1_y = NumericProperty(0) # initializing the y-coordinate of the first sensor (the one that looks forward)
sensor1 = ReferenceListProperty(sensor1_x, sensor1_y) # first sensor vector
sensor2_x = NumericProperty(0)
sensor2_y = NumericProperty(0)
sensor2 = ReferenceListProperty(sensor2_x, sensor2_y)
sensor3_x = NumericProperty(0)
sensor3_y = NumericProperty(0)
sensor3 = ReferenceListProperty(sensor3_x, sensor3_y)
signal1 = NumericProperty(0)
signal2 = NumericProperty(0)
signal3 = NumericProperty(0)
def move(self, rotation):
self.pos = Vector(*self.velocity) + self.pos # updating the position of the car according to its last position and velocity
self.rotation = rotation # getting the rotation of the car
self.angle = self.angle + self.rotation # updating the angle
self.sensor1 = Vector(30, 0).rotate(self.angle) + self.pos # updating the position of sensor 1, 30 is distance between car and sensor
self.sensor2 = Vector(30, 0).rotate((self.angle+30)%360) + self.pos # updating the position of sensor 2
self.sensor3 = Vector(30, 0).rotate((self.angle-30)%360) + self.pos # sensor 3
self.signal1 = int(np.sum(sand[int(self.sensor1_x)-10:int(self.sensor1_x)+10, int(self.sensor1_y)-10:int(self.sensor1_y)+10]))/400. # getting the signal received by sensor 1 (density of sand around sensor 1)
self.signal2 = int(np.sum(sand[int(self.sensor2_x)-10:int(self.sensor2_x)+10, int(self.sensor2_y)-10:int(self.sensor2_y)+10]))/400.
self.signal3 = int(np.sum(sand[int(self.sensor3_x)-10:int(self.sensor3_x)+10, int(self.sensor3_y)-10:int(self.sensor3_y)+10]))/400.
if self.sensor1_x>longueur-10 or self.sensor1_x<10 or self.sensor1_y>largeur-10 or self.sensor1_y<10: # if sensor 1 is out of the map (the car is facing one edge of the map)
self.signal1 = 1. # sensor 1 detects full sand
if self.sensor2_x>longueur-10 or self.sensor2_x<10 or self.sensor2_y>largeur-10 or self.sensor2_y<10:
self.signal2 = 1.
if self.sensor3_x>longueur-10 or self.sensor3_x<10 or self.sensor3_y>largeur-10 or self.sensor3_y<10:
self.signal3 = 1.
class Ball1(Widget): # sensor 1
pass
class Ball2(Widget): # sensor 2
pass
class Ball3(Widget): # sensor 3
pass
# Creating the game class
class Game(Widget):
car = ObjectProperty(None) # getting the car object from our kivy file
ball1 = ObjectProperty(None) # getting the sensor 1 object from our kivy file
ball2 = ObjectProperty(None)
ball3 = ObjectProperty(None)
def serve_car(self): # starting the car when we launch the application
self.car.center = self.center # the car will start at the center of the map
self.car.velocity = Vector(6, 0) # the car will start to go horizontally to the right with a speed of 6
def update(self, dt): # update function that updates everything that needs to be updated at each discrete time t when reaching a new state (getting new signals from the sensors)
global brain # specifying the global variables (the brain of the car, that is our AI)
global last_reward # specifying the global variables (the last reward)
global scores # specifying the global variables (the means of the rewards)
global last_distance # specifying the global variables (the last distance from the car to the goal)
global goal_x # specifying the global variables (x-coordinate of the goal)
global goal_y # specifying the global variables (y-coordinate of the goal)
global longueur # specifying the global variables (width of the map)
global largeur # specifying the global variables (height of the map)
longueur = self.width # width of the map (horizontal edge)
largeur = self.height # height of the map (vertical edge)
if first_update: # initialize the map only once
init()
xx = goal_x - self.car.x # difference of x-coordinates between the goal and the car
yy = goal_y - self.car.y # difference of y-coordinates between the goal and the car
orientation = Vector(*self.car.velocity).angle((xx,yy))/180. # direction of the car with respect to the goal (if the car is heading perfectly towards the goal, then orientation = 0)
last_signal = [self.car.signal1, self.car.signal2, self.car.signal3, orientation, -orientation] # our input state vector, composed of the three signals received by the three sensors, plus the orientation and -orientation
action = brain.update(last_reward, last_signal) # playing the action from our ai (the object brain of the dqn class)
scores.append(brain.score()) # appending the score (mean of the last 100 rewards to the reward window)
rotation = action2rotation[action] # converting the action played (0, 1 or 2) into the rotation angle (0°, 20° or -20°)
self.car.move(rotation) # moving the car according to this last rotation angle
distance = np.sqrt((self.car.x - goal_x)**2 + (self.car.y - goal_y)**2) # getting the new distance between the car and the goal right after the car moved
self.ball1.pos = self.car.sensor1 # updating the position of the first sensor (ball1) right after the car moved
self.ball2.pos = self.car.sensor2
self.ball3.pos = self.car.sensor3
if sand[int(self.car.x),int(self.car.y)] > 0: # if the car is on the sand
self.car.velocity = Vector(1, 0).rotate(self.car.angle) # it is slowed down (speed = 1)
last_reward = -1 # and reward = -1
else: # otherwise
self.car.velocity = Vector(6, 0).rotate(self.car.angle) # it goes to a normal speed (speed = 6)
last_reward = -0.2 # and it gets bad reward (-0.2)
if distance < last_distance: # if it getting close to the goal
last_reward = 0.1 # it still gets slightly positive reward 0.1
if self.car.x < 10: # if the car is in the left edge of the frame
self.car.x = 10 # it is not slowed down
last_reward = -1 # but it gets bad reward -1
if self.car.x > self.width - 10: # if the car is in the right edge of the frame
self.car.x = self.width - 10 # it is not slowed down
last_reward = -1 # but it gets bad reward -1
if self.car.y < 10: # if the car is in the bottom edge of the frame
self.car.y = 10 # it is not slowed down
last_reward = -1 # but it gets bad reward -1
if self.car.y > self.height - 10: # if the car is in the upper edge of the frame
self.car.y = self.height - 10 # it is not slowed down
last_reward = -1 # but it gets bad reward -1
if distance < 100: # when the car reaches its goal
goal_x = self.width-goal_x # the goal becomes the bottom right corner of the map (the downtown), and vice versa (updating of the x-coordinate of the goal)
goal_y = self.height-goal_y # the goal becomes the bottom right corner of the map (the downtown), and vice versa (updating of the y-coordinate of the goal)
last_distance = distance # Updating the last distance from the car to the goal
# Adding the painting tools
class MyPaintWidget(Widget):
def on_touch_down(self, touch): # putting some sand when we do a left click
global length, n_points, last_x, last_y
with self.canvas:
Color(0.8,0.7,0)
d = 10.
touch.ud['line'] = Line(points = (touch.x, touch.y), width = 10)
last_x = int(touch.x)
last_y = int(touch.y)
n_points = 0
length = 0
sand[int(touch.x),int(touch.y)] = 1
def on_touch_move(self, touch): # putting some sand when we move the mouse while pressing left
global length, n_points, last_x, last_y
if touch.button == 'left':
touch.ud['line'].points += [touch.x, touch.y]
x = int(touch.x)
y = int(touch.y)
length += np.sqrt(max((x - last_x)**2 + (y - last_y)**2, 2))
n_points += 1.
density = n_points/(length)
touch.ud['line'].width = int(20 * density + 1)
sand[int(touch.x) - 10 : int(touch.x) + 10, int(touch.y) - 10 : int(touch.y) + 10] = 1
last_x = x
last_y = y
# Adding the API Buttons (clear, save and load)
class CarApp(App):
def build(self):
parent = Game()
parent.serve_car()
Clock.schedule_interval(parent.update, 1.0/60.0)
self.painter = MyPaintWidget()
clearbtn = Button(text = 'clear')
savebtn = Button(text = 'save', pos = (parent.width, 0))
loadbtn = Button(text = 'load', pos = (2 * parent.width, 0))
clearbtn.bind(on_release = self.clear_canvas)
savebtn.bind(on_release = self.save)
loadbtn.bind(on_release = self.load)
parent.add_widget(self.painter)
parent.add_widget(clearbtn)
parent.add_widget(savebtn)
parent.add_widget(loadbtn)
return parent
def clear_canvas(self, obj): # clear button
global sand
self.painter.canvas.clear()
sand = np.zeros((longueur,largeur))
def save(self, obj): # save button
print("saving brain...")
brain.save()
plt.plot(scores)
plt.show()
def load(self, obj): # load button
print("loading last saved brain...")
brain.load()
# Running the whole thing
if __name__ == '__main__':
CarApp().run()
|
py | 1a5343b526ad529617fb11cacd7b777edca02078 | #!/usr/bin/env python3
import json
import os
from os import path
import sys
import zipfile
import hashlib
import urllib.parse
BLOCKSIZE = 65536
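# files are hashed in 64 KiB chunks so large mod archives never need to fit in memory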
def sha256(file):
hasher = hashlib.sha256()
with open(file, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return(hasher.hexdigest())
def guess_mod_name(file_name):
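    # Guess the mod id from a jar file name by dropping the version suffix, e.g. a
    # (hypothetical) "examplemod-1.12.2-4.9.1.jar" becomes "examplemod": everything up
    # to the first dash-separated part that starts with a digit is kept.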
file_name, _ = os.path.splitext(file_name)
parts = []
for p in file_name.split('-'):
if len(p) > 0 and p[0].isdigit():
break
parts.append(p)
return "-".join(parts)
def apply_mod_count(modcount, modid):
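    # Disambiguate duplicate mod ids: the first "foo" stays "foo", later occurrences
    # become "foo-1", "foo-2", ... so every generated row keeps a unique name.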
if modid in modcount:
count = modcount[modid]
modcount[modid] = count + 1
return "{}-{}".format(modid, count)
else:
modcount[modid] = 1
return modid
def generate_mod(mod_file, url_base, flags, writer, modcount):
zip = zipfile.ZipFile(mod_file)
name = None
version = None
if 'mcmod.info' in zip.namelist():
try:
f = zip.open('mcmod.info')
data = json.load(f)
if 'modListVersion' in data and data['modListVersion'] == 2:
data = data['modList']
name = data[0]['modid']
if 'version' in data[0]:
version = data[0]['version']
else:
print("Warning: Mod {} is apparently incapable of specifying a version number in their mcmod.info. Using 'unknown', this may have weird side effects".format(name))
version = 'unknown'
        # JSONDecodeError subclasses ValueError, so it has to be caught first or its
        # branch is unreachable
        except json.decoder.JSONDecodeError as e:
            print("Warning: Author of mod {} is apparently incapable of writing correctly formatted json. Guessing information, this may have weird side effects ({})".format(mod_file, e))
        except ValueError as e:
            print("Warning: Mod {} does not contain mcmod.info (or it does not follow correct format). Guessing information, this may have weird side effects".format(mod_file))
        except Exception as e:
            print("Something broke: {}".format(e))
else:
print("Warning: Mod {} does not contain mcmod.info (or it does not follow correct format). Guessing information, this may have weird side effects".format(mod_file))
if name == None:
name = guess_mod_name(path.basename(mod_file))
version = ''
name = apply_mod_count(modcount, name)
our_flags = flags[name] if name in flags else ''
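    # row layout: name, version, download URL, type ("mod"), sha256 hash, extra flags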
writer.write("{},{},{}/mods/{},mod,{},{}\n".format(name, version, url_base, urllib.parse.quote(path.basename(mod_file)), sha256(mod_file), our_flags))
def make_configs(url_base, writer, exclude):
"""
Creates a configs.zip from the config/ directory.
Can be given a list of filenames to exclude
"""
with zipfile.ZipFile('configs.zip', 'w') as zip:
for (dirname, dirs, files) in os.walk("config"):
if dirname in exclude:
print("Skipping " + dirname + " and all files in it")
continue
for dir in dirs:
filename = path.join(dirname, dir)
arcname = filename[7:]
if filename not in exclude:
zip.write(filename, arcname)
for file in files:
filename = path.join(dirname, file)
if filename in exclude:
print("Skipping " + filename)
continue
arcname = filename[7:]
zip.write(filename, arcname)
writer.write("Configs,{1},{0}/configs.zip,config,{1}\n".format(url_base, sha256('configs.zip')))
def path_to_tree(path):
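    # expand a path into the set of all its parent directories, e.g. "a/b/c" ->
    # {"a/", "a/b/", "a/b/c/"}, so the zip archive gets explicit directory entries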
ret = set([])
total_path = ""
for el in path.split("/"):
total_path += el + "/"
ret.add(total_path)
return ret
def make_resources(list, url_base, writer):
dirs = set([])
for p in list:
dirname = path.dirname(p)
if len(dirname) > 0:
dirs = dirs.union(path_to_tree(dirname))
with zipfile.ZipFile('resources.zip', 'w') as zip:
for dir in dirs:
zip.write(dir, dir)
for file in list:
file = file.rstrip()
zip.write(file, file)
writer.write("Resources,{1},{0}/resources.zip,resources,{1}\n".format(url_base, sha256('resources.zip')))
if len(sys.argv) != 3:
print("Usage: {} <url_base> <out_file>".format(sys.argv[0]))
sys.exit(1)
base_url = sys.argv[1]
out_file = sys.argv[2]
exclude = []
if path.isfile('exclude.packupdate'):
with open('exclude.packupdate') as file:
for line in file.readlines():
exclude.append(line.strip())
with open(out_file, 'w') as out_file:
make_configs(base_url, out_file, exclude)
if path.isfile('resources.packupdate'):
with open('resources.packupdate') as file:
make_resources(file.readlines(), base_url, out_file)
if path.isfile('forge.packupdate'):
with open('forge.packupdate') as file:
out_file.write("Minecraft Forge,{},,forge\n".format(file.read().strip()))
flags = {}
if path.isfile('flags.packupdate'):
with open('flags.packupdate') as file:
for line in file.readlines():
key, val = line.split(',')
flags[key] = val.rstrip()
modpath = 'mods/'
modcount = {}
for f in os.listdir(modpath):
mod_file = os.path.join(modpath, f)
if mod_file in exclude:
continue
if os.path.isfile(mod_file):
generate_mod(mod_file, base_url, flags, out_file, modcount)
|
py | 1a5343e9cf13e53040595fdd46a3067561d5dddc | import click
import click_completion
from .main import completion
@completion.command()
@click.option(
"-i", "--case-insensitive/--no-case-insensitive", help="Case insensitive completion"
)
@click.argument(
"shell",
required=False,
type=click_completion.DocumentedChoice(click_completion.core.shells),
)
def show(shell, case_insensitive):
"""Show the click-completion-command completion code"""
extra_env = (
{"_CLICK_COMPLETION_COMMAND_CASE_INSENSITIVE_COMPLETE": "ON"}
if case_insensitive
else {}
)
click.echo(click_completion.core.get_code(shell, extra_env=extra_env))
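# Hypothetical usage, assuming the `completion` group is mounted on a CLI called `mytool`:
#   $ mytool completion show bash
#   $ mytool completion install --case-insensitive fish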
@completion.command()
@click.option(
"--append/--overwrite", help="Append the completion code to the file", default=None
)
@click.option(
"-i", "--case-insensitive/--no-case-insensitive", help="Case insensitive completion"
)
@click.argument(
"shell",
required=False,
type=click_completion.DocumentedChoice(click_completion.core.shells),
)
@click.argument("path", required=False)
def install(append, case_insensitive, shell, path):
"""Install the click-completion-command completion"""
extra_env = (
{"_CLICK_COMPLETION_COMMAND_CASE_INSENSITIVE_COMPLETE": "ON"}
if case_insensitive
else {}
)
shell, path = click_completion.core.install(
shell=shell, path=path, append=append, extra_env=extra_env
)
click.echo("%s completion installed in %s" % (shell, path))
|
py | 1a53443518a27e7cd031eac7cdd5197e79bfba81 | from __future__ import print_function
import tempfile
import os
import shutil
from b3get.utils import tmp_location
def test_has_tempdir():
assert tempfile.gettempdir()
def test_create_tempdir():
assert tempfile.gettempdir()
tdir = tempfile.mkdtemp()
assert os.path.exists(tdir)
print("\n", tdir)
shutil.rmtree(tdir)
def test_b3get_tempdir():
tdir = tmp_location()
assert os.path.exists(tdir)
assert os.path.isdir(tdir)
shutil.rmtree(tdir)
def test_b3get_tempdir_reuse():
tmp = tempfile.gettempdir()
exp = os.path.join(tmp, 'random-b3get')
os.makedirs(exp)
tdir = tmp_location()
assert tdir == exp
shutil.rmtree(tdir)
def test_b3get_tempdir_double_call():
exp = tmp_location()
tdir = tmp_location()
assert tdir == exp
shutil.rmtree(tdir)
|
py | 1a5344861d4e969561014c68c7c75f35cf85bddd | import matplotlib.pyplot as plt
import numpy as np
import os
from PIL import Image
os.makedirs("visual", exist_ok=True)
def show_mnist(n=20):
from tensorflow import keras
(x, y), _ = keras.datasets.mnist.load_data()
idx = np.random.randint(0, len(x), n)
x, y = x[idx], y[idx]
n_col = 5
n_row = len(x) // n_col
if x.ndim > 3:
x = np.squeeze(x, axis=-1)
plt.figure(0, (5, n_row))
for c in range(n_col):
for r in range(n_row):
i = r*n_col+c
plt.subplot(n_row, n_col, i+1)
plt.imshow(x[i], cmap="gray_r")
plt.axis("off")
# plt.xlabel(y[i])
plt.tight_layout()
plt.savefig("visual/mnist.png")
# plt.show()
def save_gan(model, ep, **kwargs):
name = model.__class__.__name__.lower()
if name in ["dcgan", "wgan", "wgangp", "lsgan", "wgandiv", "sagan", "pggan"]:
imgs = model.call(100, training=False).numpy()
_save_gan(name, ep, imgs, show_label=False)
elif name == "gan":
data = model.call(5, training=False).numpy()
plt.plot(data.T)
plt.xticks((), ())
dir_ = "visual/{}".format(name)
os.makedirs(dir_, exist_ok=True)
path = dir_ + "/{}.png".format(ep)
plt.savefig(path)
elif name == "cgan" or name == "acgan":
img_label = np.arange(0, 10).astype(np.int32).repeat(10, axis=0)
imgs = model.predict(img_label)
_save_gan(name, ep, imgs, show_label=True)
elif name in ["infogan"]:
img_label = np.arange(0, model.label_dim).astype(np.int32).repeat(10, axis=0)
img_style = np.concatenate(
[np.linspace(-model.style_scale, model.style_scale, 10)] * 10).reshape((100, 1)).repeat(model.style_dim, axis=1).astype(np.float32)
img_info = img_label, img_style
imgs = model.predict(img_info)
_save_gan(name, ep, imgs, show_label=False)
elif name in ["ccgan", "pix2pix"]:
if "img" not in kwargs:
raise ValueError
input_img = kwargs["img"][:100]
mask_width = np.random.randint(model.mask_range[0], model.mask_range[1], len(input_img))
mask = np.ones(input_img.shape, np.float32)
for i, w in enumerate(mask_width):
mask_xy = np.random.randint(0, model.img_shape[0] - w, 2)
x0, x1 = mask_xy[0], w + mask_xy[0]
y0, y1 = mask_xy[1], w + mask_xy[1]
mask[i, x0:x1, y0:y1] = 0
masked_img = input_img * mask
imgs = model.predict(masked_img)
_save_img2img_gan(name, ep, masked_img, imgs)
elif name == "cyclegan":
if "img6" not in kwargs or "img9" not in kwargs:
raise ValueError
img6, img9 = kwargs["img6"][:50], kwargs["img9"][:50]
img9_, img6_ = model.g12.call(img6, training=False), model.g21.call(img9, training=False)
img = np.concatenate((img6.numpy(), img9.numpy()), axis=0)
imgs = np.concatenate((img9_.numpy(), img6_.numpy()), axis=0)
_save_img2img_gan(name, ep, img, imgs)
elif name in ["srgan"]:
if "img" not in kwargs:
raise ValueError
input_img = kwargs["img"][:100]
imgs = model.predict(input_img)
_save_img2img_gan(name, ep, input_img, imgs)
elif name == "stylegan":
n = 12
global z1, z2 # z1 row, z2 col
if "z1" not in globals():
z1 = np.random.normal(0, 1, size=(n, 1, model.latent_dim))
if "z2" not in globals():
z2 = np.random.normal(0, 1, size=(n, 1, model.latent_dim))
imgs = model.predict([
np.concatenate(
(z1.repeat(n, axis=0).repeat(1, axis=1), np.repeat(np.concatenate([z2 for _ in range(n)], axis=0), 2, axis=1)),
axis=1),
np.zeros([len(z1)*n, model.img_shape[0], model.img_shape[1]], dtype=np.float32)])
z1_imgs = -model.predict([z1.repeat(model.n_style, axis=1), np.zeros([len(z1), model.img_shape[0], model.img_shape[1]], dtype=np.float32)])
z2_imgs = -model.predict([z2.repeat(model.n_style, axis=1), np.zeros([len(z2), model.img_shape[0], model.img_shape[1]], dtype=np.float32)])
imgs = np.concatenate([z2_imgs, imgs], axis=0)
rest_imgs = np.concatenate([np.ones([1, 28, 28, 1], dtype=np.float32), z1_imgs], axis=0)
for i in range(len(rest_imgs)):
imgs = np.concatenate([imgs[:i*(n+1)], rest_imgs[i:i+1], imgs[i*(n+1):]], axis=0)
_save_gan(name, ep, imgs, show_label=False, nc=n+1, nr=n+1)
else:
raise ValueError(name)
plt.clf()
plt.close()
def _img_recenter(img):
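    # map generator output from the [-1, 1] range back to pixel values in [0, 255]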
return (img + 1) * 255 / 2
def _save_img2img_gan(model_name, ep, img1, img2):
if not isinstance(img1, np.ndarray):
img1 = img1.numpy()
if not isinstance(img2, np.ndarray):
img2 = img2.numpy()
if img1.ndim > 3:
img1 = np.squeeze(img1, axis=-1)
if img2.ndim > 3:
img2 = np.squeeze(img2, axis=-1)
img1, img2 = _img_recenter(img1), _img_recenter(img2)
plt.clf()
nc, nr = 20, 10
plt.figure(0, (nc * 2, nr * 2))
i = 0
for c in range(0, nc, 2):
for r in range(nr):
n = r * nc + c
plt.subplot(nr, nc, n + 1)
plt.imshow(img1[i], cmap="gray")
plt.axis("off")
plt.subplot(nr, nc, n + 2)
plt.imshow(img2[i], cmap="gray_r")
plt.axis("off")
i += 1
plt.tight_layout()
dir_ = "visual/{}".format(model_name)
os.makedirs(dir_, exist_ok=True)
path = dir_ + "/{}.png".format(ep)
plt.savefig(path)
def _save_gan(model_name, ep, imgs, show_label=False, nc=10, nr=10):
if not isinstance(imgs, np.ndarray):
imgs = imgs.numpy()
if imgs.ndim > 3:
imgs = np.squeeze(imgs, axis=-1)
imgs = _img_recenter(imgs)
plt.clf()
plt.figure(0, (nc * 2, nr * 2))
for c in range(nc):
for r in range(nr):
i = r * nc + c
plt.subplot(nr, nc, i + 1)
plt.imshow(imgs[i], cmap="gray_r")
plt.axis("off")
if show_label:
plt.text(23, 26, int(r), fontsize=23)
plt.tight_layout()
dir_ = "visual/{}".format(model_name)
os.makedirs(dir_, exist_ok=True)
path = dir_ + "/{}.png".format(ep)
plt.savefig(path)
def infogan_comp():
import tensorflow as tf
from infogan import InfoGAN
STYLE_DIM = 2
LABEL_DIM = 10
RAND_DIM = 88
IMG_SHAPE = (28, 28, 1)
FIX_STD = True
model = InfoGAN(RAND_DIM, STYLE_DIM, LABEL_DIM, IMG_SHAPE, FIX_STD)
model.load_weights("./models/infogan/model.ckpt").expect_partial()
img_label = np.arange(0, 10).astype(np.int32).repeat(10, axis=0)
noise = tf.repeat(tf.random.normal((1, model.rand_dim)), len(img_label), axis=0)
def plot(noise, img_label, img_style, n):
img_label = tf.convert_to_tensor(img_label, dtype=tf.int32)
img_style = tf.convert_to_tensor(img_style, dtype=tf.float32)
imgs = model.g.call([noise, img_label, img_style], training=False).numpy()
if imgs.ndim > 3:
imgs = np.squeeze(imgs, axis=-1)
plt.clf()
nc, nr = 10, 10
plt.figure(0, (nc * 2, nr * 2))
for c in range(nc):
for r in range(nr):
i = r * nc + c
plt.subplot(nc, nr, i + 1)
plt.imshow(imgs[i], cmap="gray_r")
plt.axis("off")
plt.text(23, 26, int(r), fontsize=23)
plt.tight_layout()
model_name = model.__class__.__name__.lower()
dir_ = "visual/{}".format(model_name)
os.makedirs(dir_, exist_ok=True)
path = dir_ + "/style{}.png".format(n)
plt.savefig(path)
img_style = np.concatenate(
[np.linspace(-model.style_scale, model.style_scale, 10)] * 10).reshape((100, 1)).astype(np.float32)
plot(noise, img_label, np.concatenate((img_style, np.zeros_like(img_style)), axis=1), 1)
plot(noise, img_label, np.concatenate((np.zeros_like(img_style), img_style), axis=1), 2)
def cvt_gif(folders_or_gan, shrink=10):
if not isinstance(folders_or_gan, list):
folders_or_gan = [folders_or_gan.__class__.__name__.lower()]
for folder in folders_or_gan:
folder = "visual/"+folder
fs = [folder+"/" + f for f in os.listdir(folder)]
imgs = []
for f in sorted(fs, key=os.path.getmtime):
if not f.endswith(".png"):
continue
try:
int(os.path.basename(f).split(".")[0])
except ValueError:
continue
img = Image.open(f)
img = img.resize((img.width//shrink, img.height//shrink), Image.ANTIALIAS)
imgs.append(img)
path = "{}/generating.gif".format(folder)
if os.path.exists(path):
os.remove(path)
imgs[-1].save(path, append_images=imgs, optimize=False, save_all=True, duration=400, loop=0)
print("saved ", path)
if __name__ == "__main__":
# show_mnist(20)
# cgan_res()
# save_infogan(None, 1)
# infogan_comp()
cvt_gif(["wgangp", "wgandiv", "wgan", "cgan", "acgan", "dcgan", "lsgan", "infogan", "ccgan", "cyclegan", "pix2pix", "stylegan"]) |
py | 1a53449898fbaa8d80955126b9d1a10926a153b4 | # -*- coding: utf-8 -*-
import requests
from io import BytesIO
from pyzbar import pyzbar
from PIL import Image
import openpyxl
import os
import sys
def openpyxl_write(path):
workbook = openpyxl.load_workbook(path)
worksheet = workbook.worksheets[0]
for index, row in enumerate(worksheet.rows):
url = row[2].value
        if url == "二维码URL":  # skip the header row ("QR code URL")
continue
data_url = ""
try:
data_url= get_ewm(url)
except BaseException as e:
print url
if index==0 :
row[14].value = "URL"
else:
row[14].value = data_url
if index %200 ==0:
print index
workbook.save(filename=path)
def get_ewm(img_adds):
""" 读取二维码的内容: img_adds:二维码地址(可以是网址也可是本地地址 """
if os.path.isfile(img_adds):
        # load the QR code image from a local file
img = Image.open(img_adds)
else:
        # download the QR code image over the network and load it
rq_img = requests.get(img_adds).content
img = Image.open(BytesIO(rq_img))
    # img.show()  # display the image (for testing)
txt_list = pyzbar.decode(img)
for txt in txt_list:
barcodeData = txt.data.decode("utf-8")
return barcodeData
if __name__ == '__main__':
reload(sys)
sys.setdefaultencoding('utf-8')
openpyxl_write("./data2.xlsx")
|
py | 1a5344ce5e69fed8b5643d1fb516b4e704e7a258 | # Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from sklearn.manifold import TSNE
def plot(data, useful_embedding):
tsne_input = []
labels = []
curr_dim = 0
for list in data:
for word in list:
tsne_input.append(useful_embedding[word])
labels.append(str(curr_dim))
curr_dim += 1
tsne_input = np.array(tsne_input)
model = TSNE(n_components=2, init='pca')
tsne_result = model.fit_transform(tsne_input)
scatter(zip(tsne_result, labels))
def scatter(data):
import matplotlib.pyplot as plt
import matplotlib.cm as cm
dic = {}
for psi_2d, label in data:
if label == 'N/A':
continue
if label not in dic:
dic[label] = []
dic[label].append(psi_2d)
labels = list(dic.keys())
labels.sort(key=lambda l: len(dic[l]), reverse=True)
for label in labels[10:]:
del dic[label]
labels = dic.keys()
colors = cm.rainbow(np.linspace(0, 1, len(dic)))
plotpoints = []
for label, color in zip(labels, colors):
x = list(map(lambda s: s[0], dic[label]))
y = list(map(lambda s: s[1], dic[label]))
plotpoints.append(plt.scatter(x, y, color=color))
plt.show()
# returns latent dimensionality sized list of list of words
def read_dataset(clargs):
word_lines = []
# skip first 8 lines, information lines
info_lines = 8
file = open(clargs.data_file)
lines = file.readlines()
file.close()
# 6 lines for each dimension
dimensionality = (len(lines) - info_lines) / 6
start_line = info_lines + 1 # 0-based
for i in range(int(dimensionality)):
word_lines.append(lines[start_line + i * 6].strip())
list_of_words = []
import ast
for word_line in word_lines:
list_of_words.append(ast.literal_eval(word_line))
return list_of_words
# returns of a dict: token -> embedding (list of floats)
def get_useful_embedding(clargs, tokens):
file = open(clargs.embedding_file)
lines = file.readlines()
file.close()
embedding = {}
for line in lines:
splits = line.split(' ', 1)
embedding[splits[0]] = splits[1]
del lines
useful_embedding = {}
for token in tokens:
useful_embedding[token] = [float(i) for i in embedding[token].split(' ')]
del embedding
return useful_embedding
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('data_file', type=str)
parser.add_argument('embedding_file', type=str)
clargs = parser.parse_args()
data = read_dataset(clargs)
import itertools
tokens = list(itertools.chain.from_iterable(data))
useful_embedding = get_useful_embedding(clargs, tokens)
plot(data, useful_embedding)
|
py | 1a53453efd002819eeefdbdf13774c87719c0959 | # -*- coding:utf-8 -*-
import os
import warnings
import git
import torch
import torchvision.transforms as transforms
import yolov2
import visdom
from yolov2 import detection_loss_4_yolo
from torchsummary.torchsummary import summary
from utilities.dataloader import detection_collate
from utilities.dataloader import VOC
from utilities.utils import save_checkpoint
from utilities.utils import create_vis_plot
from utilities.utils import update_vis_plot
from utilities.utils import visualize_GT
from utilities.augmentation import Augmenter
from imgaug import augmenters as iaa
warnings.filterwarnings("ignore")
# plt.ion() # interactive mode
# model = torch.nn.DataParallel(net, device_ids=[0]).cuda()
def train(params):
# future work variable
dataset = params["dataset"]
input_height = params["input_height"]
input_width = params["input_width"]
data_path = params["data_path"]
val_data_path = params["val_data_path"]
val_datalist_path = params["val_datalist_path"]
datalist_path = params["datalist_path"]
class_path = params["class_path"]
batch_size = params["batch_size"]
num_epochs = params["num_epochs"]
learning_rate = params["lr"]
checkpoint_path = params["checkpoint_path"]
USE_AUGMENTATION = params["use_augmentation"]
USE_GTCHECKER = params["use_gtcheck"]
USE_VISDOM = params["use_visdom"]
USE_GITHASH = params["use_githash"]
num_class = params["num_class"]
num_gpus = [i for i in range(1)]
with open(class_path) as f:
class_list = f.read().splitlines()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
if (USE_GITHASH):
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha
short_sha = repo.git.rev_parse(sha, short=7)
if USE_VISDOM:
viz = visdom.Visdom(use_incoming_socket=False)
vis_title = 'YOLOv2'
vis_legend_Train = ['Train Loss']
vis_legend_Val = ['Val Loss']
iter_plot = create_vis_plot(viz, 'Iteration', 'Total Loss', vis_title, vis_legend_Train)
val_plot = create_vis_plot(viz, 'Iteration', 'Validation Loss', vis_title, vis_legend_Val)
# 2. Data augmentation setting
if (USE_AUGMENTATION):
seq = iaa.SomeOf(2, [
iaa.Multiply((1.2, 1.5)), # change brightness, doesn't affect BBs
iaa.Affine(
translate_px={"x": 3, "y": 10},
scale=(0.9, 0.9)
            ), # translate by 3/10px on the x/y axis and scale to 90%, affects BBs
iaa.AdditiveGaussianNoise(scale=0.1 * 255),
iaa.CoarseDropout(0.02, size_percent=0.15, per_channel=0.5),
iaa.Affine(rotate=45),
iaa.Sharpen(alpha=0.5)
])
else:
seq = iaa.Sequential([])
composed = transforms.Compose([Augmenter(seq)])
# 3. Load Dataset
# composed
# transforms.ToTensor
    # TODO: parse VOC when a datalist file is provided
# import pdb;pdb.set_trace()
train_dataset = VOC(root=data_path,transform=composed, class_path=class_path, datalist_path=datalist_path)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=detection_collate)
val_dataset = VOC(root=val_data_path,transform=composed, class_path=class_path, datalist_path=val_datalist_path)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=detection_collate)
# 5. Load YOLOv2
    net = yolov2.YOLOv2()
    print("device : ", device)
    # wrap the network with DataParallel; only move it to the GPU when CUDA is available
    if device.type == 'cpu':
        model = torch.nn.DataParallel(net)
    else:
        model = torch.nn.DataParallel(net, device_ids=num_gpus).cuda()
# 7.Train the model
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
# Train the model
total_step = len(train_loader)
total_train_step = num_epochs * total_step
# for epoch in range(num_epochs):
for epoch in range(1, num_epochs + 1):
        train_loss = 0
total_val_loss = 0
train_total_conf_loss = 0
train_total_xy_loss = 0
train_total_wh_loss = 0
train_total_c_loss = 0
val_total_conf_loss = 0
val_total_xy_loss = 0
val_total_wh_loss = 0
val_total_c_loss = 0
        if (epoch % 500 == 0) and (epoch < 1000):
learning_rate /= 10
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
if (epoch == 200) or (epoch == 400) or (epoch == 600) or (epoch == 20000) or (epoch == 30000):
scheduler.step()
model.train()
for i, (images, labels, sizes) in enumerate(train_loader):
current_train_step = (epoch) * total_step + (i + 1)
if USE_GTCHECKER:
visualize_GT(images, labels, class_list)
images = images.to(device)
labels = labels.to(device)
            # debugging leftovers: peek at two specific ground-truth grid cells
            dog = labels[0, 4, 7, :]
            human = labels[0, 6, 6, :]
# Forward pass
outputs = model(images)
# Calc Loss
one_loss,conf_loss,xy_loss,wh_loss,class_loss = detection_loss_4_yolo(outputs, labels, device.type)
# objness1_loss = detection_loss_4_yolo(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
one_loss.backward()
optimizer.step()
train_loss += one_loss.item()
train_total_conf_loss += conf_loss.item()
train_total_xy_loss += xy_loss.item()
train_total_wh_loss += wh_loss.item()
train_total_c_loss += class_loss.item()
        train_total_conf_loss = train_total_conf_loss / len(train_loader)
        train_total_xy_loss = train_total_xy_loss / len(train_loader)
        train_total_wh_loss = train_total_wh_loss / len(train_loader)
        train_total_c_loss = train_total_c_loss / len(train_loader)
        train_epoch_loss = train_loss / len(train_loader)
        # only touch the visdom plot when visdom was actually initialised
        if USE_VISDOM:
            update_vis_plot(viz, epoch + 1, train_epoch_loss, iter_plot, None, 'append')
model.eval()
with torch.no_grad():
for j, (v_images, v_labels, v_sizes) in enumerate(val_loader):
v_images = v_images.to(device)
v_labels = v_labels.to(device)
# Forward pass
v_outputs = model(v_images)
# Calc Loss
val_loss,conf_loss,xy_loss,wh_loss,class_loss = detection_loss_4_yolo(v_outputs, v_labels, device.type)
total_val_loss += val_loss.item()
val_total_conf_loss += conf_loss.item()
val_total_xy_loss += xy_loss.item()
val_total_wh_loss += wh_loss.item()
val_total_c_loss += class_loss.item()
        val_epoch_loss = total_val_loss / len(val_loader)
        val_total_conf_loss = val_total_conf_loss / len(val_loader)
        val_total_xy_loss = val_total_xy_loss / len(val_loader)
        val_total_wh_loss = val_total_wh_loss / len(val_loader)
        val_total_c_loss = val_total_c_loss / len(val_loader)
        if USE_VISDOM:
            update_vis_plot(viz, epoch + 1, val_epoch_loss, val_plot, None, 'append')
if (((current_train_step) % 100) == 0) or (current_train_step % 1 == 0 and current_train_step < 300):
print(
'epoch: [{}/{}], total step: [{}/{}], batch step [{}/{}], lr: {},one_loss: {:.4f},val_loss: {:.4f}'
.format(epoch + 1, num_epochs, current_train_step, total_train_step, i + 1, total_step,
([param_group['lr'] for param_group in optimizer.param_groups])[0],
one_loss,val_loss ))
print('train loss',train_epoch_loss,'val loss',val_epoch_loss)
print('train conf loss',train_total_conf_loss,'val conf loss',val_total_conf_loss)
print('train xy loss',train_total_xy_loss,'val xy loss',val_total_xy_loss)
print('train wh loss',train_total_wh_loss,'val wh loss',val_total_wh_loss)
print('train class loss',train_total_c_loss,'val class loss',val_total_c_loss)
if not USE_GITHASH:
short_sha = 'noHash'
# if ((epoch % 1000) == 0) and (epoch != 0):
# if ((epoch % 100) == 0) :
if ((epoch % 10) == 0) :
#if (one_loss <= 1) :
save_checkpoint({
'epoch': epoch + 1,
'arch': "YOLOv2",
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, False, filename=os.path.join(checkpoint_path, 'ckpt_{}_ep{:05d}_loss{:.04f}_lr{}.pth.tar'.format(short_sha, epoch, one_loss.item(), ([param_group['lr'] for param_group in optimizer.param_groups])[0])))
# print(dir(model))
filename = os.path.join(checkpoint_path, 'ckpt_{}_ep{:05d}_loss{:.04f}_lr{}model.pth.tar'.format(short_sha, epoch, one_loss.item(), ([param_group['lr'] for param_group in optimizer.param_groups])[0]))
torch.save(model.module.state_dict(),filename)
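# Hypothetical entry point (not part of this file): the surrounding project is
# assumed to build the params dict shown above and then call
#   train(params)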
|
py | 1a53458ad3f2c083e67a9049c90ccbebb2db289d | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestRangerUsersync(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "RANGER/0.4.0/package"
STACK_VERSION = "2.2"
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_usersync.py",
classname = "RangerUsersync",
command = "configure",
config_file="ranger-admin-default.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertNoMoreResources()
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_usersync.py",
classname = "RangerUsersync",
command = "start",
config_file="ranger-admin-default.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertResourceCalled('Execute', ('/usr/bin/ranger-usersync-start',),
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if = 'ps -ef | grep proc_rangerusersync | grep -v grep',
sudo = True,
)
self.assertNoMoreResources()
def test_stop_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_usersync.py",
classname = "RangerUsersync",
command = "stop",
config_file="ranger-admin-default.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', ('/usr/bin/ranger-usersync-stop',),
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
sudo = True
)
self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_usersync.py",
classname = "RangerUsersync",
command = "configure",
config_file="ranger-admin-secured.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertNoMoreResources()
def test_start_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_usersync.py",
classname = "RangerUsersync",
command = "start",
config_file="ranger-admin-secured.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertResourceCalled('Execute', ('/usr/bin/ranger-usersync-start',),
environment= {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if = 'ps -ef | grep proc_rangerusersync | grep -v grep',
sudo = True
)
self.assertNoMoreResources()
def test_stop_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_usersync.py",
classname = "RangerUsersync",
command = "stop",
config_file="ranger-admin-secured.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', ('/usr/bin/ranger-usersync-stop',),
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
sudo = True
)
self.assertNoMoreResources()
@patch("setup_ranger.setup_usersync")
def test_upgrade(self, setup_usersync_mock):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_usersync.py",
classname = "RangerUsersync",
command = "restart",
config_file="ranger-usersync-upgrade.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertTrue(setup_usersync_mock.called)
self.assertResourceCalled("Execute", ("/usr/bin/ranger-usersync-stop",),
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_67'},
sudo = True
)
self.assertResourceCalled("Execute", ('hdp-select', 'set', 'ranger-usersync', '2.2.2.0-2399'), sudo=True)
@patch("setup_ranger.setup_usersync")
def test_upgrade_23(self, setup_usersync_mock):
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/ranger-usersync-upgrade.json"
with open(config_file, "r") as f:
json_content = json.load(f)
json_content['commandParams']['version'] = '2.3.0.0-1234'
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_usersync.py",
classname = "RangerUsersync",
command = "restart",
config_dict = json_content,
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None), (0, None)],
mocks_dict = mocks_dict)
self.assertTrue(setup_usersync_mock.called)
self.assertResourceCalled("Execute", ("/usr/bin/ranger-usersync-stop",),
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_67'},
sudo = True)
self.assertResourceCalled("Execute", ('hdp-select', 'set', 'ranger-usersync', '2.3.0.0-1234'), sudo=True)
self.assertEquals(2, mocks_dict['call'].call_count)
self.assertEquals(1, mocks_dict['checked_call'].call_count)
self.assertEquals(
('conf-select', 'set-conf-dir', '--package', 'ranger-usersync', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('conf-select', 'create-conf-dir', '--package', 'ranger-usersync', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
def assert_configure_default(self):
self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/ranger-usersync/install.properties',
properties = self.getConfig()['configurations']['usersync-properties'],
)
self.assertResourceCalled('Execute', 'cd /usr/hdp/current/ranger-usersync && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/ranger-usersync/setup.sh',
logoutput = True,
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
)
self.assertResourceCalled('File', '/usr/bin/ranger-usersync-start',
owner = 'ranger',
)
self.assertResourceCalled('File', '/usr/bin/ranger-usersync-stop',
owner = 'ranger',
)
self.assertResourceCalled('File', '/usr/hdp/current/ranger-usersync/ranger-usersync-services.sh',
mode = 0755,
)
def assert_configure_secured(self):
self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/ranger-usersync/install.properties',
properties = self.getConfig()['configurations']['usersync-properties'],
)
self.assertResourceCalled('Execute', 'cd /usr/hdp/current/ranger-usersync && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/ranger-usersync/setup.sh',
logoutput = True,
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
)
self.assertResourceCalled('File', '/usr/bin/ranger-usersync-start',
owner = 'ranger',
)
self.assertResourceCalled('File', '/usr/bin/ranger-usersync-stop',
owner = 'ranger',
)
self.assertResourceCalled('File', '/usr/hdp/current/ranger-usersync/ranger-usersync-services.sh',
mode = 0755,
)
|
py | 1a534627086cb5af7643d10a81e566da3f557554 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "contour"
_path_str = "contour.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"minexponent",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
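    # Usage sketch (illustrative, using the upstream plotly API): colorbar
    # settings like these are normally supplied through the parent contour
    # trace rather than by constructing ColorBar directly, e.g.
    #   import plotly.graph_objects as go
    #   fig = go.Figure(go.Contour(z=[[1, 2], [3, 4]],
    #                              colorbar=dict(title="level", thickness=15)))
    #   fig.show()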
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
        Sets the width (in px) of the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
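    # Illustrative dtick settings, restating the cases described in the
    # docstring above (values are examples only):
    #   colorbar.dtick = 1          # linear axis: one tick per unit
    #   colorbar.dtick = "L0.5"     # with tick0=0.1: ticks at 0.1, 0.6, 1.1, ...
    #   colorbar.dtick = "M3"       # date axis: a tick every three months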
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
        Sets the length of the color bar. This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
        the color variation direction) is set in units of plot
        "fraction" or in "pixels". Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
        Sets the thickness of the color bar. This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`new_plotly.graph_objs.contour.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
new_plotly.graph_objs.contour.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-time-format#locale_format We add
one item to d3's date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
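    # Illustrative tickformat strings (d3-format / d3-time-format syntax, as
    # described above; examples only):
    #   colorbar.tickformat = ".2f"             # 3.14159 -> "3.14"
    #   colorbar.tickformat = "%H~%M~%S.%2f"    # 09:15:23.456 -> "09~15~23.46"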
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of new_plotly.graph_objs.contour.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[new_plotly.graph_objs.contour.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.data.contour.colorbar.tickformatstopdefaults),
sets the default property values to use for elements of
contour.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`new_plotly.graph_objs.contour.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
new_plotly.graph_objs.contour.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklabeloverflow
# -----------------
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
# ticklabelposition
# -----------------
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn.
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
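    # Illustrative use of tickmode together with tickvals/ticktext (per the
    # docstring above; values are examples only):
    #   colorbar.tickmode = "array"
    #   colorbar.tickvals = [0, 0.5, 1.0]
    #   colorbar.ticktext = ["low", "mid", "high"]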
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
        ticks are not drawn. If "outside" ("inside"), this axis' ticks are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`new_plotly.graph_objs.contour.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
new_plotly.graph_objs.contour.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use contour.colorbar.title.font instead.
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`new_plotly.graph_objs.contour.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use contour.colorbar.title.side instead.
Determines the location of color bar's title with respect to
the color bar. Note that the title's location used to be set by
the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
        Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
            units of plot "fraction" or in "pixels". Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`new_plotly.graph_objects.contour.colorba
r.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.contou
r.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
contour.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`new_plotly.graph_objects.contour.colorbar.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use contour.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use contour.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`new_plotly.graph_objs.contour.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
            units of plot "fraction" or in "pixels". Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`new_plotly.graph_objects.contour.colorba
r.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.contou
r.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
contour.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`new_plotly.graph_objects.contour.colorbar.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use contour.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use contour.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.contour.ColorBar
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.contour.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklabeloverflow", None)
_v = ticklabeloverflow if ticklabeloverflow is not None else _v
if _v is not None:
self["ticklabeloverflow"] = _v
_v = arg.pop("ticklabelposition", None)
_v = ticklabelposition if ticklabelposition is not None else _v
if _v is not None:
self["ticklabelposition"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
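# Usage sketch (hedged, not part of the generated module): how the
# constructor above is typically driven, assuming the package exposes the
# usual plotly-style entry points (go.Figure / go.Contour names are
# assumptions here; the data values are illustrative only).
#
#   import new_plotly.graph_objects as go
#
#   fig = go.Figure(
#       go.Contour(
#           z=[[1, 2, 3], [3, 2, 1]],
#           colorbar=go.contour.ColorBar(
#               title=dict(text="level", side="right"),  # preferred over deprecated titleside
#               tickmode="linear", tick0=0, dtick=0.5,    # linear ticks per the docstring above
#               len=0.8, lenmode="fraction",
#           ),
#       )
#   )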
|
py | 1a53463647df5fc0c186c5e1dac5508fa934fc1a | from __future__ import unicode_literals
import tablib
from django.test import TestCase
from import_export import instance_loaders
from import_export import resources
from core.models import Book
class CachedInstanceLoaderTest(TestCase):
def setUp(self):
self.resource = resources.modelresource_factory(Book)()
self.dataset = tablib.Dataset(headers=['id', 'name', 'author_email'])
self.book = Book.objects.create(name="Some book")
self.book2 = Book.objects.create(name="Some other book")
row = [str(self.book.pk), 'Some book', '[email protected]']
self.dataset.append(row)
self.instance_loader = instance_loaders.CachedInstanceLoader(
self.resource, self.dataset)
def test_all_instances(self):
self.assertTrue(self.instance_loader.all_instances)
self.assertEqual(len(self.instance_loader.all_instances), 1)
self.assertEqual(list(self.instance_loader.all_instances.keys()),
[self.book.pk])
def test_get_instance(self):
obj = self.instance_loader.get_instance(self.dataset.dict[0])
self.assertEqual(obj, self.book)
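# Hedged usage sketch: outside of tests, CachedInstanceLoader is normally
# selected through a resource's Meta options; ``instance_loader_class`` is
# assumed here to be the django-import-export hook for doing so.
#
#   class BookResource(resources.ModelResource):
#       class Meta:
#           model = Book
#           instance_loader_class = instance_loaders.CachedInstanceLoader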
|
py | 1a5346977a5b3f9f6ac4f40899653cb2d705d671 | import pytest
from cashaddress.convert import InvalidAddress
from freetx.format import (
address_to_public_key_hash, bytes_to_wif, coords_to_public_key,
get_version, point_to_public_key, public_key_to_coords,
public_key_to_address, verify_sig,
wif_checksum_check, wif_to_bytes
)
from .samples import (
BITCOIN_ADDRESS, BITCOIN_ADDRESS_COMPRESSED, BITCOIN_CASHADDRESS_PAY2SH,
BITCOIN_ADDRESS_TEST_COMPRESSED, BITCOIN_ADDRESS_TEST,
BITCOIN_CASHADDRESS_TEST_PAY2SH, PRIVATE_KEY_BYTES, PUBKEY_HASH,
BITCOIN_CASHADDRESS, BITCOIN_CASHADDRESS_COMPRESSED,
BITCOIN_CASHADDRESS_TEST, BITCOIN_CASHADDRESS_TEST_COMPRESSED,
PUBKEY_HASH_COMPRESSED, PUBLIC_KEY_COMPRESSED, PUBLIC_KEY_UNCOMPRESSED,
PUBLIC_KEY_X, PUBLIC_KEY_Y,
WALLET_FORMAT_COMPRESSED_MAIN, WALLET_FORMAT_COMPRESSED_TEST,
WALLET_FORMAT_MAIN, WALLET_FORMAT_TEST
)
VALID_SIGNATURE = (b'0E\x02!\x00\xd7y\xe0\xa4\xfc\xea\x88\x18sDit\x9d\x01\xf3'
b'\x03\xa0\xceO\xab\x80\xe8PY.*\xda\x11w|\x9fq\x02 u\xdaR'
b'\xd8\x84a\xad\xfamN\xae&\x91\xfd\xd6\xbd\xe1\xb0e\xfe\xf4'
b'\xc5S\xd9\x02D\x1d\x0b\xba\xe0=@')
INVALID_SIGNATURE = (b'0D\x02 `\x03D^\xa7\xab\xdc\xa6_\xb6&\xcbN\xc8S\xa4\xcf'
b'\x9a8\x02\x99\xc4\xe9\x02\xb3\x14k\xfa\x15J\xb9\x03\x02'
b' !\xfd\xb2\xa0:\xb3\xba\xb1\xdc\x1a]ZWb\xa5\x9d\x8a5\x1c'
b'\xaeQ.\xa7`\xf6V\x11\xf1\xe0iJ7')
SIGNATURE_HIGH_S = (b'0E\x02 \x18NeS,"r\x1e\x01?\xa5\xa8C\xe4\xba\x07x \xc9\xf6'
b'\x8fe\x17\xa3\x03\'\xac\xd8\x97\x97\x1b\xd0\x02!\x00\xdc^'
b'\xf2M\xe7\x0e\xbaz\xd3\xa3\x94\xcc\xef\x17\x04\xb2\xfb0!'
b'\n\xc3\x1fa3\x83\x01\xc9\xbf\xbd\r)\x82')
DATA = b'data'
class TestGetVersion:
def test_mainnet(self):
assert get_version(BITCOIN_CASHADDRESS) == 'main'
assert get_version(BITCOIN_CASHADDRESS_COMPRESSED) == 'main'
def test_testnet(self):
assert get_version(BITCOIN_CASHADDRESS_TEST) == 'test'
assert get_version(BITCOIN_CASHADDRESS_TEST_COMPRESSED) == 'test'
def test_invalid(self):
with pytest.raises(InvalidAddress):
get_version('dg2dNAjuezub6iJVPNML5pW5ZQvtA9ocL')
def test_mainnet_pay2sh(self):
with pytest.raises(ValueError):
get_version(BITCOIN_CASHADDRESS_PAY2SH)
def test_testnet_pay2sh(self):
with pytest.raises(ValueError):
get_version(BITCOIN_CASHADDRESS_TEST_PAY2SH)
class TestVerifySig:
def test_valid(self):
assert verify_sig(VALID_SIGNATURE, DATA, PUBLIC_KEY_COMPRESSED)
def test_invalid(self):
assert not verify_sig(INVALID_SIGNATURE, DATA, PUBLIC_KEY_COMPRESSED)
class TestBytesToWIF:
def test_mainnet(self):
assert bytes_to_wif(PRIVATE_KEY_BYTES) == WALLET_FORMAT_MAIN
def test_testnet(self):
assert bytes_to_wif(PRIVATE_KEY_BYTES, version='test') == WALLET_FORMAT_TEST
def test_compressed(self):
assert bytes_to_wif(PRIVATE_KEY_BYTES, compressed=True) == WALLET_FORMAT_COMPRESSED_MAIN
def test_compressed_testnet(self):
assert bytes_to_wif(
PRIVATE_KEY_BYTES, version='test', compressed=True
) == WALLET_FORMAT_COMPRESSED_TEST
class TestWIFToBytes:
def test_mainnet(self):
assert wif_to_bytes(WALLET_FORMAT_MAIN) == (PRIVATE_KEY_BYTES, False, 'main')
def test_testnet(self):
assert wif_to_bytes(WALLET_FORMAT_TEST) == (PRIVATE_KEY_BYTES, False, 'test')
def test_compressed(self):
assert wif_to_bytes(WALLET_FORMAT_COMPRESSED_MAIN) == (PRIVATE_KEY_BYTES, True, 'main')
def test_invalid_network(self):
with pytest.raises(ValueError):
wif_to_bytes(BITCOIN_ADDRESS)
class TestWIFChecksumCheck:
def test_wif_checksum_check_main_success(self):
assert wif_checksum_check(WALLET_FORMAT_MAIN)
def test_wif_checksum_check_test_success(self):
assert wif_checksum_check(WALLET_FORMAT_TEST)
def test_wif_checksum_check_compressed_success(self):
assert wif_checksum_check(WALLET_FORMAT_COMPRESSED_MAIN)
def test_wif_checksum_check_decode_failure(self):
assert not wif_checksum_check(BITCOIN_ADDRESS[:-1])
def test_wif_checksum_check_other_failure(self):
assert not wif_checksum_check(BITCOIN_ADDRESS)
class TestPublicKeyToCoords:
def test_public_key_to_coords_compressed(self):
assert public_key_to_coords(PUBLIC_KEY_COMPRESSED) == (PUBLIC_KEY_X, PUBLIC_KEY_Y)
def test_public_key_to_coords_uncompressed(self):
assert public_key_to_coords(PUBLIC_KEY_UNCOMPRESSED) == (PUBLIC_KEY_X, PUBLIC_KEY_Y)
def test_public_key_to_coords_incorrect_length(self):
with pytest.raises(ValueError):
public_key_to_coords(PUBLIC_KEY_COMPRESSED[1:])
class TestPublicKeyToAddress:
def test_public_key_to_address_compressed(self):
assert public_key_to_address(PUBLIC_KEY_COMPRESSED) == BITCOIN_CASHADDRESS_COMPRESSED
def test_public_key_to_address_uncompressed(self):
assert public_key_to_address(PUBLIC_KEY_UNCOMPRESSED) == BITCOIN_CASHADDRESS
def test_public_key_to_address_incorrect_length(self):
with pytest.raises(ValueError):
public_key_to_address(PUBLIC_KEY_COMPRESSED[:-1])
def test_public_key_to_address_test_compressed(self):
assert public_key_to_address(PUBLIC_KEY_COMPRESSED, version='test') == BITCOIN_CASHADDRESS_TEST_COMPRESSED
def test_public_key_to_address_test_uncompressed(self):
assert public_key_to_address(PUBLIC_KEY_UNCOMPRESSED, version='test') == BITCOIN_CASHADDRESS_TEST
class TestCoordsToPublicKey:
def test_coords_to_public_key_compressed(self):
assert coords_to_public_key(PUBLIC_KEY_X, PUBLIC_KEY_Y) == PUBLIC_KEY_COMPRESSED
def test_coords_to_public_key_uncompressed(self):
assert coords_to_public_key(PUBLIC_KEY_X, PUBLIC_KEY_Y, compressed=False) == PUBLIC_KEY_UNCOMPRESSED
def test_point_to_public_key():
class Point:
x = PUBLIC_KEY_X
y = PUBLIC_KEY_Y
assert point_to_public_key(Point) == coords_to_public_key(Point.x, Point.y)
def test_address_to_public_key_hash():
assert address_to_public_key_hash(BITCOIN_CASHADDRESS) == PUBKEY_HASH
assert address_to_public_key_hash(BITCOIN_CASHADDRESS_COMPRESSED) == PUBKEY_HASH_COMPRESSED
with pytest.raises(ValueError):
address_to_public_key_hash(BITCOIN_CASHADDRESS_PAY2SH)
with pytest.raises(ValueError):
address_to_public_key_hash(BITCOIN_CASHADDRESS_TEST_PAY2SH)
|
py | 1a53473a785d7b26ce8cb62e34cd14367899012d | # Copyright 2018 Gregory Szorc <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from .node import (
bin,
hex,
)
from .i18n import _
from .thirdparty import (
attr,
)
from . import (
error,
util,
)
from .utils import (
interfaceutil,
)
# Names of the SSH protocol implementations.
SSHV1 = 'ssh-v1'
# These are advertised over the wire. Increment the counters at the end
# to reflect BC breakages.
SSHV2 = 'exp-ssh-v2-0003'
HTTP_WIREPROTO_V2 = 'exp-http-v2-0003'
NARROWCAP = 'exp-narrow-1'
ELLIPSESCAP = 'exp-ellipses-1'
# All available wire protocol transports.
TRANSPORTS = {
SSHV1: {
'transport': 'ssh',
'version': 1,
},
SSHV2: {
'transport': 'ssh',
# TODO mark as version 2 once all commands are implemented.
'version': 1,
},
'http-v1': {
'transport': 'http',
'version': 1,
},
HTTP_WIREPROTO_V2: {
'transport': 'http',
'version': 2,
}
}
class bytesresponse(object):
"""A wire protocol response consisting of raw bytes."""
def __init__(self, data):
self.data = data
class ooberror(object):
"""wireproto reply: failure of a batch of operation
Something failed during a batch call. The error message is stored in
`self.message`.
"""
def __init__(self, message):
self.message = message
class pushres(object):
"""wireproto reply: success with simple integer return
The call was successful and returned an integer contained in `self.res`.
"""
def __init__(self, res, output):
self.res = res
self.output = output
class pusherr(object):
"""wireproto reply: failure
The call failed. The `self.res` attribute contains the error message.
"""
def __init__(self, res, output):
self.res = res
self.output = output
class streamres(object):
"""wireproto reply: binary stream
The call was successful and the result is a stream.
Accepts a generator containing chunks of data to be sent to the client.
``prefer_uncompressed`` indicates that the data is expected to be
uncompressable and that the stream should therefore use the ``none``
engine.
"""
def __init__(self, gen=None, prefer_uncompressed=False):
self.gen = gen
self.prefer_uncompressed = prefer_uncompressed
class streamreslegacy(object):
"""wireproto reply: uncompressed binary stream
The call was successful and the result is a stream.
Accepts a generator containing chunks of data to be sent to the client.
Like ``streamres``, but sends an uncompressed data for "version 1" clients
using the application/mercurial-0.1 media type.
"""
def __init__(self, gen=None):
self.gen = gen
# list of nodes encoding / decoding
def decodelist(l, sep=' '):
if l:
return [bin(v) for v in l.split(sep)]
return []
def encodelist(l, sep=' '):
try:
return sep.join(map(hex, l))
except TypeError:
raise
# batched call argument encoding
def escapebatcharg(plain):
return (plain
.replace(':', ':c')
.replace(',', ':o')
.replace(';', ':s')
.replace('=', ':e'))
def unescapebatcharg(escaped):
return (escaped
.replace(':e', '=')
.replace(':s', ';')
.replace(':o', ',')
.replace(':c', ':'))
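# Illustrative round trip (hedged, not part of the upstream module): the
# substitutions above are order-sensitive (':' is escaped first and
# unescaped last), so the mapping is reversible:
#
#   escapebatcharg('a=b;c,d:e')       -> 'a:eb:sc:od:ce'
#   unescapebatcharg('a:eb:sc:od:ce') -> 'a=b;c,d:e'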
# mapping of options accepted by getbundle and their types
#
# Meant to be extended by extensions. It is extensions responsibility to ensure
# such options are properly processed in exchange.getbundle.
#
# supported types are:
#
# :nodes: list of binary nodes
# :csv: list of comma-separated values
# :scsv: list of comma-separated values return as set
# :plain: string with no transformation needed.
GETBUNDLE_ARGUMENTS = {
'heads': 'nodes',
'bookmarks': 'boolean',
'common': 'nodes',
'obsmarkers': 'boolean',
'phases': 'boolean',
'bundlecaps': 'scsv',
'listkeys': 'csv',
'cg': 'boolean',
'cbattempted': 'boolean',
'stream': 'boolean',
}
class baseprotocolhandler(interfaceutil.Interface):
"""Abstract base class for wire protocol handlers.
A wire protocol handler serves as an interface between protocol command
handlers and the wire protocol transport layer. Protocol handlers provide
methods to read command arguments, redirect stdio for the duration of
the request, handle response types, etc.
"""
name = interfaceutil.Attribute(
"""The name of the protocol implementation.
Used for uniquely identifying the transport type.
""")
def getargs(args):
"""return the value for arguments in <args>
For version 1 transports, returns a list of values in the same
order they appear in ``args``. For version 2 transports, returns
a dict mapping argument name to value.
"""
def getprotocaps():
"""Returns the list of protocol-level capabilities of client
Returns a list of capabilities as declared by the client for
the current request (or connection for stateful protocol handlers)."""
def getpayload():
"""Provide a generator for the raw payload.
The caller is responsible for ensuring that the full payload is
processed.
"""
def mayberedirectstdio():
"""Context manager to possibly redirect stdio.
The context manager yields a file-object like object that receives
stdout and stderr output when the context manager is active. Or it
yields ``None`` if no I/O redirection occurs.
The intent of this context manager is to capture stdio output
so it may be sent in the response. Some transports support streaming
stdio to the client in real time. For these transports, stdio output
won't be captured.
"""
def client():
"""Returns a string representation of this client (as bytes)."""
def addcapabilities(repo, caps):
"""Adds advertised capabilities specific to this protocol.
Receives the list of capabilities collected so far.
Returns a list of capabilities. The passed in argument can be returned.
"""
def checkperm(perm):
"""Validate that the client has permissions to perform a request.
The argument is the permission required to proceed. If the client
doesn't have that permission, the exception should raise or abort
in a protocol specific manner.
"""
class commandentry(object):
"""Represents a declared wire protocol command."""
def __init__(self, func, args='', transports=None,
permission='push', cachekeyfn=None, extracapabilitiesfn=None):
self.func = func
self.args = args
self.transports = transports or set()
self.permission = permission
self.cachekeyfn = cachekeyfn
self.extracapabilitiesfn = extracapabilitiesfn
def _merge(self, func, args):
"""Merge this instance with an incoming 2-tuple.
This is called when a caller using the old 2-tuple API attempts
to replace an instance. The incoming values are merged with
data not captured by the 2-tuple and a new instance containing
the union of the two objects is returned.
"""
return commandentry(func, args=args, transports=set(self.transports),
permission=self.permission)
# Old code treats instances as 2-tuples. So expose that interface.
def __iter__(self):
yield self.func
yield self.args
def __getitem__(self, i):
if i == 0:
return self.func
elif i == 1:
return self.args
else:
raise IndexError('can only access elements 0 and 1')
class commanddict(dict):
"""Container for registered wire protocol commands.
It behaves like a dict. But __setitem__ is overwritten to allow silent
coercion of values from 2-tuples for API compatibility.
"""
def __setitem__(self, k, v):
if isinstance(v, commandentry):
pass
# Cast 2-tuples to commandentry instances.
elif isinstance(v, tuple):
if len(v) != 2:
raise ValueError('command tuples must have exactly 2 elements')
# It is common for extensions to wrap wire protocol commands via
# e.g. ``wireproto.commands[x] = (newfn, args)``. Because callers
# doing this aren't aware of the new API that uses objects to store
# command entries, we automatically merge old state with new.
if k in self:
v = self[k]._merge(v[0], v[1])
else:
# Use default values from @wireprotocommand.
v = commandentry(v[0], args=v[1],
transports=set(TRANSPORTS),
permission='push')
else:
raise ValueError('command entries must be commandentry instances '
'or 2-tuples')
return super(commanddict, self).__setitem__(k, v)
def commandavailable(self, command, proto):
"""Determine if a command is available for the requested protocol."""
assert proto.name in TRANSPORTS
entry = self.get(command)
if not entry:
return False
if proto.name not in entry.transports:
return False
return True
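# Hedged sketch of the 2-tuple coercion described in __setitem__ above
# (``hellofunc`` and ``wrappedhello`` are hypothetical extension functions):
#
#   commands = commanddict()
#   commands['hello'] = commandentry(hellofunc, args='', permission='pull')
#   # legacy extension style: a bare (func, args) tuple is merged into the
#   # existing entry, preserving its transports and permission
#   commands['hello'] = (wrappedhello, '')
#   assert isinstance(commands['hello'], commandentry)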
def supportedcompengines(ui, role):
"""Obtain the list of supported compression engines for a request."""
assert role in (util.CLIENTROLE, util.SERVERROLE)
compengines = util.compengines.supportedwireengines(role)
# Allow config to override default list and ordering.
if role == util.SERVERROLE:
configengines = ui.configlist('server', 'compressionengines')
config = 'server.compressionengines'
else:
# This is currently implemented mainly to facilitate testing. In most
# cases, the server should be in charge of choosing a compression engine
# because a server has the most to lose from a sub-optimal choice. (e.g.
# CPU DoS due to an expensive engine or a network DoS due to poor
# compression ratio).
configengines = ui.configlist('experimental',
'clientcompressionengines')
config = 'experimental.clientcompressionengines'
# No explicit config. Filter out the ones that aren't supposed to be
# advertised and return default ordering.
if not configengines:
attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
return [e for e in compengines
if getattr(e.wireprotosupport(), attr) > 0]
# If compression engines are listed in the config, assume there is a good
# reason for it (like server operators wanting to achieve specific
# performance characteristics). So fail fast if the config references
# unusable compression engines.
validnames = set(e.name() for e in compengines)
invalidnames = set(e for e in configengines if e not in validnames)
if invalidnames:
raise error.Abort(_('invalid compression engine defined in %s: %s') %
(config, ', '.join(sorted(invalidnames))))
compengines = [e for e in compengines if e.name() in configengines]
compengines = sorted(compengines,
key=lambda e: configengines.index(e.name()))
if not compengines:
raise error.Abort(_('%s config option does not specify any known '
'compression engines') % config,
hint=_('usable compression engines: %s') %
', '.join(sorted(validnames)))
return compengines
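# For reference, the server-side override consumed above comes from hgrc,
# for example:
#
#   [server]
#   compressionengines = zstd, zlib
#
# Unknown engine names abort with the error raised above; the listed names
# also control the advertised ordering via configengines.index().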
@attr.s
class encodedresponse(object):
"""Represents response data that is already content encoded.
Wire protocol version 2 only.
Commands typically emit Python objects that are encoded and sent over the
wire. If commands emit an object of this type, the encoding step is bypassed
and the content from this object is used instead.
"""
data = attr.ib()
@attr.s
class alternatelocationresponse(object):
"""Represents a response available at an alternate location.
Instances are sent in place of actual response objects when the server
is sending a "content redirect" response.
Only compatible with wire protocol version 2.
"""
url = attr.ib()
mediatype = attr.ib()
size = attr.ib(default=None)
fullhashes = attr.ib(default=None)
fullhashseed = attr.ib(default=None)
serverdercerts = attr.ib(default=None)
servercadercerts = attr.ib(default=None)
@attr.s
class indefinitebytestringresponse(object):
"""Represents an object to be encoded to an indefinite length bytestring.
Instances are initialized from an iterable of chunks, with each chunk being
a bytes instance.
"""
chunks = attr.ib()
|
py | 1a5347acfa12569b5cc2053d91055d1e8b8723ec | #
# findPath.py finds the optimal path, p
#
# created by: Samuel King
#
from . import getRandomData, field, timeCosts
class Path ( field.Field ) :
def __init__ ( self ) :
super( Path, self ).__init__()
self.__finalPathString = []
self.__finalPathCoords = []
self.__overallTimeCost = 0.0
self.__zLen = self.getzLen()
self.__timeCosts = timeCosts.TimeCosts()
# find opt path, p, s.t. p.overallTimeCost is minimized
def findOverallPath ( self ) :
fieldFloor = self.getFieldFloor()
itemLocations = self.getItemLocations()
oneItemLocation = itemLocations[0]
start = ( oneItemLocation[0], oneItemLocation[1], oneItemLocation[2] + 2 )
# find opt path to item
pathToItem = self.findPathTrajectoryToItem( fieldFloor, start, False, start, False )
# Recursively find cost by flipping the graph on its head and traversing it from finish to start.
# This will allow us to avoid issues of getting stuck behind a 'mountain' and not taking the optimal path.
def findPathTrajectoryToItem ( self, fieldFloor, startLocation, pickedUp, currCoords, finished ) :
try :
self.__finalPathCoords.append( currCoords )
# return final path
if finished == True :
return self.__finalPathString.insert( 0, "Start!" )
# fist pick up the item
if startLocation == currCoords and pickedUp == False :
self.__overallTimeCost += self.__timeCosts.pickupTimeCost
self.__finalPathString.insert( 0, "Pick up Item from " + str( startLocation ) )
return self.findPathTrajectoryToItem( fieldFloor, startLocation, True, currCoords, False )
# can move diagonally
if currCoords[0] - 1 >= 0 and \
currCoords[1] - 1 >= 0 and \
fieldFloor[ currCoords[0] - 1 ][ currCoords[1] - 1 ] < currCoords[2] and \
fieldFloor[ currCoords[0] - 1 ][ currCoords[1] ] < currCoords[2] and \
fieldFloor[ currCoords[0] ][ currCoords[1] - 1 ] < currCoords[2] :
self.__overallTimeCost += self.__timeCosts.moveDiagTimeCost
nextCoords = ( currCoords[0] - 1, currCoords[1] - 1, currCoords[2] )
self.__finalPathString.insert( 0, "Move Diagonally from " + str( nextCoords ) )
fin = nextCoords == ( 0, 0, self.__zLen )
return self.findPathTrajectoryToItem( fieldFloor, startLocation, True, nextCoords, fin )
# can move forward
if currCoords[0] - 1 >= 0 and fieldFloor[ currCoords[0] - 1 ][ currCoords[1] ] < currCoords[2] :
self.__overallTimeCost += self.__timeCosts.moveForwardTimeCost
nextCoords = ( currCoords[0] - 1, currCoords[1], currCoords[2] )
self.__finalPathString.insert( 0, "Move Forward from " + str( nextCoords ) )
fin = nextCoords == ( 0, 0, self.__zLen )
return self.findPathTrajectoryToItem( fieldFloor, startLocation, True, nextCoords, fin )
# can move to the right
if currCoords[1] - 1 >= 0 and fieldFloor[ currCoords[0] ][ currCoords[1] - 1 ] < currCoords[2] :
self.__overallTimeCost += self.__timeCosts.moveSideTimeCost
nextCoords = ( currCoords[0], currCoords[1] - 1, currCoords[2] )
self.__finalPathString.insert( 0, "Move To the Right from " + str( nextCoords ) )
fin = nextCoords == ( 0, 0, self.__zLen )
return self.findPathTrajectoryToItem( fieldFloor, startLocation, True, nextCoords, fin )
# can move down
if currCoords[2] + 1 <= self.__zLen :
self.__overallTimeCost += self.__timeCosts.moveDownTimeCost
nextCoords = ( currCoords[0], currCoords[1], currCoords[2] + 1 )
self.__finalPathString.insert( 0, "Move Down from " + str( nextCoords ) )
fin = nextCoords == ( 0, 0, self.__zLen )
return self.findPathTrajectoryToItem( fieldFloor, startLocation, True, nextCoords, fin )
raise ValueError("INVALID PATH!")
except ValueError as e :
print( "There was an invalid path at " + str(startLocation) + str(pickedUp) + str(currCoords) + str(finished))
# setter methods
def changeOverallTimeCost ( self, cost ) :
self.__overallTimeCost += cost
def changeTimeCosts (
self,
moveDiagTimeCost,
moveForwardTimeCost,
moveSideTimeCost,
moveUpTimeCost,
moveDownTimeCost,
pickupTimeCost ) :
self.__timeCosts.setTimeCosts(
moveDiagTimeCost,
moveForwardTimeCost,
moveSideTimeCost,
moveUpTimeCost,
moveDownTimeCost,
pickupTimeCost
)
# getter methods
def getOverallTimeCost ( self ) :
return self.__overallTimeCost
def getFinalPathCoords ( self ) :
return self.__finalPathCoords
def getFinalPathString ( self ) :
return self.__finalPathString
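# Hedged usage sketch (field.Field and its getRandomData-backed setup are
# assumed to populate the field floor and item locations elsewhere in the
# package; the cost values below are illustrative only):
#
#   p = Path()
#   p.changeTimeCosts(1.5, 1.0, 1.0, 2.0, 0.5, 3.0)
#   p.findOverallPath()
#   print(p.getFinalPathString())
#   print(p.getOverallTimeCost())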
|
py | 1a534885d10d93f897af12806d93cd2d15ad1818 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow import DAG
from airflow.models import TaskInstance
from airflow.contrib.operators.spark_submit_operator import SparkSubmitOperator
from airflow.utils import timezone
from datetime import timedelta
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestSparkSubmitOperator(unittest.TestCase):
_config = {
'conf': {
'parquet.compression': 'SNAPPY'
},
'files': 'hive-site.xml',
'py_files': 'sample_library.py',
'archives': 'sample_archive.zip#SAMPLE',
'driver_class_path': 'parquet.jar',
'jars': 'parquet.jar',
'packages': 'com.databricks:spark-avro_2.11:3.2.0',
'exclude_packages': 'org.bad.dependency:1.0.0',
'repositories': 'http://myrepo.org',
'total_executor_cores': 4,
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'principal': 'user/[email protected]',
'proxy_user': 'sample_user',
'name': '{{ task_instance.task_id }}',
'num_executors': 10,
'verbose': True,
'application': 'test_application.py',
'driver_memory': '3g',
'java_class': 'com.foo.bar.AppMain',
'application_args': [
'-f', 'foo',
'--bar', 'bar',
'--start', '{{ macros.ds_add(ds, -1)}}',
'--end', '{{ ds }}',
'--with-spaces', 'args should keep embedded spaces',
]
}
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG('test_dag_id', default_args=args)
def test_execute(self):
# Given / When
conn_id = 'spark_default'
operator = SparkSubmitOperator(
task_id='spark_submit_job',
spark_binary="sparky",
dag=self.dag,
**self._config
)
# Then expected results
expected_dict = {
'conf': {
'parquet.compression': 'SNAPPY'
},
'files': 'hive-site.xml',
'py_files': 'sample_library.py',
'archives': 'sample_archive.zip#SAMPLE',
'driver_class_path': 'parquet.jar',
'jars': 'parquet.jar',
'packages': 'com.databricks:spark-avro_2.11:3.2.0',
'exclude_packages': 'org.bad.dependency:1.0.0',
'repositories': 'http://myrepo.org',
'total_executor_cores': 4,
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'principal': 'user/[email protected]',
'proxy_user': 'sample_user',
'name': '{{ task_instance.task_id }}',
'num_executors': 10,
'verbose': True,
'application': 'test_application.py',
'driver_memory': '3g',
'java_class': 'com.foo.bar.AppMain',
'application_args': [
'-f', 'foo',
'--bar', 'bar',
'--start', '{{ macros.ds_add(ds, -1)}}',
'--end', '{{ ds }}',
'--with-spaces', 'args should keep embedded spaces',
],
'spark_binary': 'sparky'
}
self.assertEqual(conn_id, operator._conn_id)
self.assertEqual(expected_dict['application'], operator._application)
self.assertEqual(expected_dict['conf'], operator._conf)
self.assertEqual(expected_dict['files'], operator._files)
self.assertEqual(expected_dict['py_files'], operator._py_files)
self.assertEqual(expected_dict['archives'], operator._archives)
self.assertEqual(expected_dict['driver_class_path'], operator._driver_class_path)
self.assertEqual(expected_dict['jars'], operator._jars)
self.assertEqual(expected_dict['packages'], operator._packages)
self.assertEqual(expected_dict['exclude_packages'], operator._exclude_packages)
self.assertEqual(expected_dict['repositories'], operator._repositories)
self.assertEqual(expected_dict['total_executor_cores'],
operator._total_executor_cores)
self.assertEqual(expected_dict['executor_cores'], operator._executor_cores)
self.assertEqual(expected_dict['executor_memory'], operator._executor_memory)
self.assertEqual(expected_dict['keytab'], operator._keytab)
self.assertEqual(expected_dict['principal'], operator._principal)
self.assertEqual(expected_dict['proxy_user'], operator._proxy_user)
self.assertEqual(expected_dict['name'], operator._name)
self.assertEqual(expected_dict['num_executors'], operator._num_executors)
self.assertEqual(expected_dict['verbose'], operator._verbose)
self.assertEqual(expected_dict['java_class'], operator._java_class)
self.assertEqual(expected_dict['driver_memory'], operator._driver_memory)
self.assertEqual(expected_dict['application_args'], operator._application_args)
self.assertEqual(expected_dict['spark_binary'], operator._spark_binary)
def test_render_template(self):
# Given
operator = SparkSubmitOperator(task_id='spark_submit_job',
dag=self.dag, **self._config)
ti = TaskInstance(operator, DEFAULT_DATE)
# When
ti.render_templates()
# Then
expected_application_args = [u'-f', 'foo',
u'--bar', 'bar',
u'--start', (DEFAULT_DATE - timedelta(days=1))
.strftime("%Y-%m-%d"),
u'--end', DEFAULT_DATE.strftime("%Y-%m-%d"),
u'--with-spaces',
u'args should keep embedded spaces',
]
expected_name = 'spark_submit_job'
self.assertListEqual(expected_application_args,
getattr(operator, '_application_args'))
self.assertEqual(expected_name, getattr(operator, '_name'))
if __name__ == '__main__':
unittest.main()
|
py | 1a5348c64c96607c6fc43c88e3c81f25855a6feb | # Here's some new strange stuff, remember type it exactly.
days = "Mon Tue Wed Thu Fri Sat Sun"
months = "Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
print "Here are the days:", days
print "Here are the months:", months
print '''
There's something going on here.
With the three double-quotes.
We'll be able to type as much as we like.
Even 4 lines if we want, or 5, or 6
'''
|
py | 1a53492f1365ae234d31bb79ceb1f94b8bb131b4 | #!/usr/bin/env python
import unittest
import socket
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath
from scapy.layers.l2 import Ether, Raw
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6
class TestMAP(VppTestCase):
""" MAP Test Case """
def setUp(self):
super(TestMAP, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(4))
# pg0 is 'inside' IPv4
self.pg0.admin_up()
self.pg0.config_ip4()
self.pg0.resolve_arp()
# pg1 is 'outside' IPv6
self.pg1.admin_up()
self.pg1.config_ip6()
self.pg1.generate_remote_hosts(4)
self.pg1.configure_ipv6_neighbors()
def tearDown(self):
super(TestMAP, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.admin_down()
def send_and_assert_encapped(self, tx, ip6_src, ip6_dst, dmac=None):
if not dmac:
dmac = self.pg1.remote_mac
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(1)
rx = rx[0]
self.assertEqual(rx[Ether].dst, dmac)
self.assertEqual(rx[IP].src, tx[IP].src)
self.assertEqual(rx[IPv6].src, ip6_src)
self.assertEqual(rx[IPv6].dst, ip6_dst)
def test_map_e(self):
""" MAP-E """
#
# Add a route to the MAP-BR
#
map_br_pfx = "2001::"
map_br_pfx_len = 64
map_route = VppIpRoute(self,
map_br_pfx,
map_br_pfx_len,
[VppRoutePath(self.pg1.remote_ip6,
self.pg1.sw_if_index,
proto=DpoProto.DPO_PROTO_IP6)],
is_ip6=1)
map_route.add_vpp_config()
#
# Add a domain that maps from pg0 to pg1
#
map_dst = socket.inet_pton(socket.AF_INET6, map_br_pfx)
map_src = "3001::1"
map_src_n = socket.inet_pton(socket.AF_INET6, map_src)
client_pfx = socket.inet_pton(socket.AF_INET, "192.168.0.0")
self.vapi.map_add_domain(map_dst,
map_br_pfx_len,
map_src_n,
128,
client_pfx,
16)
#
# Fire in a v4 packet that will be encapped to the BR
#
v4 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IP(src=self.pg0.remote_ip4, dst='192.168.1.1') /
UDP(sport=20000, dport=10000) /
Raw('\xa5' * 100))
self.send_and_assert_encapped(v4, map_src, "2001::c0a8:0:0")
#
# Fire in a V6 encapped packet.
# expect a decapped packet on the inside ip4 link
#
p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
IPv6(dst=map_src, src="2001::1") /
IP(dst=self.pg0.remote_ip4, src='192.168.1.1') /
UDP(sport=20000, dport=10000) /
Raw('\xa5' * 100))
self.pg1.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
rx = rx[0]
self.assertFalse(rx.haslayer(IPv6))
self.assertEqual(rx[IP].src, p[IP].src)
self.assertEqual(rx[IP].dst, p[IP].dst)
#
# Pre-resolve. No API for this!!
#
self.vapi.ppcli("map params pre-resolve ip6-nh 4001::1")
self.send_and_assert_no_replies(self.pg0, v4,
"resovled via default route")
#
# Add a route to 4001::1. Expect the encapped traffic to be
# sent via that routes next-hop
#
pre_res_route = VppIpRoute(
self, "4001::1", 128,
[VppRoutePath(self.pg1.remote_hosts[2].ip6,
self.pg1.sw_if_index,
proto=DpoProto.DPO_PROTO_IP6)],
is_ip6=1)
pre_res_route.add_vpp_config()
self.send_and_assert_encapped(v4, map_src,
"2001::c0a8:0:0",
dmac=self.pg1.remote_hosts[2].mac)
#
# change the route to the pre-solved next-hop
#
pre_res_route.modify([VppRoutePath(self.pg1.remote_hosts[3].ip6,
self.pg1.sw_if_index,
proto=DpoProto.DPO_PROTO_IP6)])
pre_res_route.add_vpp_config()
self.send_and_assert_encapped(v4, map_src,
"2001::c0a8:0:0",
dmac=self.pg1.remote_hosts[3].mac)
#
# cleanup. The test infra's object registry will ensure
# the route is really gone and thus that the unresolve worked.
#
pre_res_route.remove_vpp_config()
self.vapi.ppcli("map params pre-resolve del ip6-nh 4001::1")
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
py | 1a5349b3d82987b38f7189ba07fb1dbb34e1827f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Project: JD-Script / 测试_test
Author: Curtin
Function: invite 5 users to earn 60 beans (at most 10 times / 600 beans per day); each invitee earns 30 beans for completing the card opening. One-time task: ck1 assists the Author, the other accounts assist ck1.
Date: 2021/11/14 6:21 PM
Telegram group: https://t.me/topstyle996
Telegram channel: https://t.me/TopStyle2021
cron: 30 6,12,15,20 11-17 11 *
new Env('品牌联合开卡 11.11-11.17');
Activity entry: 16:/#A5eHpAAyC12xuX%,☂
'''
import requests
import random
import re
import sys
from time import sleep
import datetime
from urllib.parse import quote
try:
from jd_cookie import getJDCookie
getCk = getJDCookie()
except:
print("请先下载依赖脚本,\n下载链接:https://ghproxy.com/https://raw.githubusercontent.com/kongbg/JD-Script/main/jd_tool_dl.py")
sys.exit(3)
if datetime.datetime.now() > datetime.datetime.strptime('2021-11-18', "%Y-%m-%d"):
print("品牌联合开卡 11.11-11.17---活动结束\n请删掉脚本:jd_kk_test.py")
exit(3)
UserAgent = ''
activityId='96475ceebdf0418ab524c9bc68a789e8'
def userAgent():
"""
Randomly generate a User-Agent string.
:return:
"""
if not UserAgent:
uuid = ''.join(random.sample('123456789abcdef123456789abcdef123456789abcdef123456789abcdef', 40))
iosVer = ''.join(random.sample(["14.5.1", "14.4", "14.3", "14.2", "14.1", "14.0.1", "13.7", "13.1.2", "13.1.1"], 1))
iPhone = ''.join(random.sample(["8", "9", "10", "11", "12", "13"], 1))
return f'jdapp;iPhone;10.0.4;{iosVer};{uuid};network/wifi;ADID/8679C062-A41A-4A25-88F1-50A7A3EEF34A;model/iPhone{iPhone},1;addressid/3723896896;appBuild/167707;jdSupportDarkMode/0'
else:
return UserAgent
def isvObfuscator(ck):
headers = {
'J-E-H': '%7B%22ciphertype%22:5,%22cipher%22:%7B%22User-Agent%22:%22IuG0aVLeb25vBzO2Dzq2CyUyCMrfUQrlbwU7TJSmaU9JTJSmCJUkCJivCtLJY2PiZI8zBtKmAG==%22%7D,%22ts%22:1636865800,%22hdid%22:%22JM9F1ywUPwflvMIpYPok0tt5k9kW4ArJEU3lfLhxBqw=%22,%22version%22:%221.0.3%22,%22appname%22:%22com.360buy.jdmobile%22,%22ridx%22:-1%7D',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': 'JD4iPhone/167863%20(iPhone;%20iOS;%20Scale/3.00)',
'Cookie': ck,
'Host': 'api.m.jd.com',
'Referer': '',
'J-E-C': '%7B%22ciphertype%22:5,%22cipher%22:%7B%22pin%22:%22TUU5TJuyTJvQTUU3TUOnTJu1TUU1TUSmTUSnTUU2TJu4TUPQTUU0TUS4TJrOTUU1TUSmTJq2TUU1TUSmTUSn%22%7D,%22ts%22:1636884564,%22hdid%22:%22JM9F1ywUPwflvMIpYPok0tt5k9kW4ArJEU3lfLhxBqw=%22,%22version%22:%221.0.3%22,%22appname%22:%22com.360buy.jdmobile%22,%22ridx%22:-1%7D',
'Accept-Language': 'zh-Hans-CN;q=1',
'Accept': '*/*'
}
url = 'https://api.m.jd.com/client.action?functionId=isvObfuscator'
body = 'body={"url":"https:\/\/cjhydz-isv.isvjcloud.com","id":""}&build=167863&client=apple&clientVersion=10.2.2&d_brand=apple&d_model=iPhone14,3&ef=1&eid=&ep={"ciphertype":5,"cipher":{"screen":"CJS4DMeyDzc4","wifiBssid":"","osVersion":"CJUkCG==","area":"","openudid":"DtVwZtvvZJcmZwPtDtc5DJSmCtZvDzLsCzK2DJG2DtU1EWG5Dzc2ZK==","uuid":""},"ts":1636884530,"hdid":"","version":"1.0.3","appname":"com.360buy.jdmobile","ridx":-1}&ext={"prstate":"0"}&isBackground=N&joycious=67&lang=zh_CN&networkType=wifi&networklibtype=JDNetworkBaseAF&partner=apple&rfs=0000&scope=10&sign=0a635010067282017044162e187af9a7&st=1636884564653&sv=112&uemps=0-0'
resp = requests.post(url=url, headers=headers, data=body).json()
if resp['code'] == '0':
return resp['token']
else:
return ''
def buildheaders(ck):
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/view/index/5986361?activityId=96475ceebdf0418ab524c9bc68a789e8&inviter=kNwcKz+y+wjfE/yhJf7Ph2cLh8yR0FTTtPtNBwC7New+Y72eTaNK0sHryLjn2YvU&inviterImg=http://storage.360buyimg.com/i.imageUpload/31333435303133353830315f7031363134333838323331343238_mid.jpg&inviterNickName=Curtinlv&shareuserid4minipg=kNwcKz%2By%2BwjfE%2FyhJf7Ph2cLh8yR0FTTtPtNBwC7New%2BY72eTaNK0sHryLjn2YvU&shopid=599119&lng=113.367448&lat=23.112787&sid=6ed3dcfe7c0bb6992246a5771fac1aaw&un_area=19_1601_3633_63243'
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Cookie': ck,
'Connection': 'keep-alive',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Host': 'cjhydz-isv.isvjcloud.com',
'User-Agent': userAgent(),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9'
}
resp = requests.get(url, headers)
LZ_TOKEN = re.findall(r'(LZ_TOKEN_KEY=.*?;).*?(LZ_TOKEN_VALUE=.*?;)', resp.headers['Set-Cookie'])
return LZ_TOKEN[0][0]+LZ_TOKEN[0][1]
def getMyPing(ck):
sleep(1)
cookie = buildheaders(ck)
token = isvObfuscator(ck)
url = 'https://cjhydz-isv.isvjcloud.com/customer/getMyPing'
headers = {
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://cjhydz-isv.isvjcloud.com',
'User-Agent': userAgent(),
'Cookie': cookie,
'Host': 'cjhydz-isv.isvjcloud.com',
'Referer': 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/view/index/5986361?activityId=96475ceebdf0418ab524c9bc68a789e8&',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
'Accept': 'application/json'
}
body = f'userId=599119&token={token}&fromType=APP&riskType=1'
resp = requests.post(url=url, headers=headers, data=body)
try:
pin = resp.json()['data']['pin']
secretPin = resp.json()['data']['secretPin']
userid = resp.json()['data']['id']
yunMidImageUrl = resp.json()['data']['yunMidImageUrl']
except Exception as e:
print("建议请稍等再试~", e)
sys.exit(1)
LZ_TOKEN_KEY = re.findall(r'(LZ_TOKEN_KEY=.*?;)', resp.headers['Set-Cookie'])[0]
LZ_TOKEN_VALUE = re.findall(r'(LZ_TOKEN_VALUE=.*?;)', resp.headers['Set-Cookie'])[0]
AUTH_C_USER = re.findall(r'(AUTH_C_USER=.*?;)', resp.headers['Set-Cookie'])[0]
headers = {
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://cjhydz-isv.isvjcloud.com',
'User-Agent': userAgent(),
'Cookie': LZ_TOKEN_KEY+LZ_TOKEN_VALUE+AUTH_C_USER+'APP_ABBR=CJHY;__jd_ref_cls=Mnpm_ComponentApplied;',
'Host': 'cjhydz-isv.isvjcloud.com',
'Referer': 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/view/index/5986361?activityId=96475ceebdf0418ab524c9bc68a789e8&',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
'Accept': 'application/json'
}
return headers, pin, secretPin, userid, yunMidImageUrl
def accessLog(headers, body):
url = 'https://cjhydz-isv.isvjcloud.com/common/accessLog'
resp = requests.post(url=url, headers=headers, data=quote(body))
if resp.status_code == 200:
print('\t└accessLog ---> success')
else:
print('\t└accessLog ---> error')
def getOpenCardAllStatuesNew(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/getOpenCardAllStatuesNew'
body = f'activityId={activityId}&pin={secretPin}&isInvited=1'
resp = requests.post(url=url, headers=headers, data=body).json()
if resp['result']:
shoplist = resp['data']['list']
venderIdList = []
shopIdList = []
channelList = []
shopNameList = []
for i in shoplist:
if not i['statue']:
openCardLink = i['openCardLink']
shopid = re.findall(r'shopId=(\d+)', openCardLink)[0]
venderId = re.findall(r'venderId=(\d+)', openCardLink)[0]
channel = re.findall(r'channel=(\d+)', openCardLink)[0]
shopIdList.append(shopid)
venderIdList.append(venderId)
channelList.append(channel)
shopNameList.append(i['shopName'])
return shopIdList, venderIdList, channelList, shopNameList
def getShopOpenCardInfo(headers, venderId, channe):
url = f'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=getShopOpenCardInfo&body=%7B%22venderId%22%3A%22{venderId}%22%2C%22payUpShop%22%3Atrue%2C%22channel%22%3A{channe}%7D&client=H5&clientVersion=9.2.0&uuid=88888'
resp = requests.get(url=url, headers=headers).json()
if resp['result']['interestsRuleList']:
activityId = resp['result']['interestsRuleList'][0]['interestsInfo']['activityId']
return activityId
else:
return None
def bindWithVender(ck, inviterNickName, inviter):
headers = {
'Cookie': ck,
'Accept': '*/*',
'Connection': 'keep-alive',
'Referer': 'https://shopmember.m.jd.com/',
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'api.m.jd.com',
'User-Agent': userAgent(),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9'
}
shopIdList, venderIdList, channelList, shopNameList= getOpenCardAllStatuesNew(ck)
for shopId,venderId,channe,shopName in zip(shopIdList, venderIdList, channelList, shopNameList):
shopcard_url = f'https://shopmember.m.jd.com/shopcard/?venderId={venderId}&shopId={shopId}&channel={channe}&returnUrl=https%3A%2F%2Fcjhydz-isv.isvjcloud.com%2FmicroDz%2Finvite%2Factivity%2Fwx%2Fview%2Findex%2F5986361%3FactivityId%3D{activityId}%26inviter%3D{inviter}%26inviterImg%3D%26inviterNickName%3D{inviterNickName}%26shareuserid4minipg%3D{inviter}%26shopid%3D599119%26lng%3D113.%26lat%3D23.%26sid%3D%26un_area%3D'
requests.get(url=shopcard_url, headers=headers)
sleep(1)
shopactivityId = getShopOpenCardInfo(headers, venderId, channe)
print("shopactivityId:", shopactivityId)
sleep(1)
bindWithVender_url = f'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=bindWithVender&body=%7B%22venderId%22%3A%22{venderId}%22%2C%22shopId%22%3A%22{shopId}%22%2C%22bindByVerifyCodeFlag%22%3A1%2C%22registerExtend%22%3A%7B%7D%2C%22writeChildFlag%22%3A0%2C%22activityId%22%3A{shopactivityId}%2C%22channel%22%3A{channe}%7D&client=H5&clientVersion=9.2.0&uuid=88888&'
resp = requests.get(url=bindWithVender_url, headers=headers).json()
print(f"\t└去开卡【{shopName}】")
if resp['success']:
print(f"\t\t└{resp['message']}")
else:
pass
print(f"\t└完成开卡获得30豆,京东明显查询【微定制-邀请瓜分京豆】。")
def getActivityInfo(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/getActivityInfo'
body = f'activityId={activityId}'
resp = requests.post(url, headers=headers, data=body).json()
# print(resp)
def isInvited(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/isInvited'
body = f'activityId={activityId}&pin={secretPin}'
resp = requests.post(url=url, headers=headers, data=body).json()
print(resp)
# exit(3)
# print(resp)
def inviteRecord(headers, inviter):
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/inviteRecord'
body = f'activityId={activityId}&inviter={inviter}&pageNo=1&pageSize=15&type=0'
resp = requests.post(url=url, headers=headers, data=body).json()
# print(resp)
def acceptInvite(headers, pin, secretPin, inviter, inviterNick, yunMidImageUrl):
inviteRecord(headers, inviter)
body = f'venderId=&code=99&pin={pin}&activityId={activityId}&pageUrl=https%3A%2F%2Fcjhydz-isv.isvjcloud.com%2FmicroDz%2Finvite%2Factivity%2Fwx%2Fview%2Findex%2F5986361%3FactivityId%3D{activityId}%26inviter%3D{inviter}%26inviterImg%3D%26inviterNickName%3D{inviterNick}%26shareuserid4minipg%3D{inviter}%26shopid%3D599119%26lng%3D%26lat%3D%26sid%3D%26un_area%3D&subType='
accessLog(headers, body)
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/acceptInvite'
body1 = f'activityId={activityId}&inviter={inviter}&inviterImg=&inviterNick={quote(inviterNick)}&invitee={secretPin}&inviteeImg={yunMidImageUrl}&inviteeNick={quote(pin)}'
headers['Referer'] = f'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/view/index/5986361?activityId={activityId}&inviter={inviter}&inviterImg=&inviterNickName={inviterNick}&shareuserid4minipg={inviter}&shopid=599119&lng=113.&lat=23.&sid=6ed3dcfe7c0bb6992246a5771fac1aaw&un_area=19_1601_3633_63243'
resp = requests.post(url=url, headers=headers, data=body1).json()
print(f"\t└{resp['errorMessage']}")
def miniProgramShareInfo(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/miniProgramShareInfo/getInfo?activityId=96475ceebdf0418ab524c9bc68a789e8'
resp = requests.get(url=url, headers=headers).json()
# print(resp)
def getSimpleActInfoVo(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/customer/getSimpleActInfoVo'
body = f'activityId={activityId}'
resp = requests.post(url=url, headers=headers, data=body).json()
# print(resp)
def getSystemConfig(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/wxCommonInfo/getSystemConfig'
body = f'activityId={activityId}'
resp = requests.post(url=url, headers=headers, data=body).json()
# print(resp)
def start():
global MasterPin, Mastersecret
cookieList, nameList = getCk.iscookie()
a = 1
try:
for ck, user in zip(cookieList, nameList):
headers, pin, secret, userid, yunMidImageUrl = getMyPing(ck)
print(f"## 用户{a}【{user}】")
getSystemConfig(ck)
getSimpleActInfoVo(ck)
getActivityInfo(ck)
isInvited(ck)
if a == 1:
MasterPin = pin
Mastersecret = secret
print(f"用户{a}[{pin}]>>助力>>>[Curtinlv]")
acceptInvite(headers, MasterPin, Mastersecret, '2vlPNpSNPs2zwEu+07zbf8+iQEinB57W5aMO3vKdRy0Jah8sXZOcx4hozgiV81Rt697ulbLIDOIodMQ2RvALQQ==', 'Curtinlv', yunMidImageUrl)
bindWithVender(ck, MasterPin, Mastersecret)
a += 1
sleep(60)
continue
print(f"用户{a}[{pin}]>>助力>>>[{MasterPin}]")
acceptInvite(headers, pin, secret, Mastersecret, MasterPin, yunMidImageUrl)
body = f'venderId=&code=99&pin={secret}%253D%253D&activityId={activityId}&pageUrl=https%3A%2F%2Fcjhydz-isv.isvjcloud.com%2FmicroDz%2Finvite%2Factivity%2Fwx%2Fview%2Findex%2F5986361%3FactivityId%3D{activityId}%26inviter%3D{Mastersecret}%26inviterImg%3Dhttp%3A%2F%2Fstorage.360buyimg.com%2Fi.imageUpload%2F31333435303133353830315f7031363134333838323331343238_mid.jpg%26inviterNickName%3D{MasterPin}%26shareuserid4minipg%3D{Mastersecret}%26shopid%3D599119%26lng%3D113.%26lat%3D23.%26sid%3D%26un_area%3D&subType='
accessLog(headers,body)
bindWithVender(ck, MasterPin, Mastersecret)
sleep(60)
a += 1
    except Exception as e:
        # Don't swallow failures silently; report the error before exiting the loop.
        print(f"Error: {e}")
if __name__ == '__main__':
try:
start()
except:
print("网络异常,请稍等再试~\n") |
py | 1a534adfca2fd450186fb9ff1e5583316dcbaf6c | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('course', '0016_flowpagevisitgrade_graded_at_git_commit_sha'),
]
operations = [
migrations.AddField(
model_name='event',
name='all_day',
field=models.BooleanField(default=False, help_text='Only affects the rendering in the class calendar, in that a start time is not shown'),
preserve_default=True,
),
]
|
py | 1a534baed5c9dd49456b32e1f62febe3a653a0c8 | # terrascript/opnsense/__init__.py
# Automatically generated by tools/makecode.py ()
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class opnsense(terrascript.Provider):
pass
|
py | 1a534e301bf02c9cb601dc601c15c5efeea8faaf | from django.shortcuts import render,HttpResponse
from urllib import parse
from apps.test_case.services.HTTP_test_caseService import HTTP_test_caseService
from apps.test_case.services.HTTP_test_case_stepService import HTTP_test_case_stepService
from apps.interface.services.HTTP_interfaceService import HTTP_interfaceService
from apps.test_case.services.HTTP_test_case_debugService import HTTP_test_case_debugService
from apps.test_case.services.HTTP_test_case_step_debugService import HTTP_test_case_step_debugService
from apps.common.config import commonWebConfig
from apps.common.func.CommonFunc import *
from apps.common.func.LanguageFunc import *
from apps.config.services.businessLineService import BusinessService
from apps.config.services.modulesService import ModulesService
from apps.config.services.uriService import UriService
from apps.config.services.serviceConfService import ServiceConfService
from apps.config.services.http_confService import HttpConfService
from apps.config.views.http_conf import getDebugBtn
from apps.common.func.WebFunc import *
from AutotestWebD.settings import isRelease
import json,time
from apps.version_manage.services.common_service import VersionService
def http_testCaseStepCheck(request):
context = {}
context["testCaseStepCheck"] = "current-page"
context["userName"] = request.session.get("userName")
context["checkBusinessLine"] = dbModelListToListDict(BusinessService.getAllBusinessLine())
context["checkModules"] = dbModelListToListDict(ModulesService.getAllModules())
if not isRelease:
context["env"] = "test"
    # Text labels for the page
text = {}
text["pageTitle"] = "HTTP用例步骤查看"
context["text"] = text
context["page"] = 1
return render(request,"InterfaceTest/HTTPTestCase/HTTP_testCaseStep_check.html",context)
def http_testCaseStepListCheck(request):
page = request.POST.get("page")
if isInt(page):
page = int(page)
else:
return HttpResponse("<script>alert('请验证页数参数');</script>")
checkArr = json.loads(parse.unquote(request.POST.get("checkVal")))
orderBy = request.POST.get("orderBy")
if isSqlInjectable(orderBy):
return HttpResponse("<script>alert('查询条件非法');</script>")
    # Decide which table to read data from based on the version (added by Wang Jiliang, 2018-02-24)
if VersionService.isCurrentVersion(request):
tbName = "tb_http_testcase_step"
versionCondition = ""
else:
tbName = "tb_version_http_testcase_step"
versionCondition = "and versionName='%s'" % request.session.get("version")
execSql = "SELECT t.*,u.userName,m.moduleName,b.bussinessLineName,mu.userName modByName,tc.id tid from %s t LEFT JOIN tb_user mu ON t.modBy = mu.loginName LEFT JOIN tb_modules m on t.moduleId = m.id LEFT JOIN tb_business_line b on t.businessLineId = b.id LEFT JOIN tb_user u ON t.addBy = u.loginName LEFT JOIN tb_http_testcase tc ON t.caseId = tc.caseId WHERE 1=1 and t.state=1 %s" % (tbName,versionCondition)
checkList = []
for key in checkArr:
if checkArr[key] == "":
continue
elif key == "caseFounder" :
checkList.append("%%%s%%" % checkArr[key])
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and (t.addBy LIKE %s or u.userName LIKE %s) """
continue
elif key == "module":
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and m.moduleName LIKE %s """
continue
elif key == "businessLine":
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and b.bussinessLineName LIKE %s """
continue
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and t.%s """ % key
execSql += """ LIKE %s"""
execSql += """ ORDER BY %s""" % orderBy
context = pagination(sqlStr=execSql,attrList=checkList,page=page,pageNum=commonWebConfig.testCasePageNum)
response = render(request, "InterfaceTest/HTTPTestCase/SubPages/HTTP_testCaseStep_list_check_page.html",context)
return response
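# Illustrative shape of the POSTed "checkVal" payload handled above (values are assumptions):
#   {"caseFounder": "zhang", "module": "login", "businessLine": "trade"}
# Each non-empty entry is appended to execSql as a parameterized LIKE condition.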
|
py | 1a534ed3d566519a33a5bff3f192ed4ed916f5d2 | import unittest
from formation import AppBuilder
from formation.tests.support import get_resource
class CanvasTestCase(unittest.TestCase):
builder = None
@classmethod
def setUpClass(cls) -> None:
cls.builder = AppBuilder(path=get_resource("canvas.xml"))
cls.canvas1 = cls.builder.canvas1
cls.canvas2 = cls.builder.canvas2
def test_loading(self):
self.assertEqual(len(self.canvas1.find_all()), 19)
self.assertEqual(len(self.canvas2.find_all()), 6)
def test_line(self):
line = self.builder.cv1_line
coords = self.canvas1.coords(line)
self.assertListEqual(
list(coords),
[25, 33, 292, 33, 382, 128, 542, 128, 542, 226]
)
def test_polygon(self):
poly = self.builder.cv1_polygon
coords = self.canvas1.coords(poly)
self.assertListEqual(
list(coords),
[68, 216, 67, 284, 151, 339, 366, 340, 448, 272, 448, 216]
)
self.assertEqual(self.canvas1.itemcget(poly, "fill"), "#1d731d")
def test_rectangle(self):
rec = self.builder.cv2_rectangle
coords = self.canvas2.coords(rec)
self.assertListEqual(list(coords), [372, 88, 423, 136])
self.assertEqual(self.canvas2.itemcget(rec, "stipple"), "gray12")
self.assertEqual(self.canvas2.itemcget(rec, "fill"), "#1d731d")
def test_oval(self):
circle = self.builder.cv1_circle2
coords = self.canvas1.coords(circle)
self.assertListEqual(list(coords), [177, 59, 288, 169])
self.assertEqual(self.canvas1.itemcget(circle, "stipple"), "gray12")
self.assertEqual(self.canvas1.itemcget(circle, "fill"), "#ff0000")
self.assertEqual(self.canvas1.itemcget(circle, "outline"), "#1d731d")
def test_arc(self):
arc = self.builder.cv2_arc1
coords = self.canvas2.coords(arc)
self.assertListEqual(list(coords), [78, 37, 190, 133])
self.assertEqual(float(self.canvas2.itemcget(arc, "extent")), 90.0)
self.assertEqual(float(self.canvas2.itemcget(arc, "start")), 0.0)
self.assertEqual(self.canvas2.itemcget(arc, "style"), "pieslice")
def test_image(self):
image = self.builder.cv1_image
self.assertListEqual(list(self.canvas1.coords(image)), [472, 67])
self.assertTrue(bool(self.canvas1.itemcget(image, "image")))
def test_bitmap(self):
bit = self.builder.cv1_bitmap
self.assertListEqual(list(self.canvas1.coords(bit)), [84, 115])
self.assertEqual(self.canvas1.itemcget(bit, "bitmap"), "gray12")
self.assertEqual(self.canvas1.itemcget(bit, "anchor"), "nw")
self.assertEqual(self.canvas1.itemcget(bit, "background"), "#1d731d")
def test_text(self):
text = self.builder.cv2_text
self.assertListEqual(list(self.canvas2.coords(text)), [280, 114])
self.assertEqual(self.canvas2.itemcget(text, "text"), "yet another layout")
self.assertEqual(self.canvas2.itemcget(text, "fill"), "#1d731d")
|
py | 1a534f0259d686ee53e630b4dfc8e58e40ce1c88 | import numpy as np
from cmath import sqrt
import qutip as qt
from operators import *
tol = 1e-16
def solvePoly(vec):
roots = np.empty(2, dtype=np.complex128)
vec[1]=2*vec[1]
if abs(vec[0]) <= tol:
roots[0] = np.inf
if abs(vec[1]) <= tol:
roots[1] = np.inf
else:
roots[1] = -vec[2]/vec[1]
else:
roots[0] = -0.5*(vec[1]+sqrt(vec[1]**2-4*vec[0]*vec[2]))/vec[0]
roots[1] = -vec[1]/vec[0] - roots[0]
return roots
def root_to_xyz(root):
    # Map a polynomial root onto the Bloch sphere via the inverse stereographic
    # projection, using the same convention as getStars below.
    if root == np.inf:
        return [0, 0, -1]
    x = root.real
    y = root.imag
    den = 1/(1. + (x**2) + (y**2))
    return [2*x*den, 2*y*den, (1. - (x**2) - (y**2))*den]
def getStars(vec):
#converts 3-spinor into two stars
roots = np.empty(2, dtype=np.complex128)
stars = [[],[],[]] #stores x, y and z coordinates
vec[1] *= -np.sqrt(2)
if abs(vec[0]) <= tol:
roots[0] = np.inf
if abs(vec[1]) <= tol:
roots[1] = np.inf
else:
roots[1] = -vec[2]/vec[1]
else:
roots[0] = -0.5*(vec[1] + sqrt(vec[1]**2-4*vec[0]*vec[2]))/vec[0]
roots[1] = -vec[1]/vec[0] - roots[0]
for r in roots:
if r == np.inf:
stars[0].append(0)
stars[1].append(0)
stars[2].append(-1)
else:
x = r.real
y = r.imag
den = 1/(1.+(x**2)+(y**2))
stars[0].append(2*x*den)
stars[1].append(2*y*den)
stars[2].append((1.-(x**2)-(y**2))*den)
return stars
print(getStars([1,0,1]))
b = qt.Bloch()
b.point_color = ['b','b','r','r','g','g','#CC6600','#CC6600'] #ensures point and line are same colour
b.add_points(getStars([1,sqrt(2),1]))
#b.add_points(getStars([1/sqrt(2),0,1/sqrt(2)]),meth='l')
b.xlabel = ['$<F_x>$','']
b.ylabel = ['$<F_y>$','']
b.zlabel = ['$<F_z>$','']
#b.add_points([[0,0],[-1,1],[0,0]], meth='l')
#b.add_points([[-1,1],[0,0],[0,0]], meth='l')
#b.add_points([0,0])
#b.add_points([0,0,-1])
b.show()
|
py | 1a534f5217bcfe79f6ed371bd8c538b539a7c1db | from django.urls import path
from . import views
from django.conf.urls.static import static
urlpatterns = [
path('register/', views.RegisterView.as_view()),
path('verify_magiclink/',views.VerifyMagicLink.as_view(), name='magiclink-verify'),
path('verify_otp/', views.VerifyOTP.as_view(), name='otp-verify')
] |
py | 1a534f5c97097dce2ecb9ef636cddeeba7c363aa | from tutorial.bot import Bot
def main():
bot = Bot()
bot.run()
if __name__ == "__main__":
main() |
py | 1a53500226bf31cbbde473693585be17bd5e62e4 | # -*- coding: utf-8 -*-
import glob
import os
import codecs
import math
from collections import Counter, defaultdict
from itertools import chain, cycle
import torch
import torchtext.data
from torchtext.data import Field
from torchtext.vocab import Vocab
from onmt.inputters.text_dataset import text_fields, TextMultiField
from onmt.inputters.image_dataset import image_fields
from onmt.inputters.audio_dataset import audio_fields
from onmt.utils.logging import logger
# backwards compatibility
from onmt.inputters.text_dataset import _feature_tokenize # noqa: F401
from onmt.inputters.image_dataset import ( # noqa: F401
batch_img as make_img)
import gc
# monkey-patch to make torchtext Vocab's pickleable
def _getstate(self):
return dict(self.__dict__, stoi=dict(self.stoi))
def _setstate(self, state):
self.__dict__.update(state)
self.stoi = defaultdict(lambda: 0, self.stoi)
Vocab.__getstate__ = _getstate
Vocab.__setstate__ = _setstate
def make_src(data, vocab):
#print('in make src', data ,' vocab',vocab)
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
def make_tgt(data, vocab):
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
def get_fields(
src_data_type,
n_src_feats,
n_tgt_feats,
pad='<blank>',
bos='<s>',
eos='</s>',
dynamic_dict=False,
src_truncate=None,
tgt_truncate=None
):
"""
Args:
src_data_type: type of the source input. Options are [text|img|audio].
n_src_feats (int): the number of source features (not counting tokens)
to create a :class:`torchtext.data.Field` for. (If
``src_data_type=="text"``, these fields are stored together
as a ``TextMultiField``).
n_tgt_feats (int): See above.
pad (str): Special pad symbol. Used on src and tgt side.
bos (str): Special beginning of sequence symbol. Only relevant
for tgt.
eos (str): Special end of sequence symbol. Only relevant
for tgt.
dynamic_dict (bool): Whether or not to include source map and
alignment fields.
src_truncate: Cut off src sequences beyond this (passed to
``src_data_type``'s data reader - see there for more details).
tgt_truncate: Cut off tgt sequences beyond this (passed to
:class:`TextDataReader` - see there for more details).
Returns:
A dict mapping names to fields. These names need to match
the dataset example attributes.
"""
assert src_data_type in ['text', 'img', 'audio'], \
"Data type not implemented"
assert not dynamic_dict or src_data_type == 'text', \
'it is not possible to use dynamic_dict with non-text input'
fields = {}
fields_getters = {"text": text_fields,
"img": image_fields,
"audio": audio_fields}
src_field_kwargs = {"n_feats": n_src_feats,
"include_lengths": True,
"pad": pad, "bos": None, "eos": None,
"truncate": src_truncate,
"base_name": "src"}
fields["src"] = fields_getters[src_data_type](**src_field_kwargs)
tgt_field_kwargs = {"n_feats": n_tgt_feats,
"include_lengths": False,
"pad": pad, "bos": bos, "eos": eos,
"truncate": tgt_truncate,
"base_name": "tgt"}
fields["tgt"] = fields_getters["text"](**tgt_field_kwargs)
indices = Field(use_vocab=False, dtype=torch.long, sequential=False)
fields["indices"] = indices
if dynamic_dict:
src_map = Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
fields["src_map"] = src_map
align = Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["alignment"] = align
return fields
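# Illustrative usage of get_fields (argument values here are assumptions, not from this module):
#   fields = get_fields("text", n_src_feats=0, n_tgt_feats=0, dynamic_dict=True)
#   # -> dict with "src", "tgt", "indices", plus "src_map"/"alignment" for copy attention.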
def load_old_vocab(vocab, data_type="text", dynamic_dict=False):
"""Update a legacy vocab/field format.
Args:
vocab: a list of (field name, torchtext.vocab.Vocab) pairs. This is the
format formerly saved in *.vocab.pt files. Or, text data
not using a :class:`TextMultiField`.
data_type (str): text, img, or audio
dynamic_dict (bool): Used for copy attention.
Returns:
a dictionary whose keys are the field names and whose values Fields.
"""
if _old_style_vocab(vocab):
# List[Tuple[str, Vocab]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
vocab = dict(vocab)
n_src_features = sum('src_feat_' in k for k in vocab)
n_tgt_features = sum('tgt_feat_' in k for k in vocab)
fields = get_fields(
data_type, n_src_features, n_tgt_features,
dynamic_dict=dynamic_dict)
for n, f in fields.items():
try:
f_iter = iter(f)
except TypeError:
f_iter = [(n, f)]
for sub_n, sub_f in f_iter:
if sub_n in vocab:
sub_f.vocab = vocab[sub_n]
return fields
if _old_style_field_list(vocab): # upgrade to multifield
# Dict[str, List[Tuple[str, Field]]]
# doesn't change structure - don't return early.
fields = vocab
for base_name, vals in fields.items():
if ((base_name == 'src' and data_type == 'text') or
base_name == 'tgt'):
assert not isinstance(vals[0][1], TextMultiField)
fields[base_name] = [(base_name, TextMultiField(
vals[0][0], vals[0][1], vals[1:]))]
if _old_style_nesting(vocab):
# Dict[str, List[Tuple[str, Field]]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
fields = dict(list(chain.from_iterable(vocab.values())))
return fields
def _old_style_vocab(vocab):
"""Detect old-style vocabs (``List[Tuple[str, torchtext.data.Vocab]]``).
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is a list of pairs where the second object
is a :class:`torchtext.vocab.Vocab` object.
This exists because previously only the vocab objects from the fields
were saved directly, not the fields themselves, and the fields needed to
be reconstructed at training and translation time.
"""
return isinstance(vocab, list) and \
any(isinstance(v[1], Vocab) for v in vocab)
def _old_style_nesting(vocab):
"""Detect old-style nesting (``dict[str, List[Tuple[str, Field]]]``)."""
return isinstance(vocab, dict) and \
any(isinstance(v, list) for v in vocab.values())
def _old_style_field_list(vocab):
"""Detect old-style text fields.
Not old style vocab, old nesting, and text-type fields not using
``TextMultiField``.
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is not an :func:`_old_style_vocab` and not
a :class:`TextMultiField` (using an old-style text representation).
"""
# if tgt isn't using TextMultiField, then no text field is.
return (not _old_style_vocab(vocab)) and _old_style_nesting(vocab) and \
(not isinstance(vocab['tgt'][0][1], TextMultiField))
def old_style_vocab(vocab):
"""The vocab/fields need updated."""
return _old_style_vocab(vocab) or _old_style_field_list(vocab) or \
_old_style_nesting(vocab)
def filter_example(ex, use_src_len=True, use_tgt_len=True,
min_src_len=1, max_src_len=float('inf'),
min_tgt_len=1, max_tgt_len=float('inf')):
"""Return whether an example is an acceptable length.
If used with a dataset as ``filter_pred``, use :func:`partial()`
for all keyword arguments.
Args:
ex (torchtext.data.Example): An object with a ``src`` and ``tgt``
property.
use_src_len (bool): Filter based on the length of ``ex.src``.
use_tgt_len (bool): Similar to above.
min_src_len (int): A non-negative minimally acceptable length
(examples of exactly this length will be included).
min_tgt_len (int): Similar to above.
max_src_len (int or float): A non-negative (possibly infinite)
maximally acceptable length (examples of exactly this length
will be included).
max_tgt_len (int or float): Similar to above.
"""
src_len = len(ex.src[0])
tgt_len = len(ex.tgt[0])
return (not use_src_len or min_src_len <= src_len <= max_src_len) and \
(not use_tgt_len or min_tgt_len <= tgt_len <= max_tgt_len)
def _pad_vocab_to_multiple(vocab, multiple):
vocab_size = len(vocab)
if vocab_size % multiple == 0:
return
target_size = int(math.ceil(vocab_size / multiple)) * multiple
padding_tokens = [
"averyunlikelytoken%d" % i for i in range(target_size - vocab_size)]
vocab.extend(Vocab(Counter(), specials=padding_tokens))
return vocab
def _build_field_vocab(field, counter, size_multiple=1, **kwargs):
# this is basically copy-pasted from torchtext.
all_specials = [
field.unk_token, field.pad_token, field.init_token, field.eos_token
]
specials = [tok for tok in all_specials if tok is not None]
field.vocab = field.vocab_cls(counter, specials=specials, **kwargs)
if size_multiple > 1:
_pad_vocab_to_multiple(field.vocab, size_multiple)
def _load_vocab(vocab_path, name, counters):
# counters changes in place
vocab = _read_vocab_file(vocab_path, name)
vocab_size = len(vocab)
#print(" vocab size \n", vocab_size)
logger.info('Loaded %s vocab has %d tokens.' % (name, vocab_size))
for i, token in enumerate(vocab):
# keep the order of tokens specified in the vocab file by
# adding them to the counter with decreasing counting values
counters[name][token] = vocab_size - i
return vocab, vocab_size
def _build_fv_from_multifield(multifield, counters, build_fv_args,
size_multiple=1):
for name, field in multifield:
_build_field_vocab(
field,
counters[name],
size_multiple=size_multiple,
**build_fv_args[name])
logger.info(" * %s vocab size: %d." % (name, len(field.vocab)))
def build_vocab(train_dataset_files, fields, data_type, share_vocab,
src_vocab_path, src_vocab_size, src_words_min_frequency,
tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency,
vocab_size_multiple=1):
"""Build the fields for all data sides.
Args:
train_dataset_files: a list of train dataset pt file.
fields (dict[str, Field]): fields to build vocab for.
data_type (str): A supported data type string.
share_vocab (bool): share source and target vocabulary?
src_vocab_path (str): Path to src vocabulary file.
src_vocab_size (int): size of the source vocabulary.
src_words_min_frequency (int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path (str): Path to tgt vocabulary file.
tgt_vocab_size (int): size of the target vocabulary.
tgt_words_min_frequency (int): the minimum frequency needed to
include a target word in the vocabulary.
vocab_size_multiple (int): ensure that the vocabulary size is a
multiple of this value.
Returns:
Dict of Fields
"""
# print("in build vocab src_vocab_size_*********** \n",src_vocab_size)
counters = defaultdict(Counter)
# print("\n\nin build vocab\n", counters)
if src_vocab_path:
try:
logger.info("Using existing vocabulary...")
vocab = torch.load(src_vocab_path)
# return vocab to dump with standard name
return vocab
except torch.serialization.pickle.UnpicklingError:
logger.info("Building vocab from text file...")
# empty train_dataset_files so that vocab is only loaded from
# given paths in src_vocab_path, tgt_vocab_path
train_dataset_files = []
# print(src_vocab_path)
# assert False
# Load vocabulary
#print('src path', src_vocab_path)
#assert False
if src_vocab_path:
src_vocab, src_vocab_size = _load_vocab(
src_vocab_path, "src", counters)
# print('src vocab', src_vocab, 'src_vocab_size',src_vocab_size)
# assert False
else:
src_vocab = None
if tgt_vocab_path:
tgt_vocab, tgt_vocab_size = _load_vocab(
tgt_vocab_path, "tgt", counters)
else:
tgt_vocab = None
for i, path in enumerate(train_dataset_files):
dataset = torch.load(path)
logger.info(" * reloading %s." % path)
for ex in dataset.examples:
for name, field in fields.items():
try:
f_iter = iter(field)
except TypeError:
f_iter = [(name, field)]
all_data = [getattr(ex, name, None)]
else:
all_data = getattr(ex, name)
for (sub_n, sub_f), fd in zip(
f_iter, all_data):
has_vocab = (sub_n == 'src' and src_vocab) or \
(sub_n == 'tgt' and tgt_vocab)
if sub_f.sequential and not has_vocab:
val = fd
counters[sub_n].update(val)
# Drop the none-using from memory but keep the last
if i < len(train_dataset_files) - 1:
dataset.examples = None
gc.collect()
del dataset.examples
gc.collect()
del dataset
gc.collect()
build_fv_args = defaultdict(dict)
build_fv_args["src"] = dict(
max_size=src_vocab_size, min_freq=src_words_min_frequency)
build_fv_args["tgt"] = dict(
max_size=tgt_vocab_size, min_freq=tgt_words_min_frequency)
tgt_multifield = fields["tgt"]
_build_fv_from_multifield(
tgt_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if data_type == 'text':
src_multifield = fields["src"]
_build_fv_from_multifield(
src_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if share_vocab:
# `tgt_vocab_size` is ignored when sharing vocabularies
logger.info(" * merging src and tgt vocab...")
src_field = src_multifield.base_field
tgt_field = tgt_multifield.base_field
_merge_field_vocabs(
src_field, tgt_field, vocab_size=src_vocab_size,
min_freq=src_words_min_frequency,
vocab_size_multiple=vocab_size_multiple)
logger.info(" * merged vocab size: %d." % len(src_field.vocab))
return fields # is the return necessary?
def _merge_field_vocabs(src_field, tgt_field, vocab_size, min_freq,
vocab_size_multiple):
# in the long run, shouldn't it be possible to do this by calling
# build_vocab with both the src and tgt data?
specials = [tgt_field.unk_token, tgt_field.pad_token,
tgt_field.init_token, tgt_field.eos_token]
merged = sum(
[src_field.vocab.freqs, tgt_field.vocab.freqs], Counter()
)
merged_vocab = Vocab(
merged, specials=specials,
max_size=vocab_size, min_freq=min_freq
)
if vocab_size_multiple > 1:
_pad_vocab_to_multiple(merged_vocab, vocab_size_multiple)
src_field.vocab = merged_vocab
tgt_field.vocab = merged_vocab
assert len(src_field.vocab) == len(tgt_field.vocab)
def _read_vocab_file(vocab_path, tag):
"""Loads a vocabulary from the given path.
Args:
vocab_path (str): Path to utf-8 text file containing vocabulary.
Each token should be on a line by itself. Tokens must not
contain whitespace (else only before the whitespace
is considered).
tag (str): Used for logging which vocab is being read.
"""
logger.info("Loading {} vocabulary from {}".format(tag, vocab_path))
if not os.path.exists(vocab_path):
raise RuntimeError(
"{} vocabulary not found at {}".format(tag, vocab_path))
else:
with codecs.open(vocab_path, 'r', 'utf-8') as f:
# print('>>>>>>>>>>>>> ', vocab_path, tag)
# assert False
return [line.strip().split()[0] for line in f if line.strip()]
def batch_iter(data, batch_size, batch_size_fn=None, batch_size_multiple=1):
"""Yield elements from data in chunks of batch_size, where each chunk size
is a multiple of batch_size_multiple.
This is an extended version of torchtext.data.batch.
"""
if batch_size_fn is None:
def batch_size_fn(new, count, sofar):
return count
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
if size_so_far >= batch_size:
overflowed = 0
if size_so_far > batch_size:
overflowed += 1
if batch_size_multiple > 1:
overflowed += (
(len(minibatch) - overflowed) % batch_size_multiple)
if overflowed == 0:
yield minibatch
minibatch, size_so_far = [], 0
else:
yield minibatch[:-overflowed]
minibatch = minibatch[-overflowed:]
size_so_far = 0
for i, ex in enumerate(minibatch):
size_so_far = batch_size_fn(ex, i + 1, size_so_far)
if minibatch:
yield minibatch
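# Example (illustrative): with the default batch_size_fn, batch_size=4 and
# batch_size_multiple=2, a stream of 7 examples yields one chunk of 4; the remaining 3
# examples come from the trailing "if minibatch" branch, which does not enforce the multiple.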
class OrderedIterator(torchtext.data.Iterator):
def __init__(self,
dataset,
batch_size,
batch_size_multiple=1,
**kwargs):
super(OrderedIterator, self).__init__(dataset, batch_size, **kwargs)
self.batch_size_multiple = batch_size_multiple
def create_batches(self):
if self.train:
def _pool(data, random_shuffler):
for p in torchtext.data.batch(data, self.batch_size * 100):
p_batch = batch_iter(
sorted(p, key=self.sort_key),
self.batch_size,
batch_size_fn=self.batch_size_fn,
batch_size_multiple=self.batch_size_multiple)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = _pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in batch_iter(
self.data(),
self.batch_size,
batch_size_fn=self.batch_size_fn,
batch_size_multiple=self.batch_size_multiple):
self.batches.append(sorted(b, key=self.sort_key))
class DatasetLazyIter(object):
"""Yield data from sharded dataset files.
Args:
dataset_paths: a list containing the locations of dataset files.
fields (dict[str, Field]): fields dict for the
datasets.
batch_size (int): batch size.
batch_size_fn: custom batch process function.
device: See :class:`OrderedIterator` ``device``.
is_train (bool): train or valid?
"""
def __init__(self, dataset_paths, fields, batch_size, batch_size_fn,
batch_size_multiple, device, is_train, repeat=True,
num_batches_multiple=1):
self._paths = dataset_paths
self.fields = fields
self.batch_size = batch_size
self.batch_size_fn = batch_size_fn
self.batch_size_multiple = batch_size_multiple
self.device = device
self.is_train = is_train
self.repeat = repeat
self.num_batches_multiple = num_batches_multiple
def _iter_dataset(self, path):
cur_dataset = torch.load(path)
logger.info('Loading dataset from %s, number of examples: %d' %
(path, len(cur_dataset)))
cur_dataset.fields = self.fields
cur_iter = OrderedIterator(
dataset=cur_dataset,
batch_size=self.batch_size,
batch_size_multiple=self.batch_size_multiple,
batch_size_fn=self.batch_size_fn,
device=self.device,
train=self.is_train,
sort=False,
sort_within_batch=True,
repeat=False
)
for batch in cur_iter:
yield batch
cur_dataset.examples = None
gc.collect()
del cur_dataset
gc.collect()
def __iter__(self):
num_batches = 0
paths = self._paths
if self.is_train and self.repeat:
# Cycle through the shards indefinitely.
paths = cycle(paths)
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if self.is_train and not self.repeat and \
num_batches % self.num_batches_multiple != 0:
# When the dataset is not repeated, we might need to ensure that
# the number of returned batches is the multiple of a given value.
# This is important for multi GPU training to ensure that all
# workers have the same number of batches to process.
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if num_batches % self.num_batches_multiple == 0:
return
def max_tok_len(new, count, sofar):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch, max_tgt_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
# Src: [<bos> w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)
# Tgt: [w1 ... wM <eos>]
max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt[0]) + 1)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
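# Example (illustrative): for a batch of 3 sequences whose longest src is 18 tokens
# (20 counting <bos>/<eos>) and longest tgt is 14 (15 counting <eos>), the cost reported
# to batch_iter is max(3 * 20, 3 * 15) = 60, i.e. the padded source tensor size.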
def build_dataset_iter(corpus_type, fields, opt, is_train=True):
"""
This returns user-defined train/validate data iterator for the trainer
to iterate over. We implement simple ordered iterator strategy here,
but more sophisticated strategy like curriculum learning is ok too.
"""
dataset_paths = list(sorted(
glob.glob(opt.data + '.' + corpus_type + '*.pt')))
if not dataset_paths:
return None
batch_size = opt.batch_size if is_train else opt.valid_batch_size
batch_fn = max_tok_len if is_train and opt.batch_type == "tokens" else None
batch_size_multiple = 8 if opt.model_dtype == "fp16" else 1
device = "cuda" if opt.gpu_ranks else "cpu"
return DatasetLazyIter(
dataset_paths,
fields,
batch_size,
batch_fn,
batch_size_multiple,
device,
is_train,
repeat=not opt.single_pass,
num_batches_multiple=opt.accum_count * opt.world_size)
|
py | 1a53517a8c23db75e9d102d89d09ec74dbac16ad | # This is the MIT license: http://www.opensource.org/licenses/mit-license.php
#
# Copyright (c) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>.
# SQLAlchemy is a trademark of Michael Bayer.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from sqlalchemy import pool
from sqlalchemy.engine import default
from requests import Session
from .base import SolrDialect, SolrIdentifierPreparer, SolrCompiler
from sqlalchemy_solr.solrdbapi import api_globals
import logging
from .message_formatter import MessageFormatter
try:
from sqlalchemy.sql.compiler import SQLCompiler
except ImportError:
from sqlalchemy.sql.compiler import DefaultCompiler as SQLCompiler
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.ERROR)
try:
from sqlalchemy.types import BigInteger
except ImportError:
from sqlalchemy.databases.mysql import MSBigInteger as BigInteger
class SolrDialect_http(SolrDialect):
mf = MessageFormatter()
def __init__(self, **kw):
default.DefaultDialect.__init__(self, **kw)
self.supported_extensions = []
def create_connect_args(self, url, **kwargs):
url_port = url.port or 8047
qargs = {'host': url.host, 'port': url_port}
try:
db_parts = url.database.split('/')
db = ".".join(db_parts)
self.proto = "http://"
if 'use_ssl' in kwargs:
if kwargs['use_ssl'] in [True, 'True', 'true']:
self.proto = "https://"
# Mapping server path and collection
if db_parts[0]:
server_path = db_parts[0]
else:
raise AttributeError('Missing server path')
if db_parts[1]:
collection = db_parts[1]
else:
raise AttributeError('Missing collection')
# Save this for later use.
self.host = url.host
self.port = url_port
self.username = url.username
self.password = url.password
self.db = db
self.server_path = server_path
self.collection = collection
qargs.update(url.query)
qargs['db'] = db
qargs['server_path'] = server_path
qargs['collection'] = collection
qargs['username'] = url.username
qargs['password'] = url.password
except Exception as ex:
logging.error(self.mf
.format("Error in SolrDialect_http.create_connect_args :: ", str(ex)))
return [], qargs
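    # Illustrative connection URL for the parsing above (scheme and values are assumptions):
    #   solr://user:pass@localhost:8983/solr/my_collection
    #   -> server_path="solr", collection="my_collection", db="solr.my_collection"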
def get_table_names(self, connection, schema=None, **kw):
        tables_names = []
        session = Session()
local_payload = api_globals._PAYLOAD.copy()
local_payload['action'] = 'LIST'
try:
result = session.get(
self.proto + self.host + ":" + str(self.port) + "/" +
self.server_path + "/admin/collections",
params=local_payload,
headers=api_globals._HEADER,
auth=(self.username, self.password)
)
tables_names = result.json()['collections']
except Exception as ex:
logging.error("Error in SolrDialect_http.get_table_names :: " + str(ex))
return tuple(tables_names)
def get_columns(self, connection, table_name, schema=None, **kw):
columns = []
session = Session()
local_payload = api_globals._PAYLOAD.copy()
local_payload['action'] = 'LIST'
try:
result = session.get(
self.proto + self.host + ":" + str(self.port) + "/" +
self.server_path + "/" + table_name + "/admin/luke",
params=local_payload,
headers=api_globals._HEADER,
auth=(self.username, self.password)
)
fields = result.json()['fields']
for field in fields:
column = {
"name": field,
"type": self.get_data_type(fields[field]['type']),
"longType": self.get_data_type(fields[field]['type'])
}
columns.append(column)
return columns
except Exception as ex:
logging.error("Error in SolrDialect_http.get_table_names :: " + str(ex)) |
py | 1a53521d7e268d64e11603b2bca53693462b2c64 | from NIENV import *
# API METHODS --------------
# self.main_widget
# self.update_shape()
# Ports
# self.input(index)
# self.set_output_val(index, val)
# self.exec_output(index)
# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)
# self.delete_input(index)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(index)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', target='global')
# self.log_message('that\'s not good', target='error')
# --------------------------
class GetAttributes_NodeInstance(NodeInstance):
def __init__(self, params):
super(GetAttributes_NodeInstance, self).__init__(params)
self.special_actions['generate attribute outputs'] = {'method': M(self.init_attribute_ports)}
self.ready = False
# self.special_actions['action name'] = {'method': M(self.action_method)}
# ...
def update_event(self, input_called=-1):
if self.ready:
est = self.input(0)
attributes = [i for i in dir(est) if (i[-1] == "_" and i[0] != "_")]
attri = 0
for attr in attributes:
try:
self.set_output_val(attri, getattr(est, attr))
except:
self.set_output_val(attri, None)
attri += 1
def init_attribute_ports(self):
if self.input(0) == None:
return
est = self.input(0)
for i in range(len(self.outputs)):
self.delete_output(0)
attributes = [i for i in dir(est) if (i[-1] == "_" and i[0] != "_")]
attri = 0
for attr in attributes:
self.create_new_output(type_="data", label=attr, pos=-1)
try:
self.set_output_val(attri, getattr(est, attr))
except:
self.set_output_val(attri, None)
attri += 1
self.ready = True
def get_data(self):
data = {}
return data
def set_data(self, data):
pass
def removing(self):
pass
|
py | 1a5352265464bd0c75c9675dd31ae4c8bac08223 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class GRUCell(nn.Module):
def __init__(self, input_size, hidden_size):
super(GRUCell, self).__init__()
self.hidden_size = hidden_size
# Layers
self.linear_z = nn.Linear(input_size+hidden_size, hidden_size)
self.linear_r = nn.Linear(input_size+hidden_size, hidden_size)
self.linear = nn.Linear(input_size+hidden_size, hidden_size)
self._initialization()
def _initialization(self):
a = -np.sqrt(1/self.hidden_size)
b = np.sqrt(1/self.hidden_size)
torch.nn.init.uniform_(self.linear_z.weight, a, b)
torch.nn.init.uniform_(self.linear_z.bias, a, b)
torch.nn.init.uniform_(self.linear_r.weight, a, b)
torch.nn.init.uniform_(self.linear_r.bias, a, b)
torch.nn.init.uniform_(self.linear.weight, a, b)
torch.nn.init.uniform_(self.linear.bias, a, b)
def forward(self, input_, hidden_state):
inputs_and_prev_state = torch.cat((input_, hidden_state), -1)
# z = sigma(W_z * a + U_z * h(t-1)) (3)
update_gate = self.linear_z(inputs_and_prev_state).sigmoid()
# r = sigma(W_r * a + U_r * h(t-1)) (4)
reset_gate = self.linear_r(inputs_and_prev_state).sigmoid()
# h_hat(t) = tanh(W * a + U*(r o h(t-1))) (5)
new_hidden_state = self.linear(torch.cat((input_, reset_gate * hidden_state), -1)).tanh()
# h(t) = (1-z) o h(t-1) + z o h_hat(t) (6)
output = (1 - update_gate) * hidden_state + update_gate * new_hidden_state
return output
class GGNNModel(nn.Module):
def __init__(self, attr_size, hidden_size, propag_steps):
super(GGNNModel, self).__init__()
self.attr_size = attr_size
self.hidden_size = hidden_size
self.propag_steps = propag_steps
# Layers
self.linear_i = nn.Linear(attr_size,hidden_size)
self.gru = GRUCell(2*hidden_size, hidden_size)
self.linear_o = nn.Linear(hidden_size, 1)
self._initialization()
def _initialization(self):
torch.nn.init.kaiming_normal_(self.linear_i.weight)
torch.nn.init.constant_(self.linear_i.bias, 0)
torch.nn.init.xavier_normal_(self.linear_o.weight)
torch.nn.init.constant_(self.linear_o.bias, 0)
def forward(self, attr_matrix, adj_matrix):
'''
attr_matrix of shape (batch, graph_size, attributes dimension)
adj_matrix of shape (batch, graph_size, graph_size)
> Only 0 (nonexistent) or 1 (existent) edge types
'''
mask = (attr_matrix[:,:,0] != 0)*1
A_in = adj_matrix.float()
A_out = torch.transpose(A_in,-2,-1)
if len(A_in.shape) < 3:
A_in = torch.unsqueeze(A_in,0)
A_out = torch.unsqueeze(A_out,0)
if len(attr_matrix.shape) < 3:
attr_matrix = torch.unsqueeze(attr_matrix,0)
hidden_state = self.linear_i(attr_matrix.float()).relu()
for step in range(self.propag_steps):
# a_v = A_v[h_1 ... h_|V|]
a_in = torch.bmm(A_in, hidden_state)
a_out = torch.bmm(A_out, hidden_state)
# GRU-like update
hidden_state = self.gru(torch.cat((a_in, a_out), -1), hidden_state)
# Output model
output = self.linear_o(hidden_state).squeeze(-1)
output = output + (mask + 1e-45).log() # Mask output
output = output.log_softmax(1)
return output
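# Minimal shape check (illustrative, not part of the original module): two graphs with
# four nodes each and 3-dimensional node attributes.
#   model = GGNNModel(attr_size=3, hidden_size=16, propag_steps=4)
#   attrs = torch.rand(2, 4, 3)
#   adj = torch.randint(0, 2, (2, 4, 4))
#   out = model(attrs, adj)   # shape (2, 4): per-graph log-softmax over nodes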
|
py | 1a53524f16ca73cc0dbaa3d8bde884f803cf5309 | import brainfuck_generator as bfgen
from tests import brainfuck as bf
def test1():
text = 'Hello world.'
assert bf.evaluate(bfgen.string_to_bf(text, False)) == text
def test2():
text = 'Hello world.'
assert bf.evaluate(bfgen.string_to_bf(text, True)) == text
|
py | 1a53526d65fb369f1e6672a72e30294f9f70b006 | from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from tqdm.notebook import tqdm
from helpers import count_unique_words, count_unique_ngrams, \
build_unique_ngrams, create_sentence_vectors, create_sentence_vectors_submission
import sys
import tensorflow as tf
from tensorflow import keras
import gensim # Not sure whether it is better to use gensim or tensorflow :/
import logging
from gensim.models.phrases import Phrases, Phraser
import multiprocessing
from gensim.models import Word2Vec
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
sys.path.append('../')
import argparse
parser = argparse.ArgumentParser(description='Builds sentence representation using word vectors.')
parser.add_argument('--w2v_model',
required=True,
help='Word2Vec model pretrained')
parser.add_argument('--filter_size',
nargs='+',
required=True,
help='a list of sizes for the convolutional filters (usually odd numbers. for example 3 5)')
parser.add_argument('--hidden_layers_size',
nargs='+',
required=True,
help='a list of sizes for the hidden layers (usually 50-100)')
parser.add_argument('--output_model',
required=True,
help='path where the model will be saved')
args = parser.parse_args()
# Up to now everything is hardcoded, may be better to use parameters!
df = pd.read_pickle("dataframes/full_df_cleaned_train_0_8_glove200.pickle")
df_test = pd.read_pickle("dataframes/full_df_cleaned_test_0_2_glove200.pickle")
maxlen = 44 # magic number
def create_embedding_matrix(filepath, word_index, embedding_dim):
vocab_size = len(word_index) + 1 # Adding again 1 because of reserved 0 index
embedding_matrix = np.zeros((vocab_size, embedding_dim))
counter_wrong = 0
with open(filepath) as f:
for line in f:
word, *vector = line.split()
if word in word_index:
idx = word_index[word]
embedding_matrix[idx] = np.array(vector, dtype=np.float32)[:embedding_dim]
for row in range(embedding_matrix.shape[0]):
if not np.any(embedding_matrix[row,:]):
counter_wrong += 1
embedding_matrix[row,:] = np.random.rand(embedding_dim)
print("The number of times we didn't find a word is {} and should be 0, wtf".format(counter_wrong))
return embedding_matrix
def create_embedding_matrix_w2v(w2v_model, word_index):
vocab_size = len(word_index) + 1 # Adding again 1 because of reserved 0 index
## We can assume love is always present in our vocabulary ahaha
embedding_matrix = np.zeros((vocab_size, w2v_model.wv.word_vec("love").shape[0]))
for word in w2v_model.wv.vocab:
vector = w2v_model.wv.word_vec(word)
if word in word_index:
idx = word_index[word]
embedding_matrix[idx] = np.array(
vector, dtype=np.float32)
for row in range(embedding_matrix.shape[0]):
if not np.any(embedding_matrix[row,:]):
### This should be checked again!!! Not sure it is correct!
embedding_matrix[row,:] = np.random.rand(w2v_model.wv.vectors.shape[1])
return embedding_matrix
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
tokenizer = Tokenizer()
tokenizer.fit_on_texts(df.sentence)
vocab_size = len(tokenizer.word_index) + 1
X_train = tokenizer.texts_to_sequences(df.sentence)
X_test = tokenizer.texts_to_sequences(df_test.sentence)
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)
y_train = np.where(df.label == 1, 1, 0)
y_test = np.where(df_test.label == 1, 1, 0)
if args.w2v_model == 'w2v':
# Use word2vec
w2v_model = gensim.models.KeyedVectors.load_word2vec_format('models/GoogleNews-vectors-negative300.bin', binary=True)
embedding_matrix = create_embedding_matrix_w2v(
w2v_model,
tokenizer.word_index)
## Embedding dimension
embedding_dim = w2v_model.wv.vectors.shape[1]
else:
# Use glove
embedding_dim = 200
embedding_matrix = create_embedding_matrix(
'glove/glove.twitter.27B.200d.txt',
tokenizer.word_index, embedding_dim)
# Compile the model
from tensorflow.keras.layers import GlobalMaxPooling1D, concatenate, Dropout, Dense, Embedding, Input, Conv1D
from tensorflow.keras.models import Model
# Specifying the input shape: the input is a sentence of maxlen words
embedding_layer = Embedding(vocab_size, output_dim=embedding_dim, weights=[embedding_matrix], input_length=maxlen,
trainable=True)
sequence_input = Input(shape=(maxlen,), dtype='int32')
# Creating the embedding using the previously constructed embedding matrix
embedded_sequences = embedding_layer(sequence_input)
convs = []
filter_sizes = [int(el) for el in args.filter_size]
for filter_size in filter_sizes:
# Creating the convolutional layer:
# "filters" represents the number of different windows we want (i.e. how many channels to produce),
# therefore in our case we will end up with 200 different convolutions
conv_layer = Conv1D(filters=256,
kernel_size=filter_size,
activation='relu')(embedded_sequences)
# Creating the global max pooling layer
pool_layer = GlobalMaxPooling1D()(conv_layer)
convs.append(pool_layer)
merged_layers = concatenate(convs, axis=1)
# Create dropout leayer: randomly set a fraction of input units to 0, which helps prevent overfitting
x = Dropout(0.2)(merged_layers)
# Create (regular) densely-connected layer
for el in args.hidden_layers_size:
x = Dense(int(el), activation='relu')(x)
x = Dropout(0.2)(x)
preds = Dense(1, activation='sigmoid')(x)
model_tw = Model(sequence_input, preds)
model_tw.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model_tw.summary()
from tensorflow.keras.callbacks import ModelCheckpoint
filepath="models/cnn_glove_tw"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
# Finally fit the model
history = model_tw.fit(X_train, y_train, epochs=15, verbose=True, validation_data=(X_test, y_test), callbacks=callbacks_list, batch_size=512)
loss, accuracy = model_tw.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model_tw.evaluate(X_test, y_test, verbose=False)
print("Testing Accuracy: {:.4f}".format(accuracy))
plot_history(history) |
py | 1a5353c440e8af6d66fe6633c1d23ee981298258 | # -*- coding: utf-8 -*-
#! /usr/bin/env python
#use python2.7 lang.
#and set env like this.
#export A3RT_API_KEY_Text_Classification="XXXXXXXXXXXXXX"
import os
import json
import string
import codecs
import requests
def getAppInfo(sApiKey, text):
sURL = "https://api.a3rt.recruit-tech.co.jp/text_classification/v1/classify"
params = {'apikey' : sApiKey, 'model_id' : "default",'text' : text}
stRes = requests.get(sURL,params=params)
return stRes
if __name__ == '__main__':
sApiKey = os.environ.get('A3RT_API_KEY_Text_Classification')
text = "馬が走っています。"
stRes = getAppInfo(sApiKey, text)
data = json.loads(stRes.text)
print(data)
|
py | 1a5353fa2497e817e3684a05ce6b9cce2488443f | from typing import Callable
from urllib.parse import urlunsplit
from .typing import ASGIFramework
from .utils import invoke_asgi
class HTTPToHTTPSRedirectMiddleware:
def __init__(self, app: ASGIFramework, host: str) -> None:
self.app = app
self.host = host
async def __call__(self, scope: dict, receive: Callable, send: Callable) -> None:
if scope["type"] == "http" and scope["scheme"] == "http":
await self._send_http_redirect(scope, send)
elif scope["type"] == "websocket" and scope["scheme"] == "ws":
# If the server supports the WebSocket Denial Response
# extension we can send a redirection response, if not we
# can only deny the WebSocket connection.
if "websocket.http.response" in scope.get("extensions", {}):
await self._send_websocket_redirect(scope, send)
else:
await send({"type": "websocket.close"})
else:
return await invoke_asgi(self.app, scope, receive, send)
async def _send_http_redirect(self, scope: dict, send: Callable) -> None:
new_url = urlunsplit(
("https", self.host, scope["path"], scope["query_string"].decode(), "")
)
await send(
{
"type": "http.response.start",
"status": 307,
"headers": [(b"location", new_url.encode())],
}
)
await send({"type": "http.response.body"})
async def _send_websocket_redirect(self, scope: dict, send: Callable) -> None:
# If the HTTP version is 2 we should redirect with a https
# scheme not wss.
scheme = "wss"
if scope.get("http_version", "1.1") == "2.0":
scheme = "https"
new_url = urlunsplit((scheme, self.host, scope["path"], scope["query_string"].decode(), ""))
await send(
{
"type": "websocket.http.response.start",
"status": 307,
"headers": [(b"location", new_url.encode())],
}
)
await send({"type": "websocket.http.response.body"})
|
py | 1a53559d09c65d8507feb4cb803e8877ce91b148 | # A Python program for sending email
import smtplib
import urllib.request as urllib
# Senders email
sender_email = "#"
# Receivers email
rec_email = "#"
message = "Congratulations. The best model has been created."
# Initialize the server variable
server = smtplib.SMTP('smtp.gmail.com', 587)
# Start the server connection
server.starttls()
# Login
server.login("#Username", "#Password")
print("Logged in Successfully!")
# Send Email
server.sendmail("Avik", {rec_email}, message)
print(f"Email has been sent successfully to {rec_email}")
|
py | 1a53559ea676f4ecdaf02de79316dba84810186c | """
ODM package declaration.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from .database import (
aggregate,
establish_db_conn,
get_db_client,
get_db_conn,
get_async_db_conn,
drop_database,
sync_database,
list_datasets,
delete_dataset,
delete_evaluation,
delete_evaluations,
delete_brain_run,
delete_brain_runs,
drop_orphan_collections,
drop_orphan_run_results,
list_collections,
get_collection_stats,
stream_collection,
count_documents,
export_document,
export_collection,
import_document,
import_collection,
insert_documents,
bulk_write,
)
from .dataset import (
create_field,
SampleFieldDocument,
DatasetDocument,
)
from .document import (
Document,
DynamicDocument,
EmbeddedDocument,
DynamicEmbeddedDocument,
SerializableDocument,
)
from .frame import (
DatasetFrameDocument,
NoDatasetFrameDocument,
)
from .mixins import (
get_default_fields,
get_field_kwargs,
get_implied_field_kwargs,
validate_fields_match,
)
from .sample import (
DatasetSampleDocument,
NoDatasetSampleDocument,
)
|
py | 1a5355f1027453f2c0fc8d87153dcdb2d639363e | import logging
import copy
from .._compat import as_unicode
log = logging.getLogger(__name__)
class BaseFilter(object):
"""
Base class for all data filters.
Sub class to implement your own custom filters
"""
column_name = ''
datamodel = None
model = None
name = ''
is_related_view = False
"""
Sets this filter to a special kind for related views.
If true this filter was not set by the user
"""
def __init__(self, column_name, datamodel, is_related_view=False):
"""
Constructor.
:param column_name:
Model field name
:param datamodel:
The datamodel access class
:param is_related_view:
Optional internal parameter to filter related views
"""
self.column_name = column_name
self.datamodel = datamodel
self.model = datamodel.obj
self.is_related_view = is_related_view
def apply(self, query, value):
"""
Override this to implement your own new filters
"""
raise NotImplementedError
def __repr__(self):
return self.name
class FilterRelation(BaseFilter):
"""
Base class for all filters for relations
"""
pass
class BaseFilterConverter(object):
"""
Base Filter Converter, all classes responsible
for the association of columns and possible filters
will inherit from this and override the conversion_table property.
"""
conversion_table = ()
"""
When implementing your own filters you just need to define
the new filters, and register them overriding this property.
This will map a column type to all possible filters.
use something like this::
(('is_text', [FilterCustomForText,
FilterNotContains,
FilterEqual,
FilterNotEqual]
),
('is_string', [FilterContains,
FilterNotContains,
FilterEqual,
FilterNotEqual]),
('is_integer', [FilterEqual,
FilterNotEqual]),
)
"""
def __init__(self, datamodel):
self.datamodel = datamodel
def convert(self, col_name):
for conversion in self.conversion_table:
if getattr(self.datamodel, conversion[0])(col_name):
return [item(col_name, self.datamodel) for item in conversion[1]]
log.warning('Filter type not supported for column: %s' % col_name)
class Filters(object):
filters = []
""" List of instanciated BaseFilter classes """
values = []
""" list of values to apply to filters """
_search_filters = {}
""" dict like {'col_name':[BaseFilter1, BaseFilter2, ...], ... } """
_all_filters = {}
def __init__(self, filter_converter, datamodel, search_columns=None):
"""
:param filter_converter: Accepts BaseFilterConverter class
:param search_columns: restricts possible columns, accepts a list of column names
:param datamodel: Accepts BaseInterface class
"""
search_columns = search_columns or []
self.filter_converter = filter_converter
self.datamodel = datamodel
self.clear_filters()
if search_columns:
self._search_filters = self._get_filters(search_columns)
self._all_filters = self._get_filters(datamodel.get_columns_list())
def get_search_filters(self):
return self._search_filters
def _get_filters(self, cols):
filters = {}
for col in cols:
_filters = self.filter_converter(self.datamodel).convert(col)
if _filters:
filters[col] = _filters
return filters
def clear_filters(self):
self.filters = []
self.values = []
def _add_filter(self, filter_instance, value):
self.filters.append(filter_instance)
self.values.append(value)
def add_filter_index(self, column_name, filter_instance_index, value):
self._add_filter(self._all_filters[column_name][filter_instance_index], value)
def add_filter(self, column_name, filter_class, value):
self._add_filter(filter_class(column_name, self.datamodel), value)
return self
def add_filter_related_view(self, column_name, filter_class, value):
self._add_filter(filter_class(column_name, self.datamodel, True), value)
return self
def add_filter_list(self, active_filter_list=None):
for item in active_filter_list:
column_name, filter_class, value = item
self._add_filter(filter_class(column_name, self.datamodel), value)
return self
def get_joined_filters(self, filters):
"""
Creates a new filters class with active filters joined
"""
retfilters = Filters(self.filter_converter, self.datamodel)
retfilters.filters = self.filters + filters.filters
retfilters.values = self.values + filters.values
return retfilters
def copy(self):
"""
Returns a copy of this object
:return: A copy of self
"""
retfilters = Filters(self.filter_converter, self.datamodel)
retfilters.filters = copy.copy(self.filters)
retfilters.values = copy.copy(self.values)
return retfilters
def get_relation_cols(self):
"""
Returns the filter active FilterRelation cols
"""
retlst = []
for flt, value in zip(self.filters, self.values):
if isinstance(flt, FilterRelation) and value:
retlst.append(flt.column_name)
return retlst
def get_filters_values(self):
"""
Returns a list of tuples [(FILTER, value),(...,...),....]
"""
return [(flt, value) for flt, value in zip(self.filters, self.values)]
def get_filter_value(self, column_name):
"""
Returns the filtered value for a certain column
:param column_name: The name of the column that we want the value from
:return: the filter value of the column
"""
for flt, value in zip(self.filters, self.values):
if flt.column_name == column_name:
return value
def get_filters_values_tojson(self):
return [(flt.column_name, as_unicode(flt.name), value) for flt, value in zip(self.filters, self.values)]
def apply_all(self, query):
for flt, value in zip(self.filters, self.values):
query = flt.apply(query, value)
return query
def __repr__(self):
retstr = "FILTERS \n"
for flt, value in self.get_filters_values():
retstr = retstr + "%s.%s:%s\n" % (flt.model.__table__, str(flt.column_name), str(value))
return retstr
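# Illustrative usage (the converter, datamodel and FilterStartsWith names are assumptions):
#   filters = Filters(MyFilterConverter, datamodel, search_columns=["name"])
#   filters.add_filter("name", FilterStartsWith, "A")
#   query = filters.apply_all(query)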
|
py | 1a5357dbadc07fbf3882d31c2cb9783fa23fcd00 | import sys
import logging
import json
from collections import OrderedDict
from redash import settings
logger = logging.getLogger(__name__)
__all__ = [
'BaseQueryRunner',
'InterruptException',
'BaseSQLQueryRunner',
'TYPE_DATETIME',
'TYPE_BOOLEAN',
'TYPE_INTEGER',
'TYPE_STRING',
'TYPE_DATE',
'TYPE_FLOAT',
'SUPPORTED_COLUMN_TYPES',
'register',
'get_query_runner',
'import_query_runners'
]
# Valid types of columns returned in results:
TYPE_INTEGER = 'integer'
TYPE_FLOAT = 'float'
TYPE_BOOLEAN = 'boolean'
TYPE_STRING = 'string'
TYPE_DATETIME = 'datetime'
TYPE_DATE = 'date'
SUPPORTED_COLUMN_TYPES = set([
TYPE_INTEGER,
TYPE_FLOAT,
TYPE_BOOLEAN,
TYPE_STRING,
TYPE_DATETIME,
TYPE_DATE
])
class InterruptException(Exception):
pass
class NotSupported(Exception):
pass
class BaseQueryRunner(object):
noop_query = None
def __init__(self, configuration):
self.syntax = 'sql'
self.configuration = configuration
@classmethod
def name(cls):
return cls.__name__
@classmethod
def type(cls):
return cls.__name__.lower()
@classmethod
def enabled(cls):
return True
@classmethod
def annotate_query(cls):
return True
@classmethod
def configuration_schema(cls):
return {}
def test_connection(self):
if self.noop_query is None:
raise NotImplementedError()
data, error = self.run_query(self.noop_query, None)
if error is not None:
raise Exception(error)
def run_query(self, query, user):
raise NotImplementedError()
def fetch_columns(self, columns):
column_names = []
duplicates_counter = 1
new_columns = []
for col in columns:
column_name = col[0]
if column_name in column_names:
column_name = "{}{}".format(column_name, duplicates_counter)
duplicates_counter += 1
column_names.append(column_name)
new_columns.append({'name': column_name,
'friendly_name': column_name,
'type': col[1]})
return new_columns
def get_schema(self, get_stats=False):
raise NotSupported()
def _run_query_internal(self, query):
results, error = self.run_query(query, None)
if error is not None:
raise Exception("Failed running query [%s]." % query)
return json.loads(results)['rows']
@classmethod
def to_dict(cls):
return {
'name': cls.name(),
'type': cls.type(),
'configuration_schema': cls.configuration_schema()
}
class BaseSQLQueryRunner(BaseQueryRunner):
def get_schema(self, get_stats=False):
schema_dict = {}
self._get_tables(schema_dict)
if settings.SCHEMA_RUN_TABLE_SIZE_CALCULATIONS and get_stats:
self._get_tables_stats(schema_dict)
return schema_dict.values()
def _get_tables(self, schema_dict):
return []
def _get_tables_stats(self, tables_dict):
for t in tables_dict.keys():
if type(tables_dict[t]) == dict:
res = self._run_query_internal('select count(*) as cnt from %s' % t)
tables_dict[t]['size'] = res[0]['cnt']
query_runners = {}
def register(query_runner_class):
global query_runners
if query_runner_class.enabled():
logger.debug("Registering %s (%s) query runner.", query_runner_class.name(), query_runner_class.type())
query_runners[query_runner_class.type()] = query_runner_class
else:
logger.debug("%s query runner enabled but not supported, not registering. Either disable or install missing "
"dependencies.", query_runner_class.name())
def get_query_runner(query_runner_type, configuration):
query_runner_class = query_runners.get(query_runner_type, None)
if query_runner_class is None:
return None
return query_runner_class(configuration)
def get_configuration_schema_for_query_runner_type(query_runner_type):
query_runner_class = query_runners.get(query_runner_type, None)
if query_runner_class is None:
return None
return query_runner_class.configuration_schema()
def import_query_runners(query_runner_imports):
for runner_import in query_runner_imports:
__import__(runner_import)
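# Hedged usage sketch: EchoRunner below is illustrative only and not part of this
# module; it shows how a concrete runner subclasses BaseQueryRunner, registers
# itself, and is then looked up by type via get_query_runner().
if __name__ == '__main__':
    class EchoRunner(BaseQueryRunner):
        noop_query = 'SELECT 1'
        def run_query(self, query, user):
            # Echo the query text back as a single-row, single-column result set.
            data = {'columns': [{'name': 'echo', 'friendly_name': 'echo', 'type': TYPE_STRING}],
                    'rows': [{'echo': query}]}
            return json.dumps(data), None
    register(EchoRunner)
    runner = get_query_runner('echorunner', configuration={})
    print(runner.run_query('hello', None))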
|
py | 1a5357ff2d66b3afc9cea4e770a1e6162113d276 | import summarizer as nlp
import csv
from sklearn.preprocessing import OneHotEncoder
import numpy as np
from collections import defaultdict, Counter
import math
from myfile import *
from googletrans import Translator
# turns .tsv file into list of lists
def tsv2mat(fname) :
with open(fname) as f:
wss = csv.reader(f, delimiter='\t')
return list(wss)
class Data :
'''
builds dataset from dependency edges in .tsv file associating
<from,link,to> edges and sentences in which they occur;
links are of the form POS_deprel_POS with POS and deprel
tags concatenated
'''
def __init__(self,fname='texts/english') :
edge_file="out/"+fname+".tsv"
if not nlp.exists_file(edge_file) :
nlp.process_file(fname=fname)
wss = tsv2mat(edge_file)
self.sents=tsv2mat("out/"+fname+"_sents.tsv")
occs=defaultdict(set)
sids=set()
lens=[]
for f,ff,r,tt,t,id in wss:
id=int(id)
if len(lens)<=id : lens.append(0)
lens[id]+=1
occs[(f,ff,r,tt,t)].add(id)
sids.add(id)
self.occs=occs # dict where edges occur
self.lens=lens # number of edges in each sentence
X,Y=list(zip(*list(occs.items())))
X = np.array(X)
y0 = np.array(sorted(map(lambda x:[x],sids)))
# make OneHot encoders for X and y
enc_X = OneHotEncoder(handle_unknown='ignore')
enc_y = OneHotEncoder(handle_unknown='ignore')
enc_X.fit(X)
enc_y.fit(y0)
hot_X = enc_X.transform(X).toarray()
self.enc_X = enc_X
self.enc_y = enc_y
self.X=X
# encode y as logical_or of sentence encodings it occurs in
ms=[]
for ys in Y :
m = np.array([[0]],dtype=np.float32)
for v in ys :
m0=enc_y.transform(np.array([[v]])).toarray()
m = np.logical_or(m,m0)
m=np.array(np.logical_or(m,m0),dtype=np.float32)
ms.append(m[0])
hot_y=np.array(ms)
self.hot_X=hot_X
self.hot_y =hot_y
        print('\nFINAL DATA SHAPES','X',hot_X.shape,'y',hot_y.shape)
#print('SENTENCE LENGTHS',lens)
class Query(Data) :
'''
    builds <from,link,to> dependency links from a given
    text query and matches it against data to retrieve
sentences in which most of those edges occur
'''
def __init__(self,fname='texts/english'):
super().__init__(fname=fname)
text = file2text(fname + ".txt")
self.data_lang = nlp.detectLang(text)
self.nlp_engine=nlp.NLP()
def ask(self,text=None,interactive=False, tolang='en'):
'''
compute Jaccard similarity between
set of edges in query and each sentence,
then select the most similar ones
'''
if not text: text = input("Query:")
elif not interactive: print("Query:",text)
self.question_lang = nlp.detectLang(text)
print('qLang:', self.question_lang)
print('Data Lang:',self.data_lang)
if self.question_lang != self.data_lang:
translator = Translator()
if self.data_lang == 'zh':
text= translator.translate(text, dest='zh-cn').text
elif self.data_lang == 'jv':
text= translator.translate(text, dest='jw').text
else:
text= translator.translate(text, dest=self.data_lang).text
print('translated question:\n', text)
self.nlp_engine.from_text(text)
sids=[]
for f,ff,r,tt,t,_ in self.nlp_engine.facts() :
sids.extend(self.occs.get((f,ff,r,tt,t),[]))
self.save_answers(sids, tolang)
def save_answers(self, sids, tolang, k=3):
c = Counter(sids)
qlen=len(list(self.nlp_engine.facts()))
for id in c:
shared=c[id]
union_size=self.lens[id]+qlen-shared
#jaccard=shared/union_size
#c[id]=jaccard
c[id]=shared/math.log(union_size)
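            # length-normalized overlap: shared edge count divided by the log of
            # the union size, a softer variant of the plain Jaccard ratio kept
            # commented out above.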
print('\nHIT WEIGHTS:', c, "\n")
best = c.most_common(k)
print('save_answers, question_lang:', self.question_lang, ', data_lang:\n', self.data_lang)
translator = Translator()
self.answer = defaultdict(set)
for sid, _ in best:
id, sent = self.sents[sid]
print(id, ':', sent)
if self.data_lang == tolang:
self.answer[id] = sent
else:
sent= translator.translate(sent, dest=tolang).text
self.answer[id] = sent
print("")
def show_answers(self):
print("\nSummary:")
for id in self.answer:
print(id, ':', self.answer[id])
print("")
def interact(self):
while True:
text = input("Query: ")
if not text: return
self.ask(text=text,interactive=True)
### TESTS ###
def qtest() :
q=Query()
q.ask(text="What did Penrose show?", tolang="en")
q.show_answers()
q.ask(text="What was in Roger's 1965 paper?", tolang="en")
q.show_answers()
def dtest(fname='texts/english') :
    d=Data(fname=fname)
    print("X",d.hot_X.shape)
    print(d.hot_X)
    print("y",d.hot_y.shape)
    print(d.hot_y)
def dtests():
    ''' data loading tests'''
    dtest('texts/english')
    dtest('texts/spanish')
    dtest('texts/chinese')
    dtest('texts/russian')
def atest() :
''' tests symbolic and neural QA on given document '''
'''
i=Query('texts/english')
print("\n")
print("ALGORITHMICALLY DERIVED ANSWERS:\n")
i.ask("What did Penrose show about black holes?")
i.ask(text="What was in Roger's 1965 paper?")
print("\n")
'''
i=Query('texts/chinese')
print("\n")
print("ALGORITHMICALLY DERIVED ANSWERS:\n")
'''
i.ask("中国藏书有多少年历史?")
i.show_answers()
i.ask(text="设立图书馆情报学本科教育的学校有多少所?")
i.show_answers()
'''
i.ask("How many years is the Chinese collection of books?", tolang="en")
i.show_answers()
i.ask(text="How many schools have established undergraduate education in library and information science?", tolang="en")
i.show_answers()
print("\n")
if __name__=="__main__" :
atest()
|
py | 1a53580d3e136d7e17e07d20b3b15acb62f5b053 | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('button09.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet1.write_comment('A1', 'Foo')
worksheet2.insert_button('C2', {})
worksheet1.set_comments_author('John')
workbook.close()
self.assertExcelEqual()
|
py | 1a53596fb5967c713cfef11d296f83c6d0df5106 | from typing import Any, List
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import index as libindex
from pandas._libs.hashtable import duplicated_int64
from pandas._typing import AnyArrayLike
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.common import (
ensure_platform_int,
is_categorical_dtype,
is_interval_dtype,
is_list_like,
is_scalar,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCCategorical, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import accessor
from pandas.core.algorithms import take_1d
from pandas.core.arrays.categorical import Categorical, _recode_for_categories, contains
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs, maybe_extract_name
from pandas.core.indexes.extension import ExtensionIndex, inherit_names
import pandas.core.missing as missing
from pandas.core.ops import get_op_result_name
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass="CategoricalIndex"))
@inherit_names(
[
"argsort",
"_internal_get_values",
"tolist",
"codes",
"categories",
"ordered",
"_reverse_indexer",
"searchsorted",
"is_dtype_equal",
"min",
"max",
],
Categorical,
)
@accessor.delegate_names(
delegate=Categorical,
accessors=[
"rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered",
"as_unordered",
],
typ="method",
overwrite=True,
)
class CategoricalIndex(ExtensionIndex, accessor.PandasDelegate):
"""
Index based on an underlying :class:`Categorical`.
CategoricalIndex, like Categorical, can only take on a limited,
and usually fixed, number of possible values (`categories`). Also,
like Categorical, it might have an order, but numerical operations
(additions, divisions, ...) are not possible.
Parameters
----------
data : array-like (1-dimensional)
The values of the categorical. If `categories` are given, values not in
`categories` will be replaced with NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here (and also not in `dtype`), they
will be inferred from the `data`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
.. versionadded:: 0.21.0
copy : bool, default False
Make a copy of input ndarray.
name : object, optional
Name to be stored in the index.
Attributes
----------
codes
categories
ordered
Methods
-------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
as_ordered
as_unordered
map
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
Index : The base pandas Index type.
Categorical : A categorical array.
CategoricalDtype : Type for categorical data.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`_
for more.
Examples
--------
>>> pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') # noqa
``CategoricalIndex`` can also be instantiated from a ``Categorical``:
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
>>> pd.CategoricalIndex(c)
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') # noqa
Ordered ``CategoricalIndex`` can have a min and max value.
>>> ci = pd.CategoricalIndex(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> ci
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['c', 'b', 'a'], ordered=True, dtype='category') # noqa
>>> ci.min()
'c'
"""
_typ = "categoricalindex"
_raw_inherit = {
"argsort",
"_internal_get_values",
"tolist",
"codes",
"categories",
"ordered",
"_reverse_indexer",
"searchsorted",
}
codes: np.ndarray
categories: Index
@property
def _engine_type(self):
# self.codes can have dtype int8, int16, int32 or int64, so we need
# to return the corresponding engine type (libindex.Int8Engine, etc.).
return {
np.int8: libindex.Int8Engine,
np.int16: libindex.Int16Engine,
np.int32: libindex.Int32Engine,
np.int64: libindex.Int64Engine,
}[self.codes.dtype.type]
_attributes = ["name"]
# --------------------------------------------------------------------
# Constructors
def __new__(
cls, data=None, categories=None, ordered=None, dtype=None, copy=False, name=None
):
dtype = CategoricalDtype._from_values_or_dtype(data, categories, ordered, dtype)
name = maybe_extract_name(name, data, cls)
if not is_categorical_dtype(data):
# don't allow scalars
# if data is None, then categories must be provided
if is_scalar(data):
if data is not None or categories is None:
raise cls._scalar_data_error(data)
data = []
data = cls._create_categorical(data, dtype=dtype)
data = data.copy() if copy else data
return cls._simple_new(data, name=name)
def _create_from_codes(self, codes, dtype=None, name=None):
"""
*this is an internal non-public method*
create the correct categorical from codes
Parameters
----------
codes : new codes
dtype: CategoricalDtype, defaults to existing
name : optional name attribute, defaults to existing
Returns
-------
CategoricalIndex
"""
if dtype is None:
dtype = self.dtype
if name is None:
name = self.name
cat = Categorical.from_codes(codes, dtype=dtype)
return CategoricalIndex(cat, name=name)
@classmethod
def _create_categorical(cls, data, dtype=None):
"""
*this is an internal non-public method*
create the correct categorical from data and the properties
Parameters
----------
data : data for new Categorical
dtype : CategoricalDtype, defaults to existing
Returns
-------
Categorical
"""
if isinstance(data, (cls, ABCSeries)) and is_categorical_dtype(data):
data = data.values
if not isinstance(data, ABCCategorical):
return Categorical(data, dtype=dtype)
if isinstance(dtype, CategoricalDtype) and dtype != data.dtype:
# we want to silently ignore dtype='category'
data = data._set_dtype(dtype)
return data
@classmethod
def _simple_new(cls, values, name=None, dtype=None):
result = object.__new__(cls)
values = cls._create_categorical(values, dtype=dtype)
result._data = values
result.name = name
result._reset_identity()
result._no_setting_name = False
return result
# --------------------------------------------------------------------
@Appender(_index_shared_docs["_shallow_copy"])
def _shallow_copy(self, values=None, dtype=None, **kwargs):
if dtype is None:
dtype = self.dtype
return super()._shallow_copy(values=values, dtype=dtype, **kwargs)
def _is_dtype_compat(self, other) -> bool:
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
if isinstance(other, CategoricalIndex):
other = other._values
if not other.is_dtype_equal(self):
raise TypeError(
"categories must match existing categories when appending"
)
else:
values = other
if not is_list_like(values):
values = [values]
other = CategoricalIndex(self._create_categorical(other, dtype=self.dtype))
if not other.isin(values).all():
raise TypeError(
"cannot append a non-category item to a CategoricalIndex"
)
return other
def equals(self, other):
"""
Determine if two CategoricalIndex objects contain the same elements.
Returns
-------
bool
If two CategoricalIndex objects have equal elements True,
otherwise False.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
try:
other = self._is_dtype_compat(other)
if isinstance(other, type(self)):
other = other._data
return self._data.equals(other)
except (TypeError, ValueError):
pass
return False
# --------------------------------------------------------------------
# Rendering Methods
@property
def _formatter_func(self):
return self.categories._formatter_func
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
max_categories = (
10
if get_option("display.max_categories") == 0
else get_option("display.max_categories")
)
attrs = [
(
"categories",
ibase.default_pprint(self.categories, max_seq_items=max_categories),
),
("ordered", self.ordered),
]
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
attrs.append(("dtype", f"'{self.dtype.name}'"))
max_seq_items = get_option("display.max_seq_items") or len(self)
if len(self) > max_seq_items:
attrs.append(("length", len(self)))
return attrs
# --------------------------------------------------------------------
@property
def inferred_type(self) -> str:
return "categorical"
@property
def values(self):
""" return the underlying data, which is a Categorical """
return self._data
@property
def _has_complex_internals(self):
# used to avoid libreduction code paths, which raise or require conversion
return True
def _wrap_setop_result(self, other, result):
name = get_op_result_name(self, other)
# We use _shallow_copy rather than the Index implementation
# (which uses _constructor) in order to preserve dtype.
return self._shallow_copy(result, name=name)
@Appender(_index_shared_docs["contains"] % _index_doc_kwargs)
def __contains__(self, key: Any) -> bool:
# if key is a NaN, check if any NaN is in self.
if is_scalar(key) and isna(key):
return self.hasnans
hash(key)
return contains(self, key, container=self._engine)
def __array__(self, dtype=None) -> np.ndarray:
""" the array interface, return my values """
return np.array(self._data, dtype=dtype)
@Appender(_index_shared_docs["astype"])
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
return IntervalIndex(np.array(self))
elif is_categorical_dtype(dtype):
# GH 18630
dtype = self.dtype.update_dtype(dtype)
if dtype == self.dtype:
return self.copy() if copy else self
return Index.astype(self, dtype=dtype, copy=copy)
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
return self._data.codes == -1
@Appender(ibase._index_shared_docs["fillna"])
def fillna(self, value, downcast=None):
self._assert_can_do_op(value)
return CategoricalIndex(self._data.fillna(value), name=self.name)
@cache_readonly
def _engine(self):
# we are going to look things up with the codes themselves.
# To avoid a reference cycle, bind `codes` to a local variable, so
# `self` is not passed into the lambda.
codes = self.codes
return self._engine_type(lambda: codes, len(self))
@Appender(_index_shared_docs["index_unique"] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = self.values.unique()
# CategoricalIndex._shallow_copy keeps original dtype
# if not otherwise specified
return self._shallow_copy(result, dtype=result.dtype)
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep="first"):
codes = self.codes.astype("i8")
return duplicated_int64(codes, keep)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype("object")
def get_loc(self, key, method=None):
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}
* default: exact matches only.
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Raises
------
KeyError : if the key is not in the index
Examples
--------
>>> unique_index = pd.CategoricalIndex(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.CategoricalIndex(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.CategoricalIndex(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
code = self.categories.get_loc(key)
code = self.codes.dtype.type(code)
try:
return self._engine.get_loc(code)
except KeyError:
raise KeyError(key)
def get_value(self, series: AnyArrayLike, key: Any):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
Parameters
----------
series : Series, ExtensionArray, Index, or ndarray
1-dimensional array to take values from
        key : scalar
The value of this index at the position of the desired value,
otherwise the positional index of the desired value
Returns
-------
Any
The element of the series at the position indicated by the key
"""
k = key
try:
k = self._convert_scalar_indexer(k, kind="getitem")
indexer = self.get_loc(k)
return series.take([indexer])[0]
except (KeyError, TypeError):
pass
        # we might be a positional indexer
return super().get_value(series, key)
@Appender(_index_shared_docs["where"])
def where(self, cond, other=None):
# TODO: Investigate an alternative implementation with
# 1. copy the underlying Categorical
# 2. setitem with `cond` and `other`
# 3. Rebuild CategoricalIndex.
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
cat = Categorical(values, dtype=self.dtype)
return self._shallow_copy(cat, **self._get_attributes_dict())
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError(
"argument method is not implemented for CategoricalIndex.reindex"
)
if level is not None:
raise NotImplementedError(
"argument level is not implemented for CategoricalIndex.reindex"
)
if limit is not None:
raise NotImplementedError(
"argument limit is not implemented for CategoricalIndex.reindex"
)
target = ibase.ensure_index(target)
missing: List[int]
if self.equals(target):
indexer = None
missing = []
else:
indexer, missing = self.get_indexer_non_unique(np.array(target))
if len(self.codes) and indexer is not None:
new_target = self.take(indexer)
else:
new_target = target
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
new_target = self._create_from_codes(codes)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an initial Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = target._shallow_copy(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
def _reindex_non_unique(self, target):
""" reindex from a non-unique; which CategoricalIndex's are almost
always
"""
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
if check.any():
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[check] = -1
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
new_target = self._shallow_copy(new_target)
return new_target, indexer, new_indexer
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ibase.ensure_index(target)
if self.is_unique and self.equals(target):
return np.arange(len(self), dtype="intp")
if method == "pad" or method == "backfill":
raise NotImplementedError(
"method='pad' and method='backfill' not "
"implemented yet for CategoricalIndex"
)
elif method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet for CategoricalIndex"
)
if isinstance(target, CategoricalIndex) and self.values.is_dtype_equal(target):
if self.values.equals(target.values):
# we have the same codes
codes = target.codes
else:
codes = _recode_for_categories(
target.codes, target.categories, self.values.categories
)
else:
if isinstance(target, CategoricalIndex):
code_indexer = self.categories.get_indexer(target.categories)
codes = take_1d(code_indexer, target.codes, fill_value=-1)
else:
codes = self.categories.get_indexer(target)
indexer, _ = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ibase.ensure_index(target)
if isinstance(target, CategoricalIndex):
# Indexing on codes is more efficient if categories are the same:
if target.categories is self.categories:
target = target.codes
indexer, missing = self._engine.get_indexer_non_unique(target)
return ensure_platform_int(indexer), missing
target = target.values
codes = self.categories.get_indexer(target)
indexer, missing = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer), missing
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
if kind == "loc":
try:
return self.categories._convert_scalar_indexer(key, kind=kind)
except TypeError:
self._invalid_indexer("label", key)
return super()._convert_scalar_indexer(key, kind=kind)
@Appender(_index_shared_docs["_convert_list_indexer"])
def _convert_list_indexer(self, keyarr, kind=None):
# Return our indexer or raise if all of the values are not included in
# the categories
if self.categories._defer_to_indexing:
indexer = self.categories._convert_list_indexer(keyarr, kind=kind)
return Index(self.codes).get_indexer_for(indexer)
indexer = self.categories.get_indexer(np.asarray(keyarr))
if (indexer == -1).any():
raise KeyError(
"a list-indexer must only include values that are in the categories"
)
return self.get_indexer(keyarr)
@Appender(_index_shared_docs["_convert_arr_indexer"])
def _convert_arr_indexer(self, keyarr):
keyarr = com.asarray_tuplesafe(keyarr)
if self.categories._defer_to_indexing:
return keyarr
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs["_convert_index_indexer"])
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)
def take_nd(self, *args, **kwargs):
"""Alias for `take`"""
warnings.warn(
"CategoricalIndex.take_nd is deprecated, use CategoricalIndex.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(*args, **kwargs)
@Appender(_index_shared_docs["_maybe_cast_slice_bound"])
def _maybe_cast_slice_bound(self, label, side, kind):
if kind == "loc":
return label
return super()._maybe_cast_slice_bound(label, side, kind)
def map(self, mapper):
"""
Map values using input correspondence (a dict, Series, or function).
Maps the values (their categories, not the codes) of the index to new
categories. If the mapping correspondence is one-to-one the result is a
:class:`~pandas.CategoricalIndex` which has the same order property as
the original, otherwise an :class:`~pandas.Index` is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.CategoricalIndex or pandas.Index
Mapped index.
See Also
--------
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> idx.map(lambda x: x.upper())
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
ordered=False, dtype='category')
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
CategoricalIndex(['first', 'second', 'third'], categories=['first',
'second', 'third'], ordered=False, dtype='category')
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=True, dtype='category')
>>> idx.map({'a': 3, 'b': 2, 'c': 1})
CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
dtype='category')
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> idx.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
return self._shallow_copy_with_infer(self.values.map(mapper))
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._create_from_codes(np.delete(self.codes, loc))
def insert(self, loc: int, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Raises
------
ValueError if the item is not in the categories
"""
code = self.categories.get_indexer([item])
if (code == -1) and not (is_scalar(item) and isna(item)):
raise TypeError(
"cannot insert an item into a CategoricalIndex "
"that is not already an existing category"
)
codes = self.codes
codes = np.concatenate((codes[:loc], code, codes[loc:]))
return self._create_from_codes(codes)
def _concat(self, to_concat, name):
# if calling index is category, don't check dtype of others
return CategoricalIndex._concat_same_dtype(self, to_concat, name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
ValueError if other is not in the categories
"""
codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat])
result = self._create_from_codes(codes, name=name)
# if name is None, _create_from_codes sets self.name
result.name = name
return result
def _delegate_property_get(self, name, *args, **kwargs):
""" method delegation to the ._values """
prop = getattr(self._values, name)
return prop # no wrapping for now
def _delegate_method(self, name, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if "inplace" in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
if is_scalar(res) or name in self._raw_inherit:
return res
return CategoricalIndex(res, name=self.name)
CategoricalIndex._add_numeric_methods_add_sub_disabled()
CategoricalIndex._add_numeric_methods_disabled()
CategoricalIndex._add_logical_methods_disabled()
|
py | 1a5359ad22447d63937dd82def509e7f6ba77964 | # -*- coding: utf-8 -*-
"""api v2.0"""
from flask import Blueprint, g
api_bp = Blueprint('api_1_0', __name__, url_prefix='/api/v1.0')
@api_bp.before_request
def before_request():
g.token_auth_used = g.http_auth_used = None
# FIXME(hoatle): security check of inactive, not verified users
# TODO(hoatle): exclude debug toolbar for /api/, add rest logger to serve ?debug query
# TODO(hoatle): session usage should be removed from REST
import errors
from .users import UserResource
from .token import TokenResource
from .roles import RoleResource
from .movie import EpisodeResource, MovieResource
UserResource.register(api_bp)
TokenResource.register(api_bp)
RoleResource.register(api_bp)
EpisodeResource.register(api_bp)
MovieResource.register(api_bp)
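# Hedged wiring sketch (comment form): the blueprint is normally attached to the
# Flask app elsewhere, e.g. in an application factory; `app` here is illustrative.
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(api_bp)  # exposes the resources under /api/v1.0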
|
py | 1a535a221f587eb9f578e251312b8feb161ebaa3 | #!/usr/bin/python
""" PN-CLI vrouter-bgp-add/vrouter-bgp-remove/vrouter-bgp-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import shlex
DOCUMENTATION = """
---
module: pn_vrouterbgp
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1.0
short_description: CLI command to add/remove/modify vrouter-bgp.
description:
- Execute vrouter-bgp-add, vrouter-bgp-remove, vrouter-bgp-modify command.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a vRouter service that forwards traffic between
networks and implements Layer 4 protocols.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to add bgp,
'absent' to remove bgp and 'update' to modify bgp.
required: True
choices: ['present', 'absent', 'update']
pn_vrouter_name:
description:
- Specify a name for the vRouter service.
required: True
pn_neighbor:
description:
- Specify a neighbor IP address to use for BGP.
- Required for vrouter-bgp-add.
pn_remote_as:
description:
- Specify the remote Autonomous System(AS) number. This value is between
1 and 4294967295.
- Required for vrouter-bgp-add.
pn_next_hop_self:
description:
- Specify if the next-hop is the same router or not.
pn_password:
description:
- Specify a password, if desired.
pn_ebgp:
description:
- Specify a value for external BGP to accept or attempt BGP connections
to external peers, not directly connected, on the network. This is a
value between 1 and 255.
pn_prefix_listin:
description:
- Specify the prefix list to filter traffic inbound.
pn_prefix_listout:
description:
- Specify the prefix list to filter traffic outbound.
pn_route_reflector:
description:
- Specify if a route reflector client is used.
pn_override_capability:
description:
- Specify if you want to override capability.
pn_soft_reconfig:
description:
- Specify if you want a soft reconfiguration of inbound traffic.
pn_max_prefix:
description:
- Specify the maximum number of prefixes.
pn_max_prefix_warn:
description:
- Specify if you want a warning message when the maximum number of
prefixes is exceeded.
pn_bfd:
description:
- Specify if you want BFD protocol support for fault detection.
pn_multiprotocol:
description:
- Specify a multi-protocol for BGP.
choices: ['ipv4-unicast', 'ipv6-unicast']
pn_weight:
description:
- Specify a default weight value between 0 and 65535 for the neighbor
routes.
pn_default_originate:
description:
      - Specify if you want to announce default routes to the neighbor or not.
pn_keepalive:
description:
- Specify BGP neighbor keepalive interval in seconds.
pn_holdtime:
description:
- Specify BGP neighbor holdtime in seconds.
pn_route_mapin:
description:
- Specify inbound route map for neighbor.
pn_route_mapout:
description:
- Specify outbound route map for neighbor.
"""
EXAMPLES = """
- name: add vrouter-bgp
pn_vrouterbgp:
state: 'present'
pn_vrouter_name: 'ansible-vrouter'
pn_neighbor: 104.104.104.1
pn_remote_as: 1800
- name: remove vrouter-bgp
pn_vrouterbgp:
state: 'absent'
    pn_vrouter_name: 'ansible-vrouter'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
  description: The set of responses from the vrouterbgp command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouterbgp command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
VROUTER_EXISTS = None
NEIGHBOR_EXISTS = None
def pn_cli(module):
"""
This method is to generate the cli portion to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
def check_cli(module, cli):
"""
This method checks if vRouter exists on the target node.
This method also checks for idempotency using the vrouter-bgp-show command.
If the given vRouter exists, return VROUTER_EXISTS as True else False.
If a BGP neighbor with the given ip exists on the given vRouter,
return NEIGHBOR_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS
"""
vrouter_name = module.params['pn_vrouter_name']
neighbor = module.params['pn_neighbor']
# Global flags
global VROUTER_EXISTS, NEIGHBOR_EXISTS
# Check for vRouter
check_vrouter = cli + ' vrouter-show format name no-show-headers '
check_vrouter = shlex.split(check_vrouter)
out = module.run_command(check_vrouter)[1]
out = out.split()
if vrouter_name in out:
VROUTER_EXISTS = True
else:
VROUTER_EXISTS = False
# Check for BGP neighbors
show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name
show += 'format neighbor no-show-headers'
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
if neighbor in out:
NEIGHBOR_EXISTS = True
else:
NEIGHBOR_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-bgp-add'
if state == 'absent':
command = 'vrouter-bgp-remove'
if state == 'update':
command = 'vrouter-bgp-modify'
return command
def main():
""" This portion is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_vrouter_name=dict(required=True, type='str'),
pn_neighbor=dict(type='str'),
pn_remote_as=dict(type='str'),
pn_next_hop_self=dict(type='bool'),
pn_password=dict(type='str', no_log=True),
pn_ebgp=dict(type='int'),
pn_prefix_listin=dict(type='str'),
pn_prefix_listout=dict(type='str'),
pn_route_reflector=dict(type='bool'),
pn_override_capability=dict(type='bool'),
pn_soft_reconfig=dict(type='bool'),
pn_max_prefix=dict(type='int'),
pn_max_prefix_warn=dict(type='bool'),
pn_bfd=dict(type='bool'),
pn_multiprotocol=dict(type='str',
choices=['ipv4-unicast', 'ipv6-unicast']),
pn_weight=dict(type='int'),
pn_default_originate=dict(type='bool'),
pn_keepalive=dict(type='str'),
pn_holdtime=dict(type='str'),
pn_route_mapin=dict(type='str'),
pn_route_mapout=dict(type='str')
),
required_if=(
["state", "present",
["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]],
["state", "absent",
["pn_vrouter_name", "pn_neighbor"]],
["state", "update",
["pn_vrouter_name", "pn_neighbor"]]
)
)
# Accessing the arguments
    state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
neighbor = module.params['pn_neighbor']
remote_as = module.params['pn_remote_as']
next_hop_self = module.params['pn_next_hop_self']
password = module.params['pn_password']
ebgp = module.params['pn_ebgp']
prefix_listin = module.params['pn_prefix_listin']
prefix_listout = module.params['pn_prefix_listout']
route_reflector = module.params['pn_route_reflector']
override_capability = module.params['pn_override_capability']
soft_reconfig = module.params['pn_soft_reconfig']
max_prefix = module.params['pn_max_prefix']
max_prefix_warn = module.params['pn_max_prefix_warn']
bfd = module.params['pn_bfd']
multiprotocol = module.params['pn_multiprotocol']
weight = module.params['pn_weight']
default_originate = module.params['pn_default_originate']
keepalive = module.params['pn_keepalive']
holdtime = module.params['pn_holdtime']
route_mapin = module.params['pn_route_mapin']
route_mapout = module.params['pn_route_mapout']
# Building the CLI command string
cli = pn_cli(module)
command = get_command_from_state(state)
if command == 'vrouter-bgp-remove':
check_cli(module, cli)
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NEIGHBOR_EXISTS is False:
module.exit_json(
skipped=True,
msg=('BGP neighbor with IP %s does not exist on %s'
% (neighbor, vrouter_name))
)
cli += (' %s vrouter-name %s neighbor %s '
% (command, vrouter_name, neighbor))
else:
if command == 'vrouter-bgp-add':
check_cli(module, cli)
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NEIGHBOR_EXISTS is True:
module.exit_json(
skipped=True,
msg=('BGP neighbor with IP %s already exists on %s'
% (neighbor, vrouter_name))
)
cli += (' %s vrouter-name %s neighbor %s '
% (command, vrouter_name, neighbor))
if remote_as:
cli += ' remote-as ' + str(remote_as)
if next_hop_self is True:
cli += ' next-hop-self '
if next_hop_self is False:
cli += ' no-next-hop-self '
if password:
cli += ' password ' + password
if ebgp:
cli += ' ebgp-multihop ' + str(ebgp)
if prefix_listin:
cli += ' prefix-list-in ' + prefix_listin
if prefix_listout:
cli += ' prefix-list-out ' + prefix_listout
if route_reflector is True:
cli += ' route-reflector-client '
if route_reflector is False:
cli += ' no-route-reflector-client '
if override_capability is True:
cli += ' override-capability '
if override_capability is False:
cli += ' no-override-capability '
if soft_reconfig is True:
cli += ' soft-reconfig-inbound '
if soft_reconfig is False:
cli += ' no-soft-reconfig-inbound '
if max_prefix:
cli += ' max-prefix ' + str(max_prefix)
if max_prefix_warn is True:
cli += ' max-prefix-warn-only '
if max_prefix_warn is False:
cli += ' no-max-prefix-warn-only '
if bfd is True:
cli += ' bfd '
if bfd is False:
cli += ' no-bfd '
if multiprotocol:
cli += ' multi-protocol ' + multiprotocol
if weight:
cli += ' weight ' + str(weight)
if default_originate is True:
cli += ' default-originate '
if default_originate is False:
cli += ' no-default-originate '
if keepalive:
cli += ' neighbor-keepalive-interval ' + keepalive
if holdtime:
cli += ' neighbor-holdtime ' + holdtime
if route_mapin:
cli += ' route-map-in ' + route_mapin
if route_mapout:
cli += ' route-map-out ' + route_mapout
run_cli(module, cli)
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
py | 1a535c64b91030166fbe7e990ff1d99a6d669ab3 |
import tensorflow as tf
from select_threshold_op import SelectThreshold
import numpy as np
import time
# SelectThreshold(x, pl, rowsplits, threshold=0.5)
nvert=10000
nfeat=128
xs = tf.constant(np.random.rand(nvert) ,dtype='float32')
xs = tf.reshape(xs, [-1,1])
rs = tf.constant([0,int(nvert/4),int(nvert/2),nvert],dtype='int32')
pl = tf.constant( np.random.rand(nvert,nfeat) ,dtype='float32')
print(xs, pl, rs)
newfeat, newrs, scatter_idxs = SelectThreshold(xs,pl,rs,threshold=0.5)
bef = time.time()
for _ in range(20):
newfeat, newrs, scatter_idxs = SelectThreshold(xs,pl,rs,threshold=0.5)
totaltime = time.time() - bef
print('output')
print(newfeat, rs, scatter_idxs)
print('scattered back')
print(tf.scatter_nd(scatter_idxs, newfeat ,shape=pl.shape))
print('total time', totaltime) |
py | 1a535d4347ec0c2ba37a641b3ee4731d36b09f94 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid blocks.
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
"""
import copy
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import COIN
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import BitcoinNickelTestFramework
from test_framework.util import assert_equal
class InvalidBlockRequestTest(BitcoinNickelTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-whitelist=127.0.0.1"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Add p2p connection to node0
node = self.nodes[0] # convenience reference to the node
node.add_p2p_connection(P2PDataStore())
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
self.log.info("Create a new block with an anyone-can-spend coinbase")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block.solve()
# Save the coinbase for later
block1 = block
tip = block.sha256
node.p2p.send_blocks_and_test([block1], node, success=True)
self.log.info("Mature the block.")
node.generate(100)
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
# Use merkle-root malleability to generate an invalid block with
# same blockheader.
# Manufacture a block with 3 transactions (coinbase, spend of prior
# coinbase, spend of that spend). Duplicate the 3rd transaction to
# leave merkle root and blockheader unchanged but invalidate the block.
self.log.info("Test merkle root malleability.")
block2 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
# b'0x51' is OP_TRUE
tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51', amount=50 * COIN)
tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51', amount=50 * COIN)
block2.vtx.extend([tx1, tx2])
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.rehash()
block2.solve()
orig_hash = block2.sha256
block2_orig = copy.deepcopy(block2)
# Mutate block 2
block2.vtx.append(tx2)
assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
assert_equal(orig_hash, block2.rehash())
assert block2_orig.vtx != block2.vtx
node.p2p.send_blocks_and_test([block2], node, success=False, reject_code=16, reject_reason=b'bad-txns-duplicate')
# Check transactions for duplicate inputs
self.log.info("Test duplicate input block.")
block2_orig.vtx[2].vin.append(block2_orig.vtx[2].vin[0])
block2_orig.vtx[2].rehash()
block2_orig.hashMerkleRoot = block2_orig.calc_merkle_root()
block2_orig.rehash()
block2_orig.solve()
node.p2p.send_blocks_and_test([block2_orig], node, success=False, reject_reason=b'bad-txns-inputs-duplicate')
self.log.info("Test very broken block.")
block3 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
block3.vtx[0].vout[0].nValue = 100 * COIN # Too high!
block3.vtx[0].sha256 = None
block3.vtx[0].calc_sha256()
block3.hashMerkleRoot = block3.calc_merkle_root()
block3.rehash()
block3.solve()
node.p2p.send_blocks_and_test([block3], node, success=False, reject_code=16, reject_reason=b'bad-cb-amount')
if __name__ == '__main__':
InvalidBlockRequestTest().main()
|
py | 1a535d72bab765a62688ce01303e3b07160a464f | # Generated by Django 3.1.5 on 2021-03-22 17:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_auto_20210323_0039'),
]
operations = [
migrations.AlterField(
model_name='sell',
name='timestamp',
field=models.DateTimeField(),
),
]
|