ext | sha | content
---|---|---|
py | 1a355ebf14550db96c2b5c2624b4968aa119a082 | #
#
# Copyright (C) 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Qemu monitor control classes
"""
import os
import stat
import errno
import socket
import StringIO
import logging
try:
import fdsend # pylint: disable=F0401
except ImportError:
fdsend = None
from bitarray import bitarray
from ganeti import errors
from ganeti import utils
from ganeti import constants
from ganeti import serializer
class QmpCommandNotSupported(errors.HypervisorError):
"""QMP command not supported by the monitor.
This is raised in case a QmpMonitor instance is asked to execute a command
not supported by the instance.
This is a KVM-specific exception, intended to assist in falling back to using
the human monitor for operations QMP does not support.
"""
pass
class QmpMessage(object):
"""QEMU Messaging Protocol (QMP) message.
"""
def __init__(self, data):
"""Creates a new QMP message based on the passed data.
"""
if not isinstance(data, dict):
raise TypeError("QmpMessage must be initialized with a dict")
self.data = data
def __getitem__(self, field_name):
"""Get the value of the required field if present, or None.
Overrides the [] operator to provide access to the message data,
returning None if the required item is not in the message
@return: the value of the field_name field, or None if field_name
is not contained in the message
"""
return self.data.get(field_name, None)
def __setitem__(self, field_name, field_value):
"""Set the value of the required field_name to field_value.
"""
self.data[field_name] = field_value
def __len__(self):
"""Return the number of fields stored in this QmpMessage.
"""
return len(self.data)
def __delitem__(self, key):
"""Delete the specified element from the QmpMessage.
"""
del self.data[key]
@staticmethod
def BuildFromJsonString(json_string):
"""Build a QmpMessage from a JSON encoded string.
@type json_string: str
@param json_string: JSON string representing the message
@rtype: L{QmpMessage}
@return: a L{QmpMessage} built from json_string
"""
# Parse the string
data = serializer.LoadJson(json_string)
return QmpMessage(data)
def __str__(self):
# The protocol expects the JSON object to be sent as a single line.
return serializer.DumpJson(self.data)
def __eq__(self, other):
# When comparing two QmpMessages, we are interested in comparing
# their internal representation of the message data
return self.data == other.data
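# Hedged usage sketch (not part of the original module): QmpMessage
# round-trips between dicts and the single-line JSON wire format, e.g.
#   msg = QmpMessage.BuildFromJsonString('{"execute": "query-status"}')
#   assert msg["execute"] == "query-status"
#   wire = str(msg)  # serialized back to JSON for the QMP socket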
class MonitorSocket(object):
_SOCKET_TIMEOUT = 5
def __init__(self, monitor_filename):
"""Instantiates the MonitorSocket object.
@type monitor_filename: string
@param monitor_filename: the filename of the UNIX raw socket on which the
monitor (QMP or simple one) is listening
"""
self.monitor_filename = monitor_filename
self._connected = False
def _check_socket(self):
sock_stat = None
try:
sock_stat = os.stat(self.monitor_filename)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
raise errors.HypervisorError("No monitor socket found")
else:
        raise errors.HypervisorError("Error checking monitor socket: %s" %
                                     utils.ErrnoOrStr(err))
if not stat.S_ISSOCK(sock_stat.st_mode):
raise errors.HypervisorError("Monitor socket is not a socket")
def _check_connection(self):
"""Make sure that the connection is established.
"""
if not self._connected:
raise errors.ProgrammerError("To use a MonitorSocket you need to first"
" invoke connect() on it")
def connect(self):
"""Connect to the monitor socket if not already connected.
"""
if not self._connected:
self._connect()
def is_connected(self):
"""Return whether there is a connection to the socket or not.
"""
return self._connected
def _connect(self):
"""Connects to the monitor.
Connects to the UNIX socket
@raise errors.HypervisorError: when there are communication errors
"""
if self._connected:
raise errors.ProgrammerError("Cannot connect twice")
self._check_socket()
    # Check file existence/stuff
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# We want to fail if the server doesn't send a complete message
# in a reasonable amount of time
self.sock.settimeout(self._SOCKET_TIMEOUT)
self.sock.connect(self.monitor_filename)
except EnvironmentError:
raise errors.HypervisorError("Can't connect to qmp socket")
self._connected = True
def close(self):
"""Closes the socket
It cannot be used after this call.
"""
if self._connected:
self._close()
def _close(self):
self.sock.close()
self._connected = False
def _ensure_connection(fn):
"""Decorator that wraps MonitorSocket external methods"""
def wrapper(*args, **kwargs):
"""Ensure proper connect/close and exception propagation"""
mon = args[0]
already_connected = mon.is_connected()
mon.connect()
try:
ret = fn(*args, **kwargs)
finally:
# In general this decorator wraps external methods.
# Here we close the connection only if we initiated it before,
# to protect us from using the socket after closing it
# in case we invoke a decorated method internally by accident.
if not already_connected:
mon.close()
return ret
return wrapper
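# Minimal usage sketch for the decorator above (the socket path is
# illustrative, not taken from this module):
#   qmp = QmpConnection("/var/run/ganeti/kvm/instance1.qmp")
#   qmp.HotDelNic("nic-0")  # connect() and close() happen inside the wrapper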
class QmpConnection(MonitorSocket):
"""Connection to the QEMU Monitor using the QEMU Monitor Protocol (QMP).
"""
_FIRST_MESSAGE_KEY = "QMP"
_EVENT_KEY = "event"
_ERROR_KEY = "error"
_RETURN_KEY = "return"
_ACTUAL_KEY = ACTUAL_KEY = "actual"
_ERROR_CLASS_KEY = "class"
_ERROR_DESC_KEY = "desc"
_EXECUTE_KEY = "execute"
_ARGUMENTS_KEY = "arguments"
_VERSION_KEY = "version"
_PACKAGE_KEY = "package"
_QEMU_KEY = "qemu"
_CAPABILITIES_COMMAND = "qmp_capabilities"
_QUERY_COMMANDS = "query-commands"
_MESSAGE_END_TOKEN = "\r\n"
# List of valid attributes for the device_add QMP command.
# Extra attributes found in device's hvinfo will be ignored.
_DEVICE_ATTRIBUTES = [
"driver", "id", "bus", "addr", "channel", "scsi-id", "lun"
]
def __init__(self, monitor_filename):
super(QmpConnection, self).__init__(monitor_filename)
self._buf = ""
self.supported_commands = None
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def connect(self):
"""Connects to the QMP monitor.
Connects to the UNIX socket and makes sure that we can actually send and
receive data to the kvm instance via QMP.
@raise errors.HypervisorError: when there are communication errors
@raise errors.ProgrammerError: when there are data serialization errors
"""
super(QmpConnection, self).connect()
# Check if we receive a correct greeting message from the server
# (As per the QEMU Protocol Specification 0.1 - section 2.2)
greeting = self._Recv()
if not greeting[self._FIRST_MESSAGE_KEY]:
self._connected = False
      raise errors.HypervisorError("kvm: QMP communication error (wrong"
                                   " server greeting)")
# Extract the version info from the greeting and make it available to users
# of the monitor.
version_info = greeting[self._FIRST_MESSAGE_KEY][self._VERSION_KEY]
self.version = (version_info[self._QEMU_KEY]["major"],
version_info[self._QEMU_KEY]["minor"],
version_info[self._QEMU_KEY]["micro"])
self.package = version_info[self._PACKAGE_KEY].strip()
    # This is needed because QMP can return more than one greeting
# see https://groups.google.com/d/msg/ganeti-devel/gZYcvHKDooU/SnukC8dgS5AJ
self._buf = ""
# Let's put the monitor in command mode using the qmp_capabilities
# command, or else no command will be executable.
# (As per the QEMU Protocol Specification 0.1 - section 4)
self.Execute(self._CAPABILITIES_COMMAND)
self.supported_commands = self._GetSupportedCommands()
def _ParseMessage(self, buf):
"""Extract and parse a QMP message from the given buffer.
Seeks for a QMP message in the given buf. If found, it parses it and
returns it together with the rest of the characters in the buf.
If no message is found, returns None and the whole buffer.
@raise errors.ProgrammerError: when there are data serialization errors
"""
message = None
# Check if we got the message end token (CRLF, as per the QEMU Protocol
# Specification 0.1 - Section 2.1.1)
pos = buf.find(self._MESSAGE_END_TOKEN)
if pos >= 0:
try:
message = QmpMessage.BuildFromJsonString(buf[:pos + 1])
except Exception, err:
raise errors.ProgrammerError("QMP data serialization error: %s" % err)
buf = buf[pos + 1:]
return (message, buf)
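  # Illustration of the contract above (assuming a CRLF-terminated stream):
  #   _ParseMessage('{"return": {}}\r\n{"eve')
  # returns (QmpMessage({"return": {}}), '\n{"eve'): one parsed message plus
  # the unconsumed tail, which _Recv() keeps in self._buf for the next call.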
def _Recv(self):
"""Receives a message from QMP and decodes the received JSON object.
@rtype: QmpMessage
@return: the received message
@raise errors.HypervisorError: when there are communication errors
@raise errors.ProgrammerError: when there are data serialization errors
"""
self._check_connection()
# Check if there is already a message in the buffer
(message, self._buf) = self._ParseMessage(self._buf)
if message:
return message
recv_buffer = StringIO.StringIO(self._buf)
recv_buffer.seek(len(self._buf))
try:
while True:
data = self.sock.recv(4096)
if not data:
break
recv_buffer.write(data)
(message, self._buf) = self._ParseMessage(recv_buffer.getvalue())
if message:
return message
except socket.timeout, err:
raise errors.HypervisorError("Timeout while receiving a QMP message: "
"%s" % (err))
except socket.error, err:
raise errors.HypervisorError("Unable to receive data from KVM using the"
" QMP protocol: %s" % err)
def _Send(self, message):
"""Encodes and sends a message to KVM using QMP.
@type message: QmpMessage
@param message: message to send to KVM
@raise errors.HypervisorError: when there are communication errors
@raise errors.ProgrammerError: when there are data serialization errors
"""
self._check_connection()
try:
message_str = str(message)
except Exception, err:
raise errors.ProgrammerError("QMP data deserialization error: %s" % err)
try:
self.sock.sendall(message_str)
except socket.timeout, err:
raise errors.HypervisorError("Timeout while sending a QMP message: "
"%s" % err)
except socket.error, err:
raise errors.HypervisorError("Unable to send data from KVM using the"
" QMP protocol: %s" % err)
def _GetSupportedCommands(self):
"""Update the list of supported commands.
"""
result = self.Execute(self._QUERY_COMMANDS)
return frozenset(com["name"] for com in result)
def Execute(self, command, arguments=None):
"""Executes a QMP command and returns the response of the server.
@type command: str
@param command: the command to execute
@type arguments: dict
@param arguments: dictionary of arguments to be passed to the command
@rtype: dict
@return: dictionary representing the received JSON object
@raise errors.HypervisorError: when there are communication errors
@raise errors.ProgrammerError: when there are data serialization errors
"""
self._check_connection()
# During the first calls of Execute, the list of supported commands has not
# yet been populated, so we can't use it.
if (self.supported_commands is not None and
command not in self.supported_commands):
raise QmpCommandNotSupported("Instance does not support the '%s'"
" QMP command." % command)
message = QmpMessage({self._EXECUTE_KEY: command})
if arguments:
message[self._ARGUMENTS_KEY] = arguments
self._Send(message)
ret = self._GetResponse(command)
    # Log important QMP commands.
if command not in [self._QUERY_COMMANDS, self._CAPABILITIES_COMMAND]:
logging.debug("QMP %s %s: %s\n", command, arguments, ret)
return ret
def _GetResponse(self, command):
"""Parse the QMP response
If error key found in the response message raise HypervisorError.
Ignore any async event and thus return the response message
related to command.
"""
    # According to the QMP specification, there are only two reply types to a
# command: either error (containing the "error" key) or success (containing
# the "return" key). There is also a third possibility, that of an
# (unrelated to the command) asynchronous event notification, identified by
# the "event" key.
while True:
response = self._Recv()
err = response[self._ERROR_KEY]
if err:
raise errors.HypervisorError("kvm: error executing the %s"
" command: %s (%s):" %
(command,
err[self._ERROR_DESC_KEY],
err[self._ERROR_CLASS_KEY]))
elif response[self._EVENT_KEY]:
# Filter-out any asynchronous events
continue
return response[self._RETURN_KEY]
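  # Illustration of the three reply shapes handled by _GetResponse above:
  #   {"return": {...}}                        -> success; value is returned
  #   {"error": {"class": ..., "desc": ...}}   -> raises HypervisorError
  #   {"event": "POWERDOWN", "timestamp": ...} -> unrelated async event, skipped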
def _filter_hvinfo(self, hvinfo):
"""Filter non valid keys of the device's hvinfo (if any)."""
ret = {}
for k in self._DEVICE_ATTRIBUTES:
if k in hvinfo:
ret[k] = hvinfo[k]
return ret
@_ensure_connection
def HotAddNic(self, nic, devid, tapfds=None, vhostfds=None, features=None):
"""Hot-add a NIC
First pass the tapfds, then netdev_add and then device_add
"""
if tapfds is None:
tapfds = []
if vhostfds is None:
vhostfds = []
if features is None:
features = {}
enable_vhost = features.get("vhost", False)
enable_mq, virtio_net_queues = features.get("mq", (False, 1))
fdnames = []
for i, fd in enumerate(tapfds):
fdname = "%s-%d" % (devid, i)
self._GetFd(fd, fdname)
fdnames.append(fdname)
arguments = {
"type": "tap",
"id": devid,
"fds": ":".join(fdnames),
}
if enable_vhost:
fdnames = []
for i, fd in enumerate(vhostfds):
fdname = "%s-vhost-%d" % (devid, i)
self._GetFd(fd, fdname)
fdnames.append(fdname)
arguments.update({
"vhost": "on",
"vhostfds": ":".join(fdnames),
})
self.Execute("netdev_add", arguments)
arguments = {
"netdev": devid,
"mac": nic.mac,
}
    # Note that the hvinfo that _GenerateDeviceHVInfo() creates
    # should include *only* the driver, id, bus, and addr keys
arguments.update(self._filter_hvinfo(nic.hvinfo))
if enable_mq:
arguments.update({
"mq": "on",
"vectors": (2 * virtio_net_queues + 1),
})
self.Execute("device_add", arguments)
@_ensure_connection
def HotDelNic(self, devid):
"""Hot-del a NIC
"""
self.Execute("device_del", {"id": devid})
self.Execute("netdev_del", {"id": devid})
@_ensure_connection
def HotAddDisk(self, disk, devid, uri, drive_add_fn=None):
"""Hot-add a disk
Try opening the device to obtain a fd and pass it with SCM_RIGHTS. This
will be omitted in case of userspace access mode (open will fail).
    Then use the blockdev-add QMP command, or the drive_add_fn() callback if
    given. Finally, add the guest device.
"""
if os.path.exists(uri):
fd = os.open(uri, os.O_RDWR)
fdset = self._AddFd(fd)
os.close(fd)
filename = "/dev/fdset/%s" % fdset
else:
# The uri is not a file.
# This can happen if a userspace uri is provided.
filename = uri
fdset = None
# FIXME: Use blockdev-add/blockdev-del when properly implemented in QEMU.
# This is an ugly hack to work around QEMU commits 48f364dd and da2cf4e8:
# * HMP's drive_del is not supported any more on a drive added
# via QMP's blockdev-add
# * Stay away from immature blockdev-add unless you want to help
# with development.
    # drive_add must be invoked via a callback because, if the QMP connection
    # terminates before the drive holds a reference to the fd passed via the
    # add-fd QMP command, the fd gets closed and cannot be used later.
if drive_add_fn:
drive_add_fn(filename)
else:
arguments = {
"options": {
"driver": "raw",
"id": devid,
"file": {
"driver": "file",
"filename": filename,
}
}
}
self.Execute("blockdev-add", arguments)
if fdset is not None:
self._RemoveFdset(fdset)
arguments = {
"drive": devid,
}
    # Note that the hvinfo that _GenerateDeviceHVInfo() creates
    # should include *only* the driver, id, bus, and
    # addr or channel, scsi-id, and lun keys
arguments.update(self._filter_hvinfo(disk.hvinfo))
self.Execute("device_add", arguments)
@_ensure_connection
def HotDelDisk(self, devid):
"""Hot-del a Disk
Note that drive_del is not supported yet in qmp and thus should
be invoked from HMP.
"""
self.Execute("device_del", {"id": devid})
#TODO: uncomment when drive_del gets implemented in upstream qemu
# self.Execute("drive_del", {"id": devid})
def _GetPCIDevices(self):
"""Get the devices of the first PCI bus of a running instance.
"""
self._check_connection()
pci = self.Execute("query-pci")
bus = pci[0]
devices = bus["devices"]
return devices
def _HasPCIDevice(self, devid):
"""Check if a specific device ID exists on the PCI bus.
"""
for d in self._GetPCIDevices():
if d["qdev_id"] == devid:
return True
return False
def _GetBlockDevices(self):
"""Get the block devices of a running instance.
The query-block QMP command returns a list of dictionaries
including information for each virtual disk. For example:
[{"device": "disk-049f140d", "inserted": {"file": ..., "image": ...}}]
@rtype: list of dicts
@return: Info about the virtual disks of the instance.
"""
self._check_connection()
devices = self.Execute("query-block")
return devices
def _HasBlockDevice(self, devid):
"""Check if a specific device ID exists among block devices.
"""
for d in self._GetBlockDevices():
if d["device"] == devid:
return True
return False
@_ensure_connection
def HasDevice(self, devid):
"""Check if a specific device exists or not on a running instance.
It first checks the PCI devices and then the block devices.
"""
if (self._HasPCIDevice(devid) or self._HasBlockDevice(devid)):
return True
return False
@_ensure_connection
def GetFreePCISlot(self):
"""Get the first available PCI slot of a running instance.
"""
slots = bitarray(constants.QEMU_PCI_SLOTS)
slots.setall(False) # pylint: disable=E1101
for d in self._GetPCIDevices():
slot = d["slot"]
slots[slot] = True
return utils.GetFreeSlot(slots)
@_ensure_connection
def CheckDiskHotAddSupport(self):
"""Check if disk hotplug is possible
Hotplug is *not* supported in case:
- fdsend module is missing
- add-fd and blockdev-add qmp commands are not supported
"""
def _raise(reason):
raise errors.HotplugError("Cannot hot-add disk: %s." % reason)
if not fdsend:
_raise("fdsend python module is missing")
if "add-fd" not in self.supported_commands:
_raise("add-fd qmp command is not supported")
if "blockdev-add" not in self.supported_commands:
_raise("blockdev-add qmp command is not supported")
@_ensure_connection
def CheckNicHotAddSupport(self):
"""Check if NIC hotplug is possible
Hotplug is *not* supported in case:
- fdsend module is missing
- getfd and netdev_add qmp commands are not supported
"""
def _raise(reason):
raise errors.HotplugError("Cannot hot-add NIC: %s." % reason)
if not fdsend:
_raise("fdsend python module is missing")
if "getfd" not in self.supported_commands:
_raise("getfd qmp command is not supported")
if "netdev_add" not in self.supported_commands:
_raise("netdev_add qmp command is not supported")
def _GetFd(self, fd, fdname):
"""Wrapper around the getfd qmp command
Use fdsend to send an fd to a running process via SCM_RIGHTS and then use
the getfd qmp command to name it properly so that it can be used
later by NIC hotplugging.
@type fd: int
@param fd: The file descriptor to pass
@raise errors.HypervisorError: If getfd fails for some reason
"""
self._check_connection()
try:
fdsend.sendfds(self.sock, " ", fds=[fd])
arguments = {
"fdname": fdname,
}
self.Execute("getfd", arguments)
except errors.HypervisorError, err:
logging.info("Passing fd %s via SCM_RIGHTS failed: %s", fd, err)
raise
def _AddFd(self, fd):
"""Wrapper around add-fd qmp command
Use fdsend to send fd to a running process via SCM_RIGHTS and then add-fd
qmp command to add it to an fdset so that it can be used later by
disk hotplugging.
@type fd: int
@param fd: The file descriptor to pass
@return: The fdset ID that the fd has been added to
@raise errors.HypervisorError: If add-fd fails for some reason
"""
self._check_connection()
try:
fdsend.sendfds(self.sock, " ", fds=[fd])
# Omit fdset-id and let qemu create a new one (see qmp-commands.hx)
response = self.Execute("add-fd")
fdset = response["fdset-id"]
except errors.HypervisorError, err:
logging.info("Passing fd %s via SCM_RIGHTS failed: %s", fd, err)
raise
return fdset
def _RemoveFdset(self, fdset):
"""Wrapper around remove-fd qmp command
Remove the file descriptor previously passed. After qemu has dup'd the fd
(e.g. during disk hotplug), it can be safely removed.
"""
self._check_connection()
# Omit the fd to cleanup all fds in the fdset (see qemu/qmp-commands.hx)
try:
self.Execute("remove-fd", {"fdset-id": fdset})
except errors.HypervisorError, err:
      # It is no big deal if we cannot remove an fdset; this cleanup is done
      # on a best-effort basis, and upon the next hot-add a new fdset will be
      # created. If we raised an exception here (after drive_add has
      # succeeded), the whole hot-add action would fail and the runtime file
      # would not be updated, making the instance non-migratable.
logging.info("Removing fdset with id %s failed: %s", fdset, err)
|
py | 1a356040f237813893eebc7488a2e030e61358bd | import os
print('Hello World!')
print('I just entered the branch \'develop\'')
x = 1
if x != -1:
    print('Not equal!!!')
print('I am in the new develop branch, I already deleted the old one!')
print('I finished something, but have not added it yet')
folder = 'C:/Users/Vojta/Documents/git/repo-try-out'
files_dirs = os.listdir(folder)
print(files_dirs)
for file_dir in files_dirs:
    if not os.path.isdir(os.path.join(folder, file_dir)):
        print('It is a file!')
    else:
        print('It is not a file, it is a folder!') |
py | 1a356068b5a2fe235be268cce3bee1ecb0287c18 | import os
import math
import random
import librosa
import warnings
import subprocess
import numpy as np
from pathlib import Path
from random import random
# "contextlib" module provides utilities for working with context managers and "with" statements.
# - "contextmanager" from "contextlib" is a decorator that manages resources.
from contextlib import contextmanager
# "kornia" is a python package for Computer Vision.
# - "filter2D" from "kornia" applies a 2D kernel to a tensor.
from kornia.filters import filter2d
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
# Helper Classes and Functions
# calculates exponential moving averages
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if not exists(old):
return new
return old * self.beta + (1 - self.beta) * new
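# Minimal usage sketch (names are illustrative): EMA(0.99) keeps a running
# average, e.g. of generator weights:
#   ema = EMA(0.99)
#   avg = ema.update_average(avg, latest)  # avg = 0.99*avg + 0.01*latest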
# residual connection
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
# channel normalization
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = ChanNorm(dim)
def forward(self, x):
return self.fn(self.norm(x))
class ChanNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, x):
std = torch.var(x, dim = 1, unbiased = False, keepdim = True).sqrt()
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (std + self.eps) * self.g + self.b
# flattens a tensor
class Flatten(nn.Module):
def forward(self, x):
return x.reshape(x.shape[0], -1)
# applies a function at random
class RandomApply(nn.Module):
def __init__(self, prob, fn, fn_else = lambda x: x):
super().__init__()
self.fn = fn
self.fn_else = fn_else
self.prob = prob
def forward(self, x):
fn = self.fn if random() < self.prob else self.fn_else
return fn(x)
# applies a 2nd order binomial filter for bilinear sampling
class Blur(nn.Module):
def __init__(self):
super().__init__()
f = torch.Tensor([1, 2, 1])
self.register_buffer('f', f)
def forward(self, x):
f = self.f
f = f[None, None, :] * f [None, :, None]
return filter2d(x, f, normalized=True)
# checks if the given item exists
def exists(val):
return val is not None
# null context
@contextmanager
def null_context():
yield
# returns default value if the given value does not exist
def default(value, d):
return value if exists(value) else d
# returns items from iterable
def cycle(iterable):
while True:
for i in iterable:
yield i
# casts to list
def cast_list(el):
return el if isinstance(el, list) else [el]
# checks if tensor is empty
def is_empty(t):
if isinstance(t, torch.Tensor):
return t.nelement() == 0
return not exists(t)
# raises an exception if the (scalar) tensor is NaN
def raise_if_nan(t):
if torch.isnan(t):
raise ValueError("")
# yields a null context once per gradient-accumulation step
def gradient_accumulate_contexts(gradient_accumulate_every):
contexts = [null_context] * gradient_accumulate_every
for context in contexts:
with context():
yield
# loss is propagated backwards
def loss_backwards(loss, **kwargs):
loss.backward(**kwargs)
# Gradient Penalty
# ----------------
# - Applies gradient penalty to ensure stability in GAN training by preventing exploding gradients in the discriminator.
# - Read about it at https://arxiv.org/pdf/1704.00028.pdf
# - Watch about it at https://www.youtube.com/watch?v=5c57gnaPkA4
def gradient_penalty(images, output, weight = 10):
batch_size = images.shape[0]
gradients = torch_grad(outputs=output, inputs=images,
grad_outputs=torch.ones(output.size(), device=images.device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.reshape(batch_size, -1)
return weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
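# In formula form, the penalty computed above is the WGAN-GP term
#   weight * E[(||grad_x D(x)||_2 - 1)^2]
# where `output` holds the discriminator scores D(x) for `images`.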
# calculates perceptual path length to achieve feature disentanglement by determining the difference between successive images when interpolating between two noise inputs
def calc_pl_lengths(styles, images):
device = images.device
num_pixels = images.shape[2] * images.shape[3]
pl_noise = torch.randn(images.shape, device=device) / math.sqrt(num_pixels)
outputs = (images * pl_noise).sum()
pl_grads = torch_grad(outputs=outputs, inputs=styles,
grad_outputs=torch.ones(outputs.shape, device=device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
return (pl_grads ** 2).sum(dim=2).mean(dim=1).sqrt()
# returns random noise [N, latent_dim]
def noise(n, latent_dim, device):
return torch.randn(n, latent_dim).to(device)
# returns a list of noise and layers [(N, latent_dim), num_layers]
def noise_list(n, layers, latent_dim, device):
return [(noise(n, latent_dim, device), layers)]
# returns a list of mixed noise generated at random and layers like the previous function
def mixed_list(n, layers, latent_dim, device):
tt = int(torch.rand(()).numpy() * layers)
return noise_list(n, tt, latent_dim, device) + noise_list(n, layers - tt, latent_dim, device)
# returns the style vector after passing latents through the Style Vectorizer or Mapping Network as referred to in the research paper
def latent_to_w(style_vectorizer, latent_descr):
return [(style_vectorizer(z), num_layers) for z, num_layers in latent_descr]
# returns uniformly distributed noise [N, H, W, C]
def image_noise(n, im_size, device):
return torch.FloatTensor(n, im_size, im_size, 1).uniform_(0., 1.).to(device)
# Leaky ReLU
# ----------
# - Leaky ReLU is an activation function that fixes the "dying ReLU" problem - max(0.1x, x)
# - Read about it at https://towardsdatascience.com/the-dying-relu-problem-clearly-explained-42d0c54e0d24#0863
# - Watch a video about it at https://www.youtube.com/watch?v=Y-ruNSdpZ0Q
def leaky_relu(p=0.2):
return nn.LeakyReLU(p, inplace=True)
# model evaluates the batches in chunks
def evaluate_in_chunks(max_batch_size, model, *args):
split_args = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), args))))
chunked_outputs = [model(*i) for i in split_args]
if len(chunked_outputs) == 1:
return chunked_outputs[0]
return torch.cat(chunked_outputs, dim=0)
# concatenates all styles [(N, latent_dim), num_layers] --> [N, num_layers, latent_dim]
def styles_def_to_tensor(styles_def):
return torch.cat([t[:, None, :].expand(-1, n, -1) for t, n in styles_def], dim=1)
# Spherical Linear Interpolation (SLERP)
# --------------------------------------
# - Spherical Linear Interpolation is a type of interpolation between two points on an arc.
# - Read more about at https://en.wikipedia.org/wiki/Slerp
# - Watch a video about interpolations at https://www.youtube.com/watch?v=ibkT5ao8kGY
def slerp(val, low, high):
low_norm = low / torch.norm(low, dim=1, keepdim=True)
high_norm = high / torch.norm(high, dim=1, keepdim=True)
omega = torch.acos((low_norm * high_norm).sum(1))
so = torch.sin(omega)
res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high
return res
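# Closed form implemented above (omega is the angle between the L2-normalized
# endpoints; the mix itself uses the raw low/high tensors):
#   slerp(val, low, high) = sin((1 - val)*omega)/sin(omega) * low
#                         + sin(val*omega)/sin(omega) * high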
# Losses
# Hinge Loss
# ----------
# - Hinge Loss is usually used to train classifiers especially Support Vector Machines (SVMs).
# - Read about at https://towardsdatascience.com/a-definitive-explanation-to-hinge-loss-for-support-vector-machines-ab6d8d3178f1
# - Watch a video about it at https://www.youtube.com/watch?v=RBtgpKmdBlk
# Hinge loss for generator
def gen_hinge_loss(fake, real):
return fake.mean()
# Hinge loss for discriminator
def hinge_loss(real, fake):
return (F.relu(1 + real) + F.relu(1 - fake)).mean()
# Sync Audio
# extract and store audio from video file
def extract_audio(uploaded_file):
base_dir = Path(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0])
audio_dir = base_dir / '.audio'
(audio_dir).mkdir(parents=True, exist_ok=True)
video_path = os.path.join(audio_dir, uploaded_file.name)
audio_path = os.path.join(audio_dir, uploaded_file.name[:-4] + '.mp3')
if not os.path.exists(audio_path):
if uploaded_file.name[-4:] == '.mp4':
with open(video_path, "wb") as f:
f.write(uploaded_file.getbuffer())
subprocess.call(['ffmpeg',
'-i', video_path,
'-f', 'mp3',
'-ab', '192000',
'-vn', audio_path])
else:
with open(audio_path, "wb") as f:
f.write(uploaded_file.getbuffer())
return audio_path
# extracts features from audio
def audio_features(file_path):
#ignore warnings
warnings.filterwarnings('ignore')
# frames per second
fps = 24
# load audio file
x, sr = librosa.load(file_path)
# duration
duration = x.shape[0] / sr
# number of frames
num_frames = int(duration * fps)
# samples per frames
samples_per_frame = x.shape[0] / num_frames
# final audio vector
track = np.zeros(num_frames, dtype=x.dtype)
for frame_num in range(num_frames):
start = int(frame_num * samples_per_frame)
end = int((frame_num + 1) * samples_per_frame)
track[frame_num] = x[start : end].max(axis=0)
track /= track.max()
return track
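# Minimal usage sketch (assumes a Streamlit-style uploaded_file object, as in
# extract_audio above): the result is a per-frame (24 fps) amplitude envelope
# normalized to [0, 1]:
#   envelope = audio_features(extract_audio(uploaded_file))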
|
py | 1a35608d2ec76698adf0dacfcd4d360842ab94c9 | import asyncio
import re
import subprocess
import time
from dataclasses import replace
from pathlib import Path
from typing import Any, AsyncIterator, Set
from uuid import uuid4 as uuid
import aiodocker
import pytest
from yarl import URL
from neuro_sdk import CONFIG_ENV_NAME, DEFAULT_CONFIG_PATH, JobStatus
from tests.e2e import Helper, make_image_name
def parse_docker_ls_output(docker_ls_output: Any) -> Set[str]:
return {
repo_tag
for info in docker_ls_output
if info["RepoTags"] is not None
for repo_tag in info["RepoTags"]
if repo_tag
}
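# Illustration of the helper above: given docker.images.list() output such as
#   [{"RepoTags": ["ubuntu:20.04"]}, {"RepoTags": None}]
# it returns {"ubuntu:20.04"}, silently skipping untagged images.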
@pytest.fixture()
def tag() -> str:
return str(uuid())
async def generate_image(docker: aiodocker.Docker, tag: str) -> str:
name = make_image_name()
image_archive = Path(__file__).parent / "assets/echo-tag.tar"
# TODO use random image name here
image_name = f"{name}:{tag}"
with image_archive.open(mode="r+b") as fileobj:
await docker.images.build(
fileobj=fileobj, tag=image_name, buildargs={"TAG": tag}, encoding="identity"
)
return image_name
@pytest.fixture()
async def image(docker: aiodocker.Docker, tag: str) -> AsyncIterator[str]:
image = await generate_image(docker, tag)
yield image
await docker.images.delete(image, force=True)
@pytest.mark.e2e
def test_images_complete_lifecycle(
helper: Helper,
image: str,
tag: str,
event_loop: asyncio.AbstractEventLoop,
docker: aiodocker.Docker,
) -> None:
    # Let's push the image
captured = helper.run_cli(["image", "push", image])
# stderr has "Used image ..." lines
# assert not captured.err
image_full_str = f"image://{helper.cluster_name}/{helper.username}/{image}"
assert captured.out.endswith(image_full_str)
image_url = URL(image_full_str)
# Check if image available on registry
image_full_str = f"image://{helper.cluster_name}/{helper.username}/{image}"
image_short_str = f"image:{image}"
assert captured.out.endswith(image_full_str)
image_full_str_no_tag = image_full_str.replace(f":{tag}", "")
image_short_str_no_tag = image_short_str.replace(f":{tag}", "")
# check ls short mode
captured = helper.run_cli(["image", "ls"])
assert image_short_str_no_tag in [
line.strip() for line in captured.out.splitlines()
]
captured = helper.run_cli(["image", "ls", "--full-uri"])
assert image_full_str_no_tag in [line.strip() for line in captured.out.splitlines()]
# check ls long mode
captured = helper.run_cli(["image", "ls", "-l"])
for line in captured.out.splitlines():
if image_short_str_no_tag in line:
break
else:
assert False, f"Not found {image_short_str_no_tag} in {captured.out}"
# delete local
event_loop.run_until_complete(docker.images.delete(image, force=True))
docker_ls_output = event_loop.run_until_complete(docker.images.list())
local_images = parse_docker_ls_output(docker_ls_output)
assert image not in local_images
    # Pull the image back from the registry
captured = helper.run_cli(["image", "pull", f"image:{image}"])
# stderr has "Used image ..." lines
# assert not captured.err
assert captured.out.endswith(image)
# check pulled locally, delete for cleanup
docker_ls_output = event_loop.run_until_complete(docker.images.list())
local_images = parse_docker_ls_output(docker_ls_output)
assert image in local_images
# Execute image and check result
captured = helper.run_cli(["-q", "run", "--no-wait-start", str(image_url)])
assert not captured.err
job_id = captured.out
assert job_id.startswith("job-")
helper.wait_job_change_state_to(job_id, JobStatus.SUCCEEDED, JobStatus.FAILED)
helper.check_job_output(job_id, re.escape(tag))
@pytest.mark.e2e
def test_image_tags(helper: Helper, image: str, tag: str) -> None:
# push image
captured = helper.run_cli(["image", "push", image])
image_full_str = f"image://{helper.cluster_name}/{helper.username}/{image}"
assert captured.out.endswith(image_full_str)
image_full_str_no_tag = image_full_str.replace(f":{tag}", "")
delay = 0
t0 = time.time()
while time.time() - t0 < 600:
time.sleep(delay)
# check the tag is present now
try:
captured = helper.run_cli(
["image", "tags", image_full_str_no_tag], timeout=300
)
except subprocess.TimeoutExpired:
continue
if tag in map(lambda s: s.strip(), captured.out.splitlines()):
break
# Give a chance to sync remote registries
delay = min(delay * 2 + 1, 15)
else:
        raise AssertionError(
            f"Timed out waiting for tag {tag} in {captured.out}"
        )
cmd = f"neuro image tags {image_full_str}"
result = subprocess.run(cmd, capture_output=True, shell=True)
assertion_msg = f"Command {cmd} should fail: {result.stdout!r} {result.stderr!r}"
assert result.returncode, assertion_msg
image_full_str_latest_tag = image_full_str.replace(f":{tag}", ":latest")
cmd = f"neuro image tags {image_full_str_latest_tag}"
result = subprocess.run(cmd, capture_output=True, shell=True)
assertion_msg = f"Command {cmd} should fail: {result.stdout!r} {result.stderr!r}"
assert result.returncode, assertion_msg
@pytest.mark.e2e
async def test_images_delete(
helper: Helper,
docker: aiodocker.Docker,
) -> None:
image_ref = await generate_image(docker, tag="latest")
name, _ = image_ref.split(":")
img_name = f"image:{name}"
helper.run_cli(["image", "push", name + ":latest"])
captured = helper.run_cli(["-q", "image", "ls"])
assert img_name in captured.out
helper.run_cli(["image", "rm", img_name])
for _ in range(10):
captured = helper.run_cli(["-q", "image", "ls"])
if img_name in captured.out:
time.sleep(5)
else:
break
assert img_name not in captured.out
@pytest.mark.e2e
async def test_images_push_with_specified_name(
helper: Helper,
image: str,
tag: str,
event_loop: asyncio.AbstractEventLoop,
docker: aiodocker.Docker,
) -> None:
    # Let's push the image
image_no_tag = image.replace(f":{tag}", "")
pushed_no_tag = f"{image_no_tag}-pushed"
pulled_no_tag = f"{image_no_tag}-pulled"
pulled = f"{pulled_no_tag}:{tag}"
captured = helper.run_cli(["image", "push", image, f"image:{pushed_no_tag}:{tag}"])
# stderr has "Used image ..." lines
# assert not captured.err
async with helper.client() as client:
image_pushed_full_str = (
f"image://{client.config.cluster_name}/"
f"{client.config.username}/{pushed_no_tag}:{tag}"
)
assert captured.out.endswith(image_pushed_full_str)
# Check if image available on registry
docker_ls_output = await docker.images.list()
local_images = parse_docker_ls_output(docker_ls_output)
assert pulled not in local_images
async with helper.client() as client:
image_pushed_full = client.parse.remote_image(image_pushed_full_str)
image_url_without_tag = replace(image_pushed_full, tag=None)
imgs = await client.images.list()
assert image_url_without_tag in imgs
# check locally
docker_ls_output = await docker.images.list()
local_images = parse_docker_ls_output(docker_ls_output)
assert pulled not in local_images
    # Pull the image under another name
captured = helper.run_cli(["image", "pull", f"image:{pushed_no_tag}:{tag}", pulled])
# stderr has "Used image ..." lines
# assert not captured.err
assert captured.out.endswith(pulled)
# check locally
docker_ls_output = await docker.images.list()
local_images = parse_docker_ls_output(docker_ls_output)
assert pulled in local_images
# TODO (A.Yushkovskiy): delete the pushed image in GCR
# delete locally
await docker.images.delete(pulled, force=True)
@pytest.mark.e2e
def test_docker_helper(
helper: Helper, image: str, tag: str, nmrc_path: Path, monkeypatch: Any
) -> None:
monkeypatch.setenv(CONFIG_ENV_NAME, str(nmrc_path or DEFAULT_CONFIG_PATH))
helper.run_cli(["config", "docker"])
registry = helper.registry_url.host
username = helper.username
full_tag = f"{registry}/{username}/{image}"
tag_cmd = f"docker tag {image} {full_tag}"
result = subprocess.run(tag_cmd, capture_output=True, shell=True)
assert (
not result.returncode
), f"Command {tag_cmd} failed: {result.stdout!r} {result.stderr!r} "
push_cmd = f"docker push {full_tag}"
result = subprocess.run(push_cmd, capture_output=True, shell=True)
assert (
not result.returncode
), f"Command {push_cmd} failed: {result.stdout!r} {result.stderr!r} "
# Run image and check output
image_url = f"image://{helper.cluster_name}/{username}/{image}"
job_id = helper.run_job_and_wait_state(
image_url, "", wait_state=JobStatus.SUCCEEDED, stop_state=JobStatus.FAILED
)
helper.check_job_output(job_id, re.escape(tag))
|
py | 1a3560a4d5e1b5e8da576ec58ca3366fd66f128a | import numpy as np
import cv2
import os
def get_obs_preprocessor(observation_key, additional_keys, desired_goal_key):
def obs_processor(o):
obs = o[observation_key]
for additional_key in additional_keys:
obs = np.hstack((obs, o[additional_key]))
return np.hstack((obs, o[desired_goal_key]))
return obs_processor
def create_blank_image_directories(save_folder, epoch):
eval_blank_path = f"{save_folder}/epochs/{epoch}/eval_images_blank"
os.makedirs(eval_blank_path, exist_ok=True)
def create_real_corner_image_directories(save_folder, epoch):
real_corners_path = f"{save_folder}/epochs/{epoch}/real_corners_prediction"
os.makedirs(real_corners_path, exist_ok=True)
def create_real_corner_image_dump_directories(save_folder, prefix, epoch):
real_corners_path = f"{save_folder}/epochs/{epoch}/real_corners_dump/{prefix}"
os.makedirs(real_corners_path, exist_ok=True)
def save_blank_images(env, save_folder, epoch, step_number, aux_output):
corner_image, eval_image, cnn_color_image_full, cnn_color_image, cnn_image = env.capture_images(aux_output)
cv2.imwrite(f'{save_folder}/epochs/{epoch}/eval_images_blank/{str(step_number).zfill(3)}.png', eval_image)
def create_regular_image_directories(save_folder, prefix, epoch):
cnn_path = f"{save_folder}/epochs/{epoch}/{prefix}/cnn_images"
cnn_color_path = f"{save_folder}/epochs/{epoch}/{prefix}/cnn_color_images"
cnn_color_full_path = f"{save_folder}/epochs/{epoch}/{prefix}/cnn_color_full_images"
corners_path = f"{save_folder}/epochs/{epoch}/{prefix}/corners_images"
eval_path = f"{save_folder}/epochs/{epoch}/{prefix}/eval_images"
os.makedirs(cnn_path, exist_ok=True)
os.makedirs(cnn_color_path, exist_ok=True)
os.makedirs(cnn_color_full_path, exist_ok=True)
os.makedirs(corners_path, exist_ok=True)
os.makedirs(eval_path, exist_ok=True)
def create_base_epoch_directory(save_folder, epoch):
base_path = f"{save_folder}/epochs/{epoch}"
os.makedirs(base_path, exist_ok=True)
def save_regular_images(env, save_folder, prefix, epoch, step_number, aux_output):
corner_image, eval_image, cnn_color_image_full, cnn_color_image, cnn_image = env.capture_images(aux_output)
cv2.imwrite(f'{save_folder}/epochs/{epoch}/{prefix}/corners_images/{str(step_number).zfill(3)}.png', corner_image)
cv2.imwrite(f'{save_folder}/epochs/{epoch}/{prefix}/eval_images/{str(step_number).zfill(3)}.png', eval_image)
cv2.imwrite(f'{save_folder}/epochs/{epoch}/{prefix}/cnn_images/{str(step_number).zfill(3)}.png', cnn_image)
#TODO: save also these images
#cv2.imwrite(f'{save_folder}/epochs/{epoch}/{prefix}/cnn_color_images/{str(step_number).zfill(3)}.png', cnn_color_image)
#cv2.imwrite(f'{save_folder}/epochs/{epoch}/{prefix}/cnn_color_full_images/{str(step_number).zfill(3)}.png', cnn_color_image_full) |
py | 1a356251f24174a6f4a446d0fa2b13c561bce159 | import pytest
import os
import numpy as np
import pyscal.core as pc
import pyscal.crystal_structures as pcs
def test_q_9():
atoms, boxdims = pcs.make_crystal('bcc', repetitions = [4, 4, 4])
sys = pc.System()
sys.box = boxdims
sys.atoms = atoms
sys.find_neighbors(method = 'voronoi')
sys.calculate_q(9, averaged=True)
q = sys.get_qvals(9, averaged=True)
assert np.round(np.mean(np.array(q)), decimals=2) == 0.00
|
py | 1a3565839e5fcfe969dfdce4c68706475e1e1e46 | #!/usr/bin/env python3
import sys
import time
import serial
import minimalmodbus
SERIAL_PORT = '/dev/ttyUSB0'
SERIAL_SPEED = 9600
SERIAL_TIMEOUT = 0.5
SERIAL_PARITY = serial.PARITY_NONE
MODBUS_DEBUG = False
class SaimanEnergyMeter:
"""A simple class for Saiman Energy Meters (Дала СА4-Э720 П RS)"""
def __init__(self,
address,
serial_port=SERIAL_PORT,
serial_speed=SERIAL_SPEED,
serial_timeout=SERIAL_TIMEOUT,
serial_parity=SERIAL_PARITY,
debug=MODBUS_DEBUG):
self.address = address
self.serial_port = serial_port
self.serial_speed = serial_speed
self.serial_timeout = serial_timeout
self.serial_parity = serial_parity
self.debug = debug
self.configure()
self.conn_open()
self.count_energy()
self.conn_close()
def configure(self):
try:
self.instrument = minimalmodbus.Instrument(self.serial_port, self.address)
self.instrument.serial.baudrate = self.serial_speed
self.instrument.serial.timeout = self.serial_timeout
self.instrument.serial.parity = self.serial_parity
self.instrument.debug = self.debug
except Exception as e:
print(e)
sys.exit(1)
def conn_open(self):
try:
#self.instrument._performCommand(0x8, '\x00\x00\x00\x00')
#self.instrument._performCommand(0x44, '')
self.instrument._performCommand(0x41, '\x01\x31\x31\x31\x31\x31\x31')
time.sleep(self.serial_timeout)
except Exception as e:
print(e)
sys.exit(1)
def conn_close(self):
try:
self.instrument._performCommand(0x42, '')
time.sleep(self.serial_timeout)
except:
pass
def get_reg(self, payload):
try:
time.sleep(self.serial_timeout)
return self.instrument._performCommand(0x3, payload)
except:
return 0
def count_energy(self):
self.reg1 = self.get_reg('\x01\x20\x00\x0E')
self.reg2 = self.get_reg('\x0D\xA0\x00\x0E')
if self.reg1 == 0 and self.reg2 == 0:
print("No data")
sys.exit(1)
elif self.reg1 == 0:
self.reg1 = self.reg2
elif self.reg2 == 0:
self.reg2 = self.reg1
self.t1 = ( int(''.join(minimalmodbus._hexlify(self.reg1).split(' ')[1:6][::-1])) +
int(''.join(minimalmodbus._hexlify(self.reg2).split(' ')[1:6][::-1])) )/100
self.t2 = ( int(''.join(minimalmodbus._hexlify(self.reg1).split(' ')[6:11][::-1])) +
int(''.join(minimalmodbus._hexlify(self.reg2).split(' ')[6:11][::-1])) )/100
self.t3 = ( int(''.join(minimalmodbus._hexlify(self.reg1).split(' ')[11:16][::-1])) +
int(''.join(minimalmodbus._hexlify(self.reg2).split(' ')[11:16][::-1])) )/100
self.total_energy = self.t1 + self.t2 + self.t3
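# Minimal usage sketch (Modbus address 1 is illustrative; adjust the serial
# settings via the constructor arguments):
#   meter = SaimanEnergyMeter(1)
#   print(meter.t1, meter.t2, meter.t3, meter.total_energy)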
|
py | 1a3565fc650d328a9375720173ff2141ff253814 | from hub2hub import TechnicHub, ble_handler
from time import sleep_ms
# Initialize ble handler and a technic hub
ble = ble_handler()
Thub = TechnicHub(ble)
# connect to a technic hub: press green button on the technic hub
Thub.connect()
# Servo motor connected to port A
Motor = Thub.port.A.motor
# move to 180 degrees and hold
Motor.run_to_position(180,stop_action = 2)
sleep_ms(1000)
# move to 0 and float
Motor.run_to_position(0, stop_action = 0) |
py | 1a35662dfaa5b203f67ae10bec99a0e345c49dc3 | import logging
from ..ast import (
Assignment, BinaryOperator, Block, Function, IfStatement, Import,
Invocation, ListLiteral, NumericLiteral, Return, StringLiteral,
Symbol)
from .base import (
ParsingException, either, flat, joined, joined_skip, many, match, oneof,
p_regex, rstrip, strip, then, then_all, wrapped)
log = logging.getLogger(__name__)
def trace(p):
def t(text):
print('>>>>>', text.replace(' ','[ ]').replace('\n','\\n'))
return p(text)
return t
def dbg(p):
return p
class Parser:
def __init__(self, func, node):
self.func = func
self.node = node
def __call__(self, *args):
try:
result = self.func(*args)
except ParsingException as e:
log.error('syntax error, expected %s, got: %s' %
(e.expected, e.rest))
log.error('parser is: %s', e.parser)
return None
if not result:
#raise RuntimeError('could not parse %s' % self.node)
return None
if type(result[0]) == dict:
return self.node(**result[0]), result[1]
return self.node(result[0]), result[1]
def empty_line():
return p_regex('\\s*\n')
def symbol():
return rstrip(p_regex('[a-zA-Z_]+[a-zA-Z_\\d]*'))
def free_symbol():
return strip(p_regex('[a-zA-Z_]+[a-zA-Z_\\d]*'))
def dotted_name():
return Parser(
flat(joined(match('.'), symbol())),
Symbol)
def string_literal():
return either(p_regex('".*?"'), p_regex("'.*?'"))
def numeric_literal():
return p_regex(r'\d+(\.\d*)?')
def list_literal():
return wrapped(
match('['),
joined_skip(match(','), strip(expression())),
match(']'))
def literal():
return either(
Parser(string_literal(), StringLiteral),
Parser(numeric_literal(), NumericLiteral),
Parser(list_literal(), ListLiteral))
def atom():
return rstrip(either(dotted_name(), literal()))
def free_atom():
return strip(either(dotted_name(), literal()))
def indent(p):
def indent_parser(text):
split = text.split('\n')
matched = []
for i in range(len(split)):
line = split[i]
if line.startswith(' '):
matched.append(line[4:])
elif line:
# stop on the first non-empty line that's
# not properly indented
i -= 1
break
rest = '\n'.join(split[i+1:])
if matched:
r = p('\n'.join(matched))
return r[0], r[1] + rest
return None
return indent_parser
def arguments():
return wrapped(
match('('),
joined_skip(match(','), free_symbol()),
match(')'))
def invocation_arguments():
return wrapped(
match('('),
joined_skip(match(','), strip(expression())),
match(')'))
def invocation():
return Parser(then(
'func', dotted_name(),
'args', invocation_arguments()), Invocation)
def placeholder(f, z):
memo = {}
def load(*args):
return memo.setdefault((f,z), f(*z))(*args)
return load
def memoize(f):
def helper(*args):
return placeholder(f, args)
return helper
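# Note on the helpers above: memoize/placeholder defer parser construction
# until first use and cache one parser per (function, args) pair; this breaks
# the mutual recursion between binary_operator() and expression() below.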
@memoize
def binary_operator():
return Parser(then(
'first', either(invocation(), atom()),
'operator', strip(oneof([
'+','-','*','/',
'==','!=', '>', '<'])),
'second', expression()), BinaryOperator)
@memoize
def expression():
return either(
binary_operator(),
invocation(),
atom())
def end_def():
return match(':\n')
def if_statement():
return Parser(then(
'_', match('if '),
'expression', expression(),
'_', end_def(),
'body', block()), IfStatement)
def assignment():
return Parser(then(
'dst', Parser(symbol(), Symbol),
'_', rstrip(match('=')),
'src', atom()), Assignment)
def return_statement():
return Parser(then(
'_', match('return '),
'result', expression()), Return)
@memoize
def statement():
return either(invocation(), assignment(),
if_statement(), return_statement())
def block():
return Parser(
then(
'body', indent(many(
rstrip(statement()))))
, Block)
def function(level):
return Parser(then_all(
'_', match('def '),
'name', symbol(),
'args', arguments(),
'_', end_def(),
'body', block()), Function)
def import_():
return Parser(
then_all(
'_', match('import '),
'name', dotted_name()), Import)
def definition(level=0):
return either(function(level))
def program():
return Parser(
then(
'body',
many(rstrip(either(
empty_line(),
import_(),
definition(),
statement())))),
Block)
|
py | 1a3566c6f150fa5130bf2a74b106b9199dca207f | import time
import logging
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
HANDLED_STR = ['Unhandled', 'Handled']
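# HANDLED_STR is indexed with bool(len(results)) in the handlers below:
# False -> 'Unhandled', True -> 'Handled'.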
class LoggingMiddleware(BaseMiddleware):
def __init__(self, logger=__name__):
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
self.logger = logger
super(LoggingMiddleware, self).__init__()
def check_timeout(self, obj):
start = obj.conf.get('_start', None)
if start:
del obj.conf['_start']
return round((time.time() - start) * 1000)
return -1
async def on_pre_process_update(self, update: types.Update, data: dict):
update.conf['_start'] = time.time()
self.logger.debug(f"Received update [ID:{update.update_id}]")
async def on_post_process_update(self, update: types.Update, result, data: dict):
timeout = self.check_timeout(update)
if timeout > 0:
self.logger.info(f"Process update [ID:{update.update_id}]: [success] (in {timeout} ms)")
async def on_pre_process_message(self, message: types.Message, data: dict):
self.logger.info(f"Received message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]")
async def on_post_process_message(self, message: types.Message, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]")
async def on_pre_process_edited_message(self, edited_message, data: dict):
self.logger.info(f"Received edited message [ID:{edited_message.message_id}] "
f"in chat [{edited_message.chat.type}:{edited_message.chat.id}]")
async def on_post_process_edited_message(self, edited_message, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"edited message [ID:{edited_message.message_id}] "
f"in chat [{edited_message.chat.type}:{edited_message.chat.id}]")
async def on_pre_process_channel_post(self, channel_post: types.Message, data: dict):
self.logger.info(f"Received channel post [ID:{channel_post.message_id}] "
f"in channel [ID:{channel_post.chat.id}]")
async def on_post_process_channel_post(self, channel_post: types.Message, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"channel post [ID:{channel_post.message_id}] "
f"in chat [{channel_post.chat.type}:{channel_post.chat.id}]")
async def on_pre_process_edited_channel_post(self, edited_channel_post: types.Message, data: dict):
self.logger.info(f"Received edited channel post [ID:{edited_channel_post.message_id}] "
f"in channel [ID:{edited_channel_post.chat.id}]")
async def on_post_process_edited_channel_post(self, edited_channel_post: types.Message, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"edited channel post [ID:{edited_channel_post.message_id}] "
f"in channel [ID:{edited_channel_post.chat.id}]")
async def on_pre_process_inline_query(self, inline_query: types.InlineQuery, data: dict):
self.logger.info(f"Received inline query [ID:{inline_query.id}] "
f"from user [ID:{inline_query.from_user.id}]")
async def on_post_process_inline_query(self, inline_query: types.InlineQuery, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"inline query [ID:{inline_query.id}] "
f"from user [ID:{inline_query.from_user.id}]")
async def on_pre_process_chosen_inline_result(self, chosen_inline_result: types.ChosenInlineResult, data: dict):
self.logger.info(f"Received chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] "
f"from user [ID:{chosen_inline_result.from_user.id}] "
f"result [ID:{chosen_inline_result.result_id}]")
async def on_post_process_chosen_inline_result(self, chosen_inline_result, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] "
f"from user [ID:{chosen_inline_result.from_user.id}] "
f"result [ID:{chosen_inline_result.result_id}]")
async def on_pre_process_callback_query(self, callback_query: types.CallbackQuery, data: dict):
if callback_query.message:
if callback_query.message.from_user:
self.logger.info(f"Received callback query [ID:{callback_query.id}] "
f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] "
f"from user [ID:{callback_query.message.from_user.id}]")
else:
self.logger.info(f"Received callback query [ID:{callback_query.id}] "
f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]")
else:
self.logger.info(f"Received callback query [ID:{callback_query.id}] "
f"from inline message [ID:{callback_query.inline_message_id}] "
f"from user [ID:{callback_query.from_user.id}]")
async def on_post_process_callback_query(self, callback_query, results, data: dict):
if callback_query.message:
if callback_query.message.from_user:
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"callback query [ID:{callback_query.id}] "
f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] "
f"from user [ID:{callback_query.message.from_user.id}]")
else:
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"callback query [ID:{callback_query.id}] "
f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]")
else:
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"callback query [ID:{callback_query.id}] "
f"from inline message [ID:{callback_query.inline_message_id}] "
f"from user [ID:{callback_query.from_user.id}]")
async def on_pre_process_shipping_query(self, shipping_query: types.ShippingQuery, data: dict):
self.logger.info(f"Received shipping query [ID:{shipping_query.id}] "
f"from user [ID:{shipping_query.from_user.id}]")
async def on_post_process_shipping_query(self, shipping_query, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"shipping query [ID:{shipping_query.id}] "
f"from user [ID:{shipping_query.from_user.id}]")
async def on_pre_process_pre_checkout_query(self, pre_checkout_query: types.PreCheckoutQuery, data: dict):
self.logger.info(f"Received pre-checkout query [ID:{pre_checkout_query.id}] "
f"from user [ID:{pre_checkout_query.from_user.id}]")
async def on_post_process_pre_checkout_query(self, pre_checkout_query, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"pre-checkout query [ID:{pre_checkout_query.id}] "
f"from user [ID:{pre_checkout_query.from_user.id}]")
async def on_pre_process_error(self, update, error, data: dict):
timeout = self.check_timeout(update)
if timeout > 0:
self.logger.info(f"Process update [ID:{update.update_id}]: [failed] (in {timeout} ms)")
class LoggingFilter(logging.Filter):
"""
Extend LogRecord by data from Telegram Update object.
Can be used in logging config:
    .. code-block:: python3
'filters': {
'telegram': {
'()': LoggingFilter,
'include_content': True,
}
},
...
'handlers': {
'graypy': {
'()': GELFRabbitHandler,
'url': 'amqp://localhost:5672/',
'routing_key': '#',
'localname': 'testapp',
'filters': ['telegram']
},
},
"""
def __init__(self, name='', prefix='tg', include_content=False):
"""
:param name:
:param prefix: prefix for all records
:param include_content: pass into record all data from Update object
"""
super(LoggingFilter, self).__init__(name=name)
self.prefix = prefix
self.include_content = include_content
def filter(self, record: logging.LogRecord):
"""
Extend LogRecord by data from Telegram Update object.
:param record:
:return:
"""
update = types.Update.get_current(True)
if update:
for key, value in self.make_prefix(self.prefix, self.process_update(update)):
setattr(record, key, value)
return True
def process_update(self, update: types.Update):
"""
Parse Update object
:param update:
:return:
"""
yield 'update_id', update.update_id
if update.message:
yield 'update_type', 'message'
yield from self.process_message(update.message)
if update.edited_message:
yield 'update_type', 'edited_message'
yield from self.process_message(update.edited_message)
if update.channel_post:
yield 'update_type', 'channel_post'
yield from self.process_message(update.channel_post)
if update.edited_channel_post:
yield 'update_type', 'edited_channel_post'
yield from self.process_message(update.edited_channel_post)
if update.inline_query:
yield 'update_type', 'inline_query'
yield from self.process_inline_query(update.inline_query)
if update.chosen_inline_result:
yield 'update_type', 'chosen_inline_result'
yield from self.process_chosen_inline_result(update.chosen_inline_result)
if update.callback_query:
yield 'update_type', 'callback_query'
yield from self.process_callback_query(update.callback_query)
if update.shipping_query:
yield 'update_type', 'shipping_query'
yield from self.process_shipping_query(update.shipping_query)
if update.pre_checkout_query:
yield 'update_type', 'pre_checkout_query'
yield from self.process_pre_checkout_query(update.pre_checkout_query)
def make_prefix(self, prefix, iterable):
"""
        Add the prefix to every label in the iterable.
        :param prefix: prefix string; if empty, labels are passed through
        :param iterable: iterable of (key, value) pairs
        :return: generator of prefixed (key, value) pairs
"""
        if not prefix:
            yield from iterable
            return
        for key, value in iterable:
            yield f"{prefix}_{key}", value
def process_user(self, user: types.User):
"""
        Generate user data.
        :param user: Telegram user, may be None
        :return: generator of (key, value) pairs
"""
if not user:
return
yield 'user_id', user.id
if self.include_content:
yield 'user_full_name', user.full_name
if user.username:
yield 'user_name', f"@{user.username}"
def process_chat(self, chat: types.Chat):
"""
        Generate chat data.
        :param chat: Telegram chat, may be None
        :return: generator of (key, value) pairs
"""
if not chat:
return
yield 'chat_id', chat.id
yield 'chat_type', chat.type
if self.include_content:
yield 'chat_title', chat.full_name
if chat.username:
yield 'chat_name', f"@{chat.username}"
def process_message(self, message: types.Message):
yield 'message_content_type', message.content_type
yield from self.process_user(message.from_user)
yield from self.process_chat(message.chat)
if not self.include_content:
return
if message.reply_to_message:
yield from self.make_prefix('reply_to', self.process_message(message.reply_to_message))
if message.forward_from:
yield from self.make_prefix('forward_from', self.process_user(message.forward_from))
if message.forward_from_chat:
yield from self.make_prefix('forward_from_chat', self.process_chat(message.forward_from_chat))
if message.forward_from_message_id:
yield 'message_forward_from_message_id', message.forward_from_message_id
if message.forward_date:
yield 'message_forward_date', message.forward_date
if message.edit_date:
yield 'message_edit_date', message.edit_date
if message.media_group_id:
yield 'message_media_group_id', message.media_group_id
if message.author_signature:
yield 'message_author_signature', message.author_signature
        if message.text:
            yield 'text', message.text
            yield 'html_text', message.html_text
elif message.audio:
yield 'audio', message.audio.file_id
elif message.animation:
yield 'animation', message.animation.file_id
elif message.document:
yield 'document', message.document.file_id
elif message.game:
yield 'game', message.game.title
elif message.photo:
yield 'photo', message.photo[-1].file_id
elif message.sticker:
yield 'sticker', message.sticker.file_id
elif message.video:
yield 'video', message.video.file_id
elif message.video_note:
yield 'video_note', message.video_note.file_id
elif message.voice:
yield 'voice', message.voice.file_id
elif message.contact:
yield 'contact_full_name', message.contact.full_name
yield 'contact_phone_number', message.contact.phone_number
elif message.venue:
yield 'venue_address', message.venue.address
yield 'location_latitude', message.venue.location.latitude
yield 'location_longitude', message.venue.location.longitude
elif message.location:
yield 'location_latitude', message.location.latitude
yield 'location_longitude', message.location.longitude
elif message.new_chat_members:
yield 'new_chat_members', [user.id for user in message.new_chat_members]
        elif message.left_chat_member:
            yield 'left_chat_member', message.left_chat_member.id
elif message.invoice:
yield 'invoice_title', message.invoice.title
yield 'invoice_description', message.invoice.description
yield 'invoice_start_parameter', message.invoice.start_parameter
yield 'invoice_currency', message.invoice.currency
yield 'invoice_total_amount', message.invoice.total_amount
elif message.successful_payment:
yield 'successful_payment_currency', message.successful_payment.currency
yield 'successful_payment_total_amount', message.successful_payment.total_amount
yield 'successful_payment_invoice_payload', message.successful_payment.invoice_payload
yield 'successful_payment_shipping_option_id', message.successful_payment.shipping_option_id
yield 'successful_payment_telegram_payment_charge_id', message.successful_payment.telegram_payment_charge_id
yield 'successful_payment_provider_payment_charge_id', message.successful_payment.provider_payment_charge_id
elif message.connected_website:
yield 'connected_website', message.connected_website
elif message.migrate_from_chat_id:
yield 'migrate_from_chat_id', message.migrate_from_chat_id
elif message.migrate_to_chat_id:
yield 'migrate_to_chat_id', message.migrate_to_chat_id
        elif message.pinned_message:
            yield from self.make_prefix('pinned_message', self.process_message(message.pinned_message))
elif message.new_chat_title:
yield 'new_chat_title', message.new_chat_title
elif message.new_chat_photo:
yield 'new_chat_photo', message.new_chat_photo[-1].file_id
# elif message.delete_chat_photo:
# yield 'delete_chat_photo', message.delete_chat_photo
# elif message.group_chat_created:
# yield 'group_chat_created', message.group_chat_created
# elif message.passport_data:
# yield 'passport_data', message.passport_data
def process_inline_query(self, inline_query: types.InlineQuery):
yield 'inline_query_id', inline_query.id
yield from self.process_user(inline_query.from_user)
if self.include_content:
yield 'inline_query_text', inline_query.query
if inline_query.location:
yield 'location_latitude', inline_query.location.latitude
yield 'location_longitude', inline_query.location.longitude
if inline_query.offset:
yield 'inline_query_offset', inline_query.offset
def process_chosen_inline_result(self, chosen_inline_result: types.ChosenInlineResult):
yield 'chosen_inline_result_id', chosen_inline_result.result_id
yield from self.process_user(chosen_inline_result.from_user)
if self.include_content:
yield 'inline_query_text', chosen_inline_result.query
if chosen_inline_result.location:
yield 'location_latitude', chosen_inline_result.location.latitude
yield 'location_longitude', chosen_inline_result.location.longitude
def process_callback_query(self, callback_query: types.CallbackQuery):
yield from self.process_user(callback_query.from_user)
yield 'callback_query_data', callback_query.data
if callback_query.message:
yield from self.make_prefix('callback_query_message', self.process_message(callback_query.message))
if callback_query.inline_message_id:
yield 'callback_query_inline_message_id', callback_query.inline_message_id
if callback_query.chat_instance:
yield 'callback_query_chat_instance', callback_query.chat_instance
if callback_query.game_short_name:
yield 'callback_query_game_short_name', callback_query.game_short_name
def process_shipping_query(self, shipping_query: types.ShippingQuery):
yield 'shipping_query_id', shipping_query.id
yield from self.process_user(shipping_query.from_user)
if self.include_content:
yield 'shipping_query_invoice_payload', shipping_query.invoice_payload
def process_pre_checkout_query(self, pre_checkout_query: types.PreCheckoutQuery):
yield 'pre_checkout_query_id', pre_checkout_query.id
yield from self.process_user(pre_checkout_query.from_user)
if self.include_content:
yield 'pre_checkout_query_currency', pre_checkout_query.currency
yield 'pre_checkout_query_total_amount', pre_checkout_query.total_amount
yield 'pre_checkout_query_invoice_payload', pre_checkout_query.invoice_payload
yield 'pre_checkout_query_shipping_option_id', pre_checkout_query.shipping_option_id
|
py | 1a35680acfe35c16ed1df2b1789465b14abbb70f | import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.inspection import permutation_importance
from sklearn import svm
from data import *
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import eli5
from eli5.sklearn import PermutationImportance
'''
Train discriminative classifiers and obtain the most important covariates.
'''
def get_top_features(X, y, clf):
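    # eli5's PermutationImportance shuffles each feature column in turn and
    # measures the score drop; bigger drops mean more important features.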
perm = PermutationImportance(clf, random_state=1).fit(X, y)
return np.argsort(-perm.feature_importances_)
def train_clf(X_train, y_train, n_cov, flag='lr'):
if flag == 'lr':
clf = LogisticRegression(penalty='l1', C=0.1, solver='saga')
clf.fit(X_train, y_train)
weights = np.abs(clf.coef_)
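        # With L1 regularization most coefficients are driven to zero, so the
        # largest |coef| values per class are a cheap feature-relevance proxy.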
S = []
for w in weights:
S.append( (-w).argsort()[:n_cov] )
S = np.concatenate( S, axis=0 )
S = np.unique( S )
return clf, S
# if flag == 'rf':
# clf = RandomForestClassifier()
# clf.fit(X_train,y_train)
# result = permutation_importance(clf, X_train, y_train, n_repeats=10)
# sorted_idx = np.argsort(-result.importances_mean)
# S = sorted_idx[:n_cov]
# return clf, S
if flag == 'rf':
clf = RandomForestClassifier()
clf.fit(X_train,y_train)
        sorted_idx = get_top_features(X_train, y_train, clf=clf)
S = sorted_idx[:n_cov]
return clf, S
if flag == 'nn':
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
hidden_layer_sizes=(3, 2), random_state=1)
clf.fit(X_train,y_train)
        sorted_idx = get_top_features(X_train, y_train, clf=clf)
S = sorted_idx[:n_cov]
return clf, S
if flag == 'svm':
clf = svm.SVC(probability=True)
clf.fit(X_train, y_train)
        sorted_idx = get_top_features(X_train, y_train, clf=clf)
S = sorted_idx[:n_cov]
return clf, S
if flag == 'nb':
clf = BernoulliNB(alpha=1.0e-10)
clf.fit(X_train, y_train)
        sorted_idx = get_top_features(X_train, y_train, clf=clf)
S = sorted_idx[:n_cov]
return clf, S
if __name__ == '__main__':
X, y = get_spam_data("data/uciData.csv")
X_train, X_test, y_train, y_test = generate_train_test(X, y, q=0.3)
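    # q=0.3 presumably holds out 30% of the samples for testing (helper
    # defined in data.py, not shown here).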
flag = 'svm'
clf, S = train_clf(X_train, y_train, 11, flag=flag)
#pr = clf.predict(X_test)
print( clf.predict_proba(X_test) )
#
|
py | 1a356818aeba3c4d3691da87db03d3435d07de44 | # Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rmf_adapter as adpt
import rmf_adapter.plan as plan
import rmf_adapter.schedule as schedule
from rmf_fleet_msgs.msg import DockSummary
import numpy as np
import threading
import math
import copy
import enum
import time
from datetime import timedelta
from .RobotClientAPI import RobotAPI
# States for RobotCommandHandle's state machine used when guiding robot along
# a new path
class RobotState(enum.IntEnum):
IDLE = 0
WAITING = 1
MOVING = 2
class RobotCommandHandle(adpt.RobotCommandHandle):
def __init__(self,
name,
config,
node,
graph,
vehicle_traits,
transforms,
map_name,
initial_waypoint,
initial_orientation,
charger_waypoint,
update_frequency,
adapter):
adpt.RobotCommandHandle.__init__(self)
self.name = name
self.config = config
self.node = node
self.graph = graph
self.vehicle_traits = vehicle_traits
self.transforms = transforms
self.map_name = map_name
self.initial_waypoint = initial_waypoint
self.initial_orientation = initial_orientation
# Get the index of the charger waypoint
waypoint = self.graph.find_waypoint(charger_waypoint)
assert waypoint, f"Charger waypoint {charger_waypoint} \
does not exist in the navigation graph"
self.charger_waypoint_index = waypoint.index
self.charger_is_set = False
self.update_frequency = update_frequency
self.update_handle = None # RobotUpdateHandle
self.battery_soc = 1.0
self.api = None
self.position = [] # (x,y,theta) in RMF coordinates (meters, radians)
self.initialized = False
self.state = RobotState.IDLE
self.dock_name = ""
self.adapter = adapter
self.requested_waypoints = [] # RMF Plan waypoints
self.remaining_waypoints = []
self.path_finished_callback = None
self.next_arrival_estimator = None
self.path_index = 0
self.docking_finished_callback = None
# RMF location trackers
self.last_known_lane_index = None
self.last_known_waypoint_index = None
# if robot is waiting at a waypoint. This is a Graph::Waypoint index
self.on_waypoint = None
# if robot is travelling on a lane. This is a Graph::Lane index
self.on_lane = None
self.target_waypoint = None # this is a Plan::Waypoint
# The graph index of the waypoint the robot is currently docking into
self.dock_waypoint_index = None
# Threading variables
self._lock = threading.Lock()
self._follow_path_thread = None
self._quit_path_event = threading.Event()
self._dock_thread = None
self._quit_dock_event = threading.Event()
# Establish connection with the robot
self.api = RobotAPI(
self.config['base_url'],
self.config['user'],
self.config['password'],
robot_name=name,
config=self.config,
vehicle_traits=vehicle_traits,
)
assert self.api.connected, "Unable to connect to Robot API server"
self.position = self.get_position() # RMF coordinates
assert len(
self.position) > 2, "Unable to get current location of the robot"
self.node.get_logger().info(
f"The robot is starting at: [{self.position[0]:.2f}, "
f"{self.position[1]:.2f}, {self.position[2]:.2f}]")
# Obtain StartSet for the robot
self.starts = []
time_now = self.adapter.now()
if (self.initial_waypoint is not None) and\
(self.initial_orientation is not None):
self.node.get_logger().info(
f"Using provided initial waypoint [{self.initial_waypoint}] "
f"and orientation [{self.initial_orientation:.2f}] to "
f"initialize starts for robot [{self.name}]")
# Get the waypoint index for initial_waypoint
initial_waypoint_index = self.graph.find_waypoint(
self.initial_waypoint).index
self.starts = [plan.Start(time_now,
initial_waypoint_index,
self.initial_orientation)]
else:
self.node.get_logger().info(
f"Running compute_plan_starts for robot:{self.name}")
self.starts = plan.compute_plan_starts(
self.graph,
self.map_name,
self.position,
time_now)
if self.starts is None or len(self.starts) == 0:
self.node.get_logger().error(
f"Unable to determine StartSet for {self.name}")
return
start = self.starts[0]
# Update tracking variables
if start.lane is not None: # If the robot is on a lane
self.last_known_lane_index = start.lane
self.on_lane = start.lane
self.last_known_waypoint_index = start.waypoint
else: # Otherwise, the robot is on a waypoint
self.last_known_waypoint_index = start.waypoint
self.on_waypoint = start.waypoint
self.state_update_timer = self.node.create_timer(
1.0 / self.update_frequency,
self.update)
self.initialized = True
def clear(self):
with self._lock:
self.requested_waypoints = []
self.remaining_waypoints = []
self.path_finished_callback = None
self.next_arrival_estimator = None
self.docking_finished_callback = None
self.state = RobotState.IDLE
def stop(self):
# Stop the robot. Tracking variables should remain unchanged.
while True:
self.node.get_logger().info("Requesting robot to stop...")
if self.api.stop():
break
time.sleep(1.0)
if self._follow_path_thread is not None:
self._quit_path_event.set()
if self._follow_path_thread.is_alive():
self._follow_path_thread.join()
self._follow_path_thread = None
self.clear()
def find_location(self, target_pose):
if self.target_waypoint.graph_index is not \
None and self.dist(self.position, target_pose) < 0.5:
self.on_waypoint = self.target_waypoint.graph_index
elif self.last_known_waypoint_index is not \
None and self.dist(
self.position, self.graph.get_waypoint(
self.last_known_waypoint_index).location) < 0.5:
self.on_waypoint = self.last_known_waypoint_index
else:
self.on_lane = None # update_off_grid()
self.on_waypoint = None
def follow_new_path(
self,
waypoints,
next_arrival_estimator,
path_finished_callback):
self.stop()
self._quit_path_event.clear()
self.node.get_logger().info("Received new path to follow...")
self.remaining_waypoints = self.get_remaining_waypoints(waypoints)
assert next_arrival_estimator is not None
assert path_finished_callback is not None
self.next_arrival_estimator = next_arrival_estimator
self.path_finished_callback = path_finished_callback
def _follow_path():
target_pose = []
while (
self.remaining_waypoints or
self.state == RobotState.MOVING or
self.state == RobotState.WAITING):
# Check if we need to abort
if self._quit_path_event.is_set():
self.node.get_logger().info("Aborting previously followed "
"path")
return
# State machine
if self.state == RobotState.IDLE:
# Assign the next waypoint
self.target_waypoint = self.remaining_waypoints[0][1]
self.path_index = self.remaining_waypoints[0][0]
# Move robot to next waypoint
target_pose = self.target_waypoint.position
[x, y] = self.transforms["rmf_to_robot"].transform(
target_pose[:2])
theta = target_pose[2] + \
self.transforms['orientation_offset']
# ------------------------ #
# IMPLEMENT YOUR CODE HERE #
                    # Ensure x, y, theta are in the units api.navigate() expects #
# ------------------------ #
response = self.api.navigate([x, y, theta], self.map_name)
if response:
self.remaining_waypoints = self.remaining_waypoints[1:]
self.state = RobotState.MOVING
else:
self.node.get_logger().info(
f"Robot {self.name} failed to navigate to "
f"[{x:.0f}, {y:.0f}, {theta:.0f}] coordinates. "
f"Retrying...")
time.sleep(1.0)
elif self.state == RobotState.WAITING:
time.sleep(1.0)
time_now = self.adapter.now()
with self._lock:
if self.target_waypoint is not None:
waypoint_wait_time = self.target_waypoint.time
if (waypoint_wait_time < time_now):
self.state = RobotState.IDLE
else:
if self.path_index is not None:
delta = waypoint_wait_time - time_now
self.node.get_logger().info(
f"Waiting for {(delta).seconds}s")
self.next_arrival_estimator(
self.path_index,
timedelta(seconds=0.0))
elif self.state == RobotState.MOVING:
time.sleep(1.0)
# Check if we have reached the target
with self._lock:
if (self.api.navigation_completed()):
self.node.get_logger().info(
f"Robot [{self.name}] has reached its target "
f"waypoint")
self.state = RobotState.WAITING
if (self.target_waypoint.graph_index is not None):
self.on_waypoint = \
self.target_waypoint.graph_index
self.last_known_waypoint_index = \
self.on_waypoint
else:
self.on_waypoint = None # still on a lane
else:
# Update the lane the robot is on
lane = self.get_current_lane()
if lane is not None:
self.on_waypoint = None
self.on_lane = lane
else:
# The robot may either be on the previous
# waypoint or the target one
self.find_location(target_pose)
# ------------------------ #
# IMPLEMENT YOUR CODE HERE #
# If your robot does not have an API to report the
# remaining travel duration, replace the API call
# below with an estimation
# ------------------------ #
duration = self.api.navigation_remaining_duration()
if self.path_index is not None:
self.next_arrival_estimator(
self.path_index, timedelta(seconds=duration))
self.path_finished_callback()
self.node.get_logger().info(
f"Robot {self.name} has successfully navigated along "
f"requested path.")
self._follow_path_thread = threading.Thread(
target=_follow_path)
self._follow_path_thread.start()
def dock(
self,
dock_name,
docking_finished_callback):
''' Docking is very specific to each application. Hence, the user will
need to customize this function accordingly. In this example, we
assume the dock_name is the same as the name of the waypoints that
the robot is trying to dock into. We then call api.start_process()
to initiate the robot specific process. This could be to start a
cleaning process or load/unload a cart for delivery.
'''
self._quit_dock_event.clear()
if self._dock_thread is not None:
self._dock_thread.join()
self.dock_name = dock_name
assert docking_finished_callback is not None
self.docking_finished_callback = docking_finished_callback
# Get the waypoint that the robot is trying to dock into
dock_waypoint = self.graph.find_waypoint(self.dock_name)
assert(dock_waypoint)
self.dock_waypoint_index = dock_waypoint.index
def _dock():
# Request the robot to start the relevant process
self.node.get_logger().info(
f"Requesting robot {self.name} to dock at {self.dock_name}")
self.api.start_process(self.dock_name, self.map_name)
with self._lock:
self.on_waypoint = None
self.on_lane = None
time.sleep(1.0)
# ------------------------ #
# IMPLEMENT YOUR CODE HERE #
# With whatever logic you need for docking #
# ------------------------ #
while (not self.api.docking_completed()):
# Check if we need to abort
if self._quit_dock_event.is_set():
self.node.get_logger().info("Aborting docking")
return
self.node.get_logger().info("Robot is docking...")
time.sleep(1.0)
with self._lock:
self.on_waypoint = self.dock_waypoint_index
self.dock_waypoint_index = None
self.docking_finished_callback()
self.node.get_logger().info("Docking completed")
self._dock_thread = threading.Thread(target=_dock)
self._dock_thread.start()
def get_position(self):
''' This helper function returns the live position of the robot in the
RMF coordinate frame'''
position = self.api.position()
if position is not None:
x, y = self.transforms['robot_to_rmf'].transform(
[position[0], position[1]])
theta = position[2] - \
self.transforms['orientation_offset']
# ------------------------ #
# IMPLEMENT YOUR CODE HERE #
# Ensure x, y are in meters and theta in radians #
# ------------------------ #
# Wrap theta between [-pi, pi]. Else arrival estimate will
# assume robot has to do full rotations and delay the schedule
if theta > np.pi:
theta = theta - (2 * np.pi)
if theta < -np.pi:
theta = (2 * np.pi) + theta
return [x, y, theta]
else:
self.node.get_logger().error(
"Unable to retrieve position from robot.")
return self.position
def get_battery_soc(self):
battery_soc = self.api.battery_soc()
if battery_soc is not None:
return battery_soc
else:
self.node.get_logger().error(
"Unable to retrieve battery data from robot.")
return self.battery_soc
def update(self):
self.position = self.get_position()
self.battery_soc = self.get_battery_soc()
if self.update_handle is not None:
self.update_state()
def update_state(self):
self.update_handle.update_battery_soc(self.battery_soc)
if not self.charger_is_set:
if ("max_delay" in self.config.keys()):
max_delay = self.config["max_delay"]
self.node.get_logger().info(
f"Setting max delay to {max_delay}s")
self.update_handle.set_maximum_delay(max_delay)
if (self.charger_waypoint_index < self.graph.num_waypoints):
self.update_handle.set_charger_waypoint(
self.charger_waypoint_index)
else:
self.node.get_logger().warn(
"Invalid waypoint supplied for charger. "
"Using default nearest charger in the map")
self.charger_is_set = True
# Update position
with self._lock:
if (self.on_waypoint is not None): # if robot is on a waypoint
self.update_handle.update_current_waypoint(
self.on_waypoint, self.position[2])
elif (self.on_lane is not None): # if robot is on a lane
# We only keep track of the forward lane of the robot.
# However, when calling this update it is recommended to also
# pass in the reverse lane so that the planner does not assume
# the robot can only head forwards. This would be helpful when
# the robot is still rotating on a waypoint.
forward_lane = self.graph.get_lane(self.on_lane)
entry_index = forward_lane.entry.waypoint_index
exit_index = forward_lane.exit.waypoint_index
reverse_lane = self.graph.lane_from(exit_index, entry_index)
lane_indices = [self.on_lane]
if reverse_lane is not None: # Unidirectional graph
lane_indices.append(reverse_lane.index)
self.update_handle.update_current_lanes(
self.position, lane_indices)
elif (self.dock_waypoint_index is not None):
self.update_handle.update_off_grid_position(
self.position, self.dock_waypoint_index)
# if robot is merging into a waypoint
elif (self.target_waypoint is not None and
self.target_waypoint.graph_index is not None):
self.update_handle.update_off_grid_position(
self.position, self.target_waypoint.graph_index)
else: # if robot is lost
self.update_handle.update_lost_position(
self.map_name, self.position)
def get_current_lane(self):
def projection(current_position,
target_position,
lane_entry,
lane_exit):
px, py, _ = current_position
p = np.array([px, py])
t = np.array(target_position)
entry = np.array(lane_entry)
exit = np.array(lane_exit)
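            # Sign of the dot product: negative means the robot lies before
            # target_position along the lane direction, non-negative means past it.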
return np.dot(p - t, exit - entry)
if self.target_waypoint is None:
return None
approach_lanes = self.target_waypoint.approach_lanes
# Spin on the spot
if approach_lanes is None or len(approach_lanes) == 0:
return None
# Determine which lane the robot is currently on
for lane_index in approach_lanes:
lane = self.graph.get_lane(lane_index)
p0 = self.graph.get_waypoint(lane.entry.waypoint_index).location
p1 = self.graph.get_waypoint(lane.exit.waypoint_index).location
p = self.position
before_lane = projection(p, p0, p0, p1) < 0.0
after_lane = projection(p, p1, p0, p1) >= 0.0
if not before_lane and not after_lane: # The robot is on this lane
return lane_index
return None
def dist(self, A, B):
''' Euclidian distance between A(x,y) and B(x,y)'''
assert(len(A) > 1)
assert(len(B) > 1)
return math.sqrt((A[0] - B[0])**2 + (A[1] - B[1])**2)
def get_remaining_waypoints(self, waypoints: list):
'''
The function returns a list where each element is a tuple of the index
of the waypoint and the waypoint present in waypoints. This function
may be modified if waypoints in a path need to be filtered.
'''
assert(len(waypoints) > 0)
remaining_waypoints = []
for i in range(len(waypoints)):
remaining_waypoints.append((i, waypoints[i]))
return remaining_waypoints
|
py | 1a356866a283d21d15190a1afdfc92de377ea94d |
#http://www.compaq.com/fortran/docs/
import os
import sys
from numpy.distutils.fcompiler import FCompiler
from distutils.errors import DistutilsPlatformError
compilers = ['CompaqFCompiler']
if os.name != 'posix' or sys.platform[:6] == 'cygwin' :
# Otherwise we'd get a false positive on posix systems with
# case-insensitive filesystems (like darwin), because we'll pick
# up /bin/df
compilers.append('CompaqVisualFCompiler')
class CompaqFCompiler(FCompiler):
compiler_type = 'compaq'
description = 'Compaq Fortran Compiler'
version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'
if sys.platform[:5]=='linux':
fc_exe = 'fort'
else:
fc_exe = 'f90'
executables = {
'version_cmd' : ['<F90>', "-version"],
'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
'compiler_fix' : [fc_exe, "-fixed"],
'compiler_f90' : [fc_exe],
'linker_so' : ['<F90>'],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = '-module ' # not tested
module_include_switch = '-I'
def get_flags(self):
return ['-assume no2underscore', '-nomixed_str_len_arg']
def get_flags_debug(self):
return ['-g', '-check bounds']
def get_flags_opt(self):
return ['-O4', '-align dcommons', '-assume bigarrays',
'-assume nozsize', '-math_library fast']
def get_flags_arch(self):
return ['-arch host', '-tune host']
def get_flags_linker_so(self):
if sys.platform[:5]=='linux':
return ['-shared']
return ['-shared', '-Wl,-expect_unresolved,*']
class CompaqVisualFCompiler(FCompiler):
compiler_type = 'compaqv'
description = 'DIGITAL or Compaq Visual Fortran Compiler'
version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'
r' Version (?P<version>[^\s]*).*')
compile_switch = '/compile_only'
object_switch = '/object:'
library_switch = '/OUT:' #No space after /OUT:!
static_lib_extension = ".lib"
static_lib_format = "%s%s"
module_dir_switch = '/module:'
module_include_switch = '/I'
ar_exe = 'lib.exe'
fc_exe = 'DF'
if sys.platform=='win32':
from numpy.distutils.msvccompiler import MSVCCompiler
try:
m = MSVCCompiler()
m.initialize()
ar_exe = m.lib
except DistutilsPlatformError:
pass
except AttributeError as e:
if '_MSVCCompiler__root' in str(e):
print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e))
else:
raise
except IOError as e:
if not "vcvarsall.bat" in str(e):
print("Unexpected IOError in", __file__)
raise e
except ValueError as e:
if not "'path'" in str(e):
print("Unexpected ValueError in", __file__)
raise e
executables = {
'version_cmd' : ['<F90>', "/what"],
'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
'compiler_fix' : [fc_exe, "/fixed"],
'compiler_f90' : [fc_exe],
'linker_so' : ['<F90>'],
'archiver' : [ar_exe, "/OUT:"],
'ranlib' : None
}
def get_flags(self):
return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
'/names:lowercase', '/assume:underscore']
def get_flags_opt(self):
return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
def get_flags_arch(self):
return ['/threads']
def get_flags_debug(self):
return ['/debug']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='compaq').get_version())
|
py | 1a3569766e7f2997db39ae97a19123521dc22289 | import os
from whitenoise import WhiteNoise
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_backend.settings')
application = get_wsgi_application()
application = WhiteNoise(application, root='/frontend/build/static')
application.add_files('/static', prefix='more-files/')
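# Note (deploy-layout assumption): WhiteNoise serves the frontend build's
# static assets directly from the WSGI process; add_files() additionally
# exposes the /static directory under the 'more-files/' URL prefix.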
|
py | 1a35697881dab8fe44ba9d21cf8e04bf68375c3d | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
rbp_files = dict(snakemake.input)
values = []
for rbp, f_name in rbp_files.items():
df = pd.read_csv(f_name)
positives = len(df[df['class'] == 1])
negatives = len(df[df['class'] == 0])
neg_ratio = negatives / (positives + negatives)
values.append((positives, negatives, neg_ratio))
values = np.array(values)
plt.plot([0,np.max(values)], [0,np.max(values)], c="orange", zorder=1)
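# The orange y=x line marks a perfectly balanced dataset; points above it
# are RBPs with more negatives than positives.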
plt.scatter(values[:, 0], values[:, 1], s=8, alpha=0.5, zorder=2)
plt.xlabel("Number of positives")
plt.ylabel("Number of negatives")
plt.title(f'{snakemake.wildcards["cell_line"]}')
plt.savefig(snakemake.output[0], dpi=300)
|
py | 1a3569da6b49c43131b291fe68c550b442e8f6a2 | from unittest import TestCase
from datetime import datetime
from extract_ride_data import ZeroLogHeader, LogEntry, ZeroLogEntry
class TestLogHeader(TestCase):
def test_decode(self):
log_text = '''Zero MBB log
Serial number 2015_mbb_48e0f7_00720
VIN 538SD9Z37GCG06073
Firmware rev. 51
Board rev. 3
Model DSR
Printing 8397 of 8397 log entries..
Entry Time of Log Event Conditions
+--------+----------------------+--------------------------+----------------------------------
00001 05/13/2018 10:06:43 DEBUG: Sevcon Contactor Drive ON.
'''
log_lines = log_text.splitlines()
log_header = ZeroLogHeader(log_lines)
self.assertEqual({
'mbb': {
'board_rev': '3',
'firmware_rev': '51',
'model': 'DSR',
'serial_no': '2015_mbb_48e0f7_00720',
'vin': '538SD9Z37GCG06073'},
'model': {'manufacturer': 'Zero Motorcycles',
'model': 'DSR',
'motor': {'power': '16kW', 'size': '75-7R'},
'pack_capacity': '13.0',
'plant_location': 'Santa Cruz, CA',
'platform': 'SDS',
'year': 2016},
'num_entries': 8397,
'num_entries_expected': 8397,
'source': 'MBB',
'title': 'Zero MBB log'
}, log_header.to_json())
class TestLogEntry(TestCase):
def test_decode(self):
log_entry = LogEntry(' 2018-05-20 16:36:56 \t something happened \n', field_sep='\t')
self.assertEqual(log_entry.field_values, ['2018-05-20 16:36:56', 'something happened'])
def test_order(self):
first_entry = LogEntry('2018-05-20 16:36:56\tsomething happened', field_sep='\t')
first_entry.timestamp = first_entry.decode_timestamp(first_entry.field_values[0])
second_entry = LogEntry('2018-05-20 16:37:00\tsomething happened later', field_sep='\t')
second_entry.timestamp = second_entry.decode_timestamp(second_entry.field_values[0])
self.assertLess(first_entry, second_entry)
self.assertGreater(second_entry, first_entry)
def test_to_csv(self):
log_entry = LogEntry(' 2018-05-20 16:36:56 \t something happened \n', field_sep='\t')
self.assertEqual('2018-05-20 16:36:56,something happened',
log_entry.to_csv(['timestamp', 'message']))
class TestZeroLogEntry(TestCase):
def assert_consistent_log_entry(self, log_entry: ZeroLogEntry):
self.assertIsInstance(log_entry.entry, int)
self.assertIsInstance(log_entry.timestamp, datetime)
self.assertLess(0, log_entry.entry)
self.assertIsInstance(log_entry.event, str)
self.assertIsInstance(log_entry.component, str)
self.assertIsInstance(log_entry.conditions, dict)
def test_conditions_to_dict(self):
conditions = ZeroLogEntry.conditions_to_dict(
'''PackTemp: h 21C, l 20C, PackSOC: 91%, Vpack:113.044V, MotAmps: 0, BattAmps: 2,\
Mods: 11, MotTemp: 26C, CtrlTemp: 19C, AmbTemp: 20C, MotRPM: 0, Odo:48809km'''
)
self.assertDictEqual({'AmbTemp': '20C',
'BattAmps': '2',
'CtrlTemp': '19C',
'Mods': '11',
'MotAmps': '0',
'MotRPM': '0',
'MotTemp': '26C',
'Odo': '48809km',
'PackSOC': '91%',
'PackTemp (h)': '21C',
'PackTemp (l)': '20C',
'Vpack': '113.044V'}, conditions)
conditions = ZeroLogEntry.conditions_to_dict(
'''Bmvolts: 92062, Cmvolts: 118937, Amps: 0, RPM: 0''')
self.assertDictEqual({'Bmvolts': '92062',
'Cmvolts': '118937',
'Amps': '0',
'RPM': '0'},
conditions)
def test_disarmed(self):
log_entry = ZeroLogEntry('''
00001 05/21/2018 21:12:20 Disarmed \
PackTemp: h 21C, l 20C, PackSOC: 91%, Vpack:113.044V, MotAmps: 0, BattAmps: 2, Mods: 11,\
MotTemp: 26C, CtrlTemp: 19C, AmbTemp: 20C, MotRPM: 0, Odo:48809km
''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(1, log_entry.entry)
self.assertEqual('', log_entry.event_type)
self.assertEqual('', log_entry.event_level)
self.assertEqual('Disarmed', log_entry.event)
self.assertDictEqual({'AmbTemp': '20C',
'BattAmps': '2',
'CtrlTemp': '19C',
'Mods': '11',
'MotAmps': '0',
'MotRPM': '0',
'MotTemp': '26C',
'Odo': '48809km',
'PackSOC': '91%',
'PackTemp (h)': '21C',
'PackTemp (l)': '20C',
'Vpack': '113.044V'},
log_entry.conditions)
def test_info_only_data(self):
log_entry = ZeroLogEntry('''
07558 05/20/2018 16:36:56 INFO: Bmvolts: 92062, Cmvolts: 118937, Amps: 0, RPM: 0
''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(7558, log_entry.entry)
self.assertEqual('2018-05-20 16:36:56', str(log_entry.timestamp))
self.assertEqual('INFO', log_entry.event_level)
self.assertEqual('', log_entry.event)
self.assertDictEqual({'Bmvolts': '92062',
'Cmvolts': '118937',
'Amps': '0',
'RPM': '0'},
log_entry.conditions)
def test_info_and_conditions_message_join(self):
log_entry = ZeroLogEntry('''
07544 05/20/2018 16:36:52 DEBUG: Module mode Change Requires Disconnect
''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(7544, log_entry.entry)
self.assertEqual('DEBUG', log_entry.event_level)
self.assertEqual('Module mode Change Requires Disconnect', log_entry.event)
self.assertDictEqual({}, log_entry.conditions)
def test_current_limited(self):
log_entry = ZeroLogEntry('''
07396 05/20/2018 16:15:31\
Batt Dischg Cur Limited 281 A (40.72463768115942%), MinCell: 3383mV, MaxPackTemp: 34C''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(7396, log_entry.entry)
self.assertEqual('LIMIT', log_entry.event_type)
self.assertEqual('Batt Dischg Cur Limited', log_entry.event)
self.assertDictEqual({'MinCell': '3383mV',
'MaxPackTemp': '34C',
'BattAmps': '281',
'PackSOC': '40.72463768115942%'},
log_entry.conditions)
def test_error_entry(self):
log_entry = ZeroLogEntry('''
07758 05/20/2018 16:52:01\
ERROR: Module 01 maximum connection retries reached. Flagging ineligble.
''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(7758, log_entry.entry)
self.assertEqual('ERROR', log_entry.event_level)
self.assertEqual('Battery', log_entry.component)
self.assertTrue(log_entry.is_battery_event())
self.assertEqual('Module maximum connection retries reached. Flagging ineligble.',
log_entry.event)
self.assertDictEqual({'Module': '01'}, log_entry.conditions)
self.assertEqual(1, log_entry.battery_module_no())
def test_module_not_connected(self):
log_entry = ZeroLogEntry('''
01525 05/14/2018 16:49:14 Module 1 not connected, PV 109511mV, diff 0mV, Allowed diff 750mV,\
pack cap 26Ah, batt curr 0A, PackTemp h 23C, l 23C, last CAN msg 4ms ago, lcell 3903mV,\
Max charge 10cx10, max discharge 100cx10
''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(1525, log_entry.entry)
self.assertEqual({'Module': '1',
'PV': '109511mV',
'diff': '0mV',
'Allowed diff': '750mV',
'pack cap': '26Ah',
'batt curr': '0A',
'PackTemp h': '23C',
'l': '23C',
'lcell': '3903mV',
'Max charge': '10cx10',
'max discharge': '100cx10'},
log_entry.conditions)
self.assertEqual(1, log_entry.battery_module_no())
|
py | 1a356a009571d0d9e9d24c6cccbe4aecb853d681 | from .ks import KSDrift
from .mmd import MMDDrift
__all__ = [
"KSDrift",
"MMDDrift"
]
|
py | 1a356b88d42ac242498fd41cf4d6f001aa6d4ae1 | ## Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
## SPDX-License-Identifier: Apache-2.0
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def get(protocol):
if protocol == "slmp" or protocol == "opcda":
return "python3.7"
# add runtime details for new protocol
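    # Illustrative extension (assumption), e.g.:
    #   if protocol == "modbus":
    #       return "python3.9"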
    return 0
|
py | 1a356d1711759d9ba786a9de74b869954c26e7a3 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['HostPool']
class HostPool(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
custom_rdp_property: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
host_pool_name: Optional[pulumi.Input[str]] = None,
host_pool_type: Optional[pulumi.Input[str]] = None,
load_balancer_type: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
max_session_limit: Optional[pulumi.Input[int]] = None,
personal_desktop_assignment_type: Optional[pulumi.Input[str]] = None,
preferred_app_group_type: Optional[pulumi.Input[str]] = None,
registration_info: Optional[pulumi.Input[pulumi.InputType['RegistrationInfoArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
ring: Optional[pulumi.Input[int]] = None,
sso_context: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
validation_environment: Optional[pulumi.Input[bool]] = None,
vm_template: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Represents a HostPool definition.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] custom_rdp_property: Custom rdp property of HostPool.
:param pulumi.Input[str] description: Description of HostPool.
:param pulumi.Input[str] friendly_name: Friendly name of HostPool.
:param pulumi.Input[str] host_pool_name: The name of the host pool within the specified resource group
:param pulumi.Input[str] host_pool_type: HostPool type for desktop.
:param pulumi.Input[str] load_balancer_type: The type of the load balancer.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[int] max_session_limit: The max session limit of HostPool.
:param pulumi.Input[str] personal_desktop_assignment_type: PersonalDesktopAssignment type for HostPool.
:param pulumi.Input[str] preferred_app_group_type: The type of preferred application group type, default to Desktop Application Group
:param pulumi.Input[pulumi.InputType['RegistrationInfoArgs']] registration_info: The registration info of HostPool.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[int] ring: The ring number of HostPool.
:param pulumi.Input[str] sso_context: Path to keyvault containing ssoContext secret.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[bool] validation_environment: Is validation environment.
:param pulumi.Input[str] vm_template: VM template for sessionhosts configuration within hostpool.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['custom_rdp_property'] = custom_rdp_property
__props__['description'] = description
__props__['friendly_name'] = friendly_name
if host_pool_name is None:
raise TypeError("Missing required property 'host_pool_name'")
__props__['host_pool_name'] = host_pool_name
if host_pool_type is None:
raise TypeError("Missing required property 'host_pool_type'")
__props__['host_pool_type'] = host_pool_type
if load_balancer_type is None:
raise TypeError("Missing required property 'load_balancer_type'")
__props__['load_balancer_type'] = load_balancer_type
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
__props__['max_session_limit'] = max_session_limit
__props__['personal_desktop_assignment_type'] = personal_desktop_assignment_type
if preferred_app_group_type is None:
raise TypeError("Missing required property 'preferred_app_group_type'")
__props__['preferred_app_group_type'] = preferred_app_group_type
__props__['registration_info'] = registration_info
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['ring'] = ring
__props__['sso_context'] = sso_context
__props__['tags'] = tags
__props__['validation_environment'] = validation_environment
__props__['vm_template'] = vm_template
__props__['application_group_references'] = None
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190123preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190924preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20191210preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201019preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201102preview:HostPool")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(HostPool, __self__).__init__(
'azure-nextgen:desktopvirtualization/v20200921preview:HostPool',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'HostPool':
"""
Get an existing HostPool resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return HostPool(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="applicationGroupReferences")
def application_group_references(self) -> pulumi.Output[Sequence[str]]:
"""
List of applicationGroup links.
"""
return pulumi.get(self, "application_group_references")
@property
@pulumi.getter(name="customRdpProperty")
def custom_rdp_property(self) -> pulumi.Output[Optional[str]]:
"""
Custom rdp property of HostPool.
"""
return pulumi.get(self, "custom_rdp_property")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of HostPool.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
Friendly name of HostPool.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="hostPoolType")
def host_pool_type(self) -> pulumi.Output[str]:
"""
HostPool type for desktop.
"""
return pulumi.get(self, "host_pool_type")
@property
@pulumi.getter(name="loadBalancerType")
def load_balancer_type(self) -> pulumi.Output[str]:
"""
The type of the load balancer.
"""
return pulumi.get(self, "load_balancer_type")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxSessionLimit")
def max_session_limit(self) -> pulumi.Output[Optional[int]]:
"""
The max session limit of HostPool.
"""
return pulumi.get(self, "max_session_limit")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="personalDesktopAssignmentType")
def personal_desktop_assignment_type(self) -> pulumi.Output[Optional[str]]:
"""
PersonalDesktopAssignment type for HostPool.
"""
return pulumi.get(self, "personal_desktop_assignment_type")
@property
@pulumi.getter(name="preferredAppGroupType")
def preferred_app_group_type(self) -> pulumi.Output[str]:
"""
The type of preferred application group type, default to Desktop Application Group
"""
return pulumi.get(self, "preferred_app_group_type")
@property
@pulumi.getter(name="registrationInfo")
def registration_info(self) -> pulumi.Output[Optional['outputs.RegistrationInfoResponse']]:
"""
The registration info of HostPool.
"""
return pulumi.get(self, "registration_info")
@property
@pulumi.getter
def ring(self) -> pulumi.Output[Optional[int]]:
"""
The ring number of HostPool.
"""
return pulumi.get(self, "ring")
@property
@pulumi.getter(name="ssoContext")
def sso_context(self) -> pulumi.Output[Optional[str]]:
"""
Path to keyvault containing ssoContext secret.
"""
return pulumi.get(self, "sso_context")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="validationEnvironment")
def validation_environment(self) -> pulumi.Output[Optional[bool]]:
"""
Is validation environment.
"""
return pulumi.get(self, "validation_environment")
@property
@pulumi.getter(name="vmTemplate")
def vm_template(self) -> pulumi.Output[Optional[str]]:
"""
VM template for sessionhosts configuration within hostpool.
"""
return pulumi.get(self, "vm_template")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | 1a356e83c56931fa4f856912587a3c60ea36adde | import random
import pygame
import tkinter as tk
from tkinter import messagebox
class cube(object):
rows = 50
w = 500
    def __init__(self,start,dirnx=1,dirny=0,color=(255,0,0)):
        self.pos = start
        self.dirnx = dirnx
        self.dirny = dirny
        self.color = color
def move(self, dirnx, dirny):
self.dirnx = dirnx
self.dirny = dirny
self.pos = (self.pos[0] + self.dirnx, self.pos[1] + self.dirny)
def draw(self, surface):
dis = self.w // self.rows
i = self.pos[0]
j = self.pos[1]
pygame.draw.rect(surface, self.color, (i*dis+1,j*dis+1, dis-2, dis-2))
class snake(object):
body = []
turns = {}
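    # Note: body and turns are class-level attributes, shared by every
    # instance of this class (same applies to bluesnake below); with one
    # snake per class this is harmless.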
def __init__(self, color, pos):
self.color = color
self.head = cube(pos)
self.body.append(self.head)
self.dirnx = 0
self.dirny = 1
def move(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
keys = pygame.key.get_pressed()
for key in keys:
if keys[pygame.K_LEFT]:
self.dirnx = -1
self.dirny = 0
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_RIGHT]:
self.dirnx = 1
self.dirny = 0
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_UP]:
self.dirnx = 0
self.dirny = -1
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_DOWN]:
self.dirnx = 0
self.dirny = 1
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
for i, c in enumerate(self.body):
p = c.pos[:]
if p in self.turns:
turn = self.turns[p]
c.move(turn[0],turn[1])
if i == len(self.body)-1:
self.turns.pop(p)
else:
if c.dirnx == -1 and c.pos[0] <= 0: c.pos = (c.rows-1, c.pos[1])
elif c.dirnx == 1 and c.pos[0] >= c.rows-1: c.pos = (0,c.pos[1])
elif c.dirny == 1 and c.pos[1] >= c.rows-1: c.pos = (c.pos[0], 0)
elif c.dirny == -1 and c.pos[1] <= 0: c.pos = (c.pos[0],c.rows-1)
else: c.move(c.dirnx,c.dirny)
def reset(self, pos):
self.head = cube(pos)
self.body = []
self.body.append(self.head)
self.turns = {}
self.dirnx = 0
self.dirny = 1
def addCube(self):
tail = self.body[-1]
dx, dy = tail.dirnx, tail.dirny
if dx == 1 and dy == 0:
self.body.append(cube((tail.pos[0]-1,tail.pos[1])))
elif dx == -1 and dy == 0:
self.body.append(cube((tail.pos[0]+1,tail.pos[1])))
elif dx == 0 and dy == 1:
self.body.append(cube((tail.pos[0],tail.pos[1]-1)))
elif dx == 0 and dy == -1:
self.body.append(cube((tail.pos[0],tail.pos[1]+1)))
self.body[-1].dirnx = dx
self.body[-1].dirny = dy
    def draw(self, surface):
        # The two branches were identical, so draw every segment the same way.
        for c in self.body:
            c.draw(surface)
def redrawWindow(surface):
global rows, width, s, snack
surface.fill((0,0,0))
s.draw(surface)
snack.draw(surface)
pygame.display.update()
def randomSnack(rows, item):
positions = item.body
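    # Rejection-sample grid cells until one does not overlap the snake's body.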
while True:
x = random.randrange(rows)
y = random.randrange(rows)
if len(list(filter(lambda z:z.pos == (x,y), positions))) > 0:
continue
else:
break
return (x,y)
def message_box(subject, content):
root = tk.Tk()
root.attributes("-topmost", True)
root.withdraw()
messagebox.showinfo(subject, content)
try:
root.destroy()
except:
pass
class bluesnake(object):
body = []
turns = {}
def __init__(self, color, pos):
self.color = color
self.head = cube(pos)
self.body.append(self.head)
self.dirnx = 5
self.dirny = 5
def move(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
keys = pygame.key.get_pressed()
for key in keys:
if keys[pygame.K_a]:
self.dirnx = -1
self.dirny = 0
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_d]:
self.dirnx = 1
self.dirny = 0
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_w]:
self.dirnx = 0
self.dirny = -1
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_s]:
self.dirnx = 0
self.dirny = 1
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
for i, c in enumerate(self.body):
p = c.pos[:]
if p in self.turns:
turn = self.turns[p]
c.move(turn[0],turn[1])
if i == len(self.body)-1:
self.turns.pop(p)
else:
if c.dirnx == -1 and c.pos[0] <= 0: c.pos = (c.rows-1, c.pos[1])
elif c.dirnx == 1 and c.pos[0] >= c.rows-1: c.pos = (0,c.pos[1])
elif c.dirny == 1 and c.pos[1] >= c.rows-1: c.pos = (c.pos[0], 0)
elif c.dirny == -1 and c.pos[1] <= 0: c.pos = (c.pos[0],c.rows-1)
else: c.move(c.dirnx,c.dirny)
def reset(self, pos):
self.head = cube(pos)
self.body = []
self.body.append(self.head)
self.turns = {}
self.dirnx = 0
self.dirny = 1
def addCube(self):
tail = self.body[-1]
dx, dy = tail.dirnx, tail.dirny
if dx == 1 and dy == 0:
self.body.append(cube((tail.pos[0]-1,tail.pos[1])))
elif dx == -1 and dy == 0:
self.body.append(cube((tail.pos[0]+1,tail.pos[1])))
elif dx == 0 and dy == 1:
self.body.append(cube((tail.pos[0],tail.pos[1]-1)))
elif dx == 0 and dy == -1:
self.body.append(cube((tail.pos[0],tail.pos[1]+1)))
self.body[-1].dirnx = dx
self.body[-1].dirny = dy
    def draw(self, surface):
        # The two branches were identical, so draw every segment the same way.
        for c in self.body:
            c.draw(surface)
def redrawWindow(surface):
    global rows, width, s, blu, snack
    surface.fill((0,0,0))
    s.draw(surface)
    blu.draw(surface)
    snack.draw(surface)
    pygame.display.update()
def main():
global width, rows, s, snack
width = 500
rows = 50
win = pygame.display.set_mode((width, width))
s = snake((255,0,0), (10,10))
blu = bluesnake((0,0,255), (10,10))
snack = cube(randomSnack(rows, s), color=(0,255,0))
flag = True
clock = pygame.time.Clock()
while flag:
pygame.time.delay(50)
clock.tick(10)
blu.move()
s.move()
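        # Note: both move() calls drain pygame's event queue independently,
        # so each snake can miss key presses consumed by the other.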
if s.body[0].pos == snack.pos:
s.addCube()
snack = cube(randomSnack(rows, s), color=(0,255,0))
if blu.body[0].pos == snack.pos:
blu.addCube()
snack = cube(randomSnack(rows, s), color=(0,255,0))
for x in range(len(s.body)):
if s.body[x].pos in list(map(lambda z:z.pos,s.body[x+1:])):
print('Score: ', len(s.body))
message_box('You Lost!', 'Play again...')
s.reset((10,10))
break
for x in range(len(blu.body)):
if blu.body[x].pos in list(map(lambda z:z.pos,blu.body[x+1:])):
print('Score: ', len(blu.body))
message_box('You Lost!', 'Play again...')
blu.reset((10,10))
break
redrawWindow(win)
main()
|
py | 1a356edd508934c2db8a30b8baeda479ac345086 |
#-------------------------------------------------------------------------------
# Name: calculate the moving average of LSWI
# Inputs: LSWImax for each tile for each year (2001~2014)
#
# Author: Yao Zhang
#
# Created: 12/01/2015
# Modified: 05/04/2016
# Copyright: (c) eomf 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
import multiprocessing
import os
from os import listdir
from os.path import isfile, join
from osgeo import gdal
from osgeo.gdalconst import *
import numpy
import numpy.ma as ma
# input the parent directory
root='/data/ifs/VPM/'
# input the year
#Function to build vrt
def buildVrtFile (year,tile):
fileList=[]
for yr in range(int(year)-2,int(year)+3):
fileList.append([os.path.join('/data/ifs/users/xcwu/VPM_GPP/LSWImax/LSWI/',str(yr),tile+'.'+str(yr)+'.maxLSWI_5d_10d.tif')])
#print len(fileList),'files were built into a vrt file'
filename=os.path.join(dirLSWIMA,year+tile+'_list.txt')
outFilelist=open(filename,'w')
for file in fileList:
outFilelist.write(file[0]+'\r\n')
outFilelist.close()
return filename
# Function to write array to tiff file
def write_file(output_name,output_array,GeoT,xsize,ysize,proJ,driverName='GTiff'):
print "creating", output_name
dr=gdal.GetDriverByName(driverName)
dr.Register()
do=dr.Create(output_name,xsize,ysize,1,gdal.GDT_Float32)
do.SetGeoTransform(GeoT)
do.SetProjection(proJ)
do.GetRasterBand(1).WriteArray(output_array)
do.GetRasterBand(1).SetNoDataValue(32767)
do=None
# Function to calculate the moving average
def movingaverage(tile):
# Output directories for moving average LSWImax
# if the output directories don't exist, create the new directories
if not os.path.exists(dirLSWIMA):
os.makedirs(dirLSWIMA)
# build LSWImax vrt file and read as an array
file=buildVrtFile(year,tile)
vrtLSWImax=os.path.join(os.path.dirname(file),year+tile+'LSWImax_vrt.vrt')
#print "Building the vrt file: ", year+tile+vrtLSWImax
os.system('gdalbuildvrt -separate -input_file_list '+file+' '+vrtLSWImax)
global rows, cols, geoProj,geoTran
inLSWImax=gdal.Open(vrtLSWImax)
#print "reading the multi-LSWI..."
LSWImax=inLSWImax.ReadAsArray()
rows = 2400
cols = 2400
geoTran=inLSWImax.GetGeoTransform()
geoProj=inLSWImax.GetProjection()
#find the second largest LSWImax
secLSWImax = numpy.sort(LSWImax, axis=0, kind='quicksort', order=None)[3,:,:]
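    # axis=0 sorts across the 5 stacked years; index 3 of 5 picks the
    # second-largest LSWImax per pixel, which damps single-year outliers.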
write_file(dirLSWIMA+'/'+tile+'.'+year+'.maxLSWI_MA.tif',secLSWImax,geoTran,rows,cols,geoProj,driverName='GTiff')
secLSWImax=None
LSWImax=None
def process_list(tiles = None, mp = True, save_cpus = 0):
if mp:
count = multiprocessing.cpu_count()-save_cpus
#manager = multiprocessing.Manager()
#lock = manager.Lock()
#map(lambda f: f.append(lock), tiles)
pool = multiprocessing.Pool(processes=count)
pool.map(movingaverage, tiles)
tile=['h00v09','h01v09','h02v09','h03v09','h04v09','h08v09','h09v09','h10v09','h11v09','h12v09','h13v09',\
'h14v09','h16v09','h18v09','h19v09','h20v09','h21v09','h22v09','h23v09','h25v09','h27v09','h28v09',\
'h29v09','h30v09','h31v09','h32v09','h33v09','h34v09','h35v09','h00v10','h01v10','h02v10','h03v10',\
'h04v10','h05v10','h10v10','h11v10','h12v10','h13v10','h14v10','h17v10','h19v10','h20v10','h21v10',\
'h22v10','h23v10','h27v10','h28v10','h29v10','h30v10','h31v10','h32v10','h33v10','h34v10','h35v10',\
'h01v11','h02v11','h03v11','h04v11','h05v11','h06v11','h08v11','h10v11','h11v11','h12v11','h13v11',\
'h14v11','h15v11','h19v11','h20v11','h21v11','h22v11','h23v11','h27v11','h28v11','h29v11','h30v11',\
'h31v11','h32v11','h33v11','h11v12','h12v12','h13v12','h16v12','h17v12','h19v12','h20v12','h24v12',\
'h27v12','h28v12','h29v12','h30v12','h31v12','h32v12','h05v13','h12v13','h13v13','h17v13','h20v13',\
'h21v13','h22v13','h28v13','h29v13','h30v13','h31v13','h13v14','h14v14','h15v14','h16v14','h18v14',\
'h22v14','h27v14','h28v14']
#for year in ['2003','2004','2005','2006','2007','2008','2009','2010','2011','2012']:
for year in ['2003','2004','2005','2006','2007','2008','2009','2010','2011','2012','2013','2014']:
dirLSWIMA=root+'/driving_data/LSWImax_MA_06/'+year
process_list(tiles=tile, mp = True, save_cpus = 0)
|
py | 1a356f59964e23c5966414aff5994bf5860b161d | from setuptools import setup
setup(name='hearsay',
version='0.0.1',
description='Models for non-parametric time series classification.',
author='Norbert Crombach',
author_email='[email protected]',
url='https://github.com/norbert/hearsay',
license='MIT',
packages=['hearsay'],
install_requires=['scikit-learn'])
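# Hypothetical install/usage sketch (run from the repository root):
#   pip install .
#   python -c "import hearsay"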
|
py | 1a357155d4f179b05d7b6c3bcb1172e60f947a71 | """SAElections URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.autodiscover()
urlpatterns = [
url(r'^$', 'director.views.direct', name='home'), # The voting/thank you page is here
url(r'^/$', 'director.views.direct', name='home'), # and also here
url(r'^login/$', 'authentication.views.user_login', name='login'),
url(r'^logout/$', 'authentication.views.user_logout', name='logout'),
url(r'^authenticate/$', 'authentication.views.user_auth', name='authenticate'),
url(r'^save-votes/(?P<votes>.+)/$', 'voting.views.save_votes', name='save-votes'),
url(r'^admin/', include(admin.site.urls)),
] + staticfiles_urlpatterns()
|
py | 1a35719e04002f8c92bf5294ab7107a698aaf818 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Documentation: Menu.py
Classes and functions:
Menu main class of this modul
Description:
the base class for the menus. All other menu classes should be derived from this
one
"""
__author__ = "Fireclaw the Fox"
__license__ = """
Simplified BSD (BSD 2-Clause) License.
See License.txt or http://opensource.org/licenses/BSD-2-Clause for more info
"""
from direct.showbase.DirectObject import DirectObject
from direct.gui.DirectGui import DirectFrame
from direct.gui.DirectGui import DirectLabel
from direct.gui.DirectGui import DirectButton
from panda3d.core import TextNode
import time
class Menu(DirectObject):
def __init__(self):
"""Default constructor"""
# load the default fonts
#self.defaultFont = loader.loadFont("gui/fonts/UbuntuBold")
#self.defaultFontRegular = loader.loadFont("gui/fonts/UbuntuRegular")
# load the default button image map
self.defaultBtnMaps = base.loader.loadModel(
"gui/buttons/mainMenu/button_maps")
# this button can be created with the createBackButton function
self.btnBack = None
self.frameMain = DirectFrame(
# size of the frame
frameSize = (base.a2dLeft, base.a2dRight,
base.a2dTop, base.a2dBottom),
# position of the frame
pos = (0, 0, 0),
            # transparent bg color
frameColor = (0, 0, 0, 0))
self.title = DirectLabel(
scale = 0.25,
pos = (0, 0, -0.25),
frameColor = (0, 0, 0, 0),
text = "Missing Title",
text_align = TextNode.ACenter,
text_fg = (1,1,1,1),
#text_font = self.defaultFont
)
self.title.reparentTo(base.a2dTopCenter)
self.clock = DirectLabel(
scale = 0.1,
pos = (-.1,0,.1),
frameColor = (0, 0, 0, 0),
text = "00:00",
text_align = TextNode.ARight,
text_fg = (1,1,1,1))
self.clock.reparentTo(base.a2dBottomRight)
self.hide()
def showBase(self):
"""Show all GUI controls of the base menu"""
self.accept("RatioChanged", self.recalcAspectRatio)
self.frameMain.show()
self.clock.show()
self.title.show()
if self.btnBack:
self.btnBack.show()
if not taskMgr.hasTaskNamed("clock"):
taskMgr.add(self.clockTask, "clock")
def hideBase(self):
"""Hide all GUI controls of the base menu"""
self.ignore("RatioChanged")
self.frameMain.hide()
self.clock.hide()
self.title.hide()
if self.btnBack:
self.btnBack.hide()
if taskMgr.hasTaskNamed("clock"):
taskMgr.remove("clock")
def createBackButton(self, func):
"""Create the back button on the bottom left edge of the window"""
self.btnBack = DirectButton(
# size of the button
scale = (0.25, 0.25, 0.25),
# size of the text
text_scale = (0.5*1.33, 0.5, 0.5),
# the text on the button
text = "ABC",
# set the alignment to right
text_align = TextNode.ARight,
# put the text on the left side of the button
text_pos = (4.1, -0.15),
# set the text color to white
text_fg = (1,1,1,1),
# set the font of the text
#text_font = self.defaultFont,
# set the buttons images
geom = (self.defaultBtnMaps.find("**/button_ready"),
self.defaultBtnMaps.find("**/button_click"),
self.defaultBtnMaps.find("**/button_rollover"),
self.defaultBtnMaps.find("**/button_disabled")),
            # use a flat relief (1 == flat; the frame itself is transparent)
            relief = 1,
# make it transparent
frameColor = (0,0,0,0),
            # no sink-in effect when the button is pressed
pressEffect = False,
# position on the window
pos = (0.0, 0, 0.2),
            # the function that is called on click
command = func,
# sounds that should be played
rolloverSound = None,
clickSound = None)
self.btnBack.reparentTo(base.a2dBottomLeft)
def clockTask(self, task):
self.clock["text"] = time.strftime("%H:%M")
return task.cont
def recalcAspectRatio(self):
"""get the new aspect ratio to resize the mainframe"""
self.frameMain["frameSize"] = (
base.a2dLeft, base.a2dRight,
base.a2dTop, base.a2dBottom)
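# A minimal, hypothetical subclass (not part of the original module) showing the
# intended derivation pattern: Menu.__init__ ends with self.hide(), so concrete
# menus must provide show/hide, typically delegating to showBase/hideBase.
# The event name sent by onBack is illustrative only.
class OptionsMenu(Menu):
    def __init__(self):
        Menu.__init__(self)
        self.title["text"] = "Options"
        self.createBackButton(self.onBack)
    def show(self):
        self.showBase()
    def hide(self):
        self.hideBase()
    def onBack(self):
        base.messenger.send("OptionsMenu_back")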
|
py | 1a3571b43ad3d2f7693efe72692e8e2c351e7a76 | x = 5
y = 3
print(x <= y)
# prints False because 5 is neither less than nor equal to 3
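# For contrast (an added illustration), the reverse comparison:
print(x >= y)
# prints True because 5 is greater than or equal to 3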
# Author: Bryan G
|
py | 1a3572c5524c73c88fee1e49deddc4b01c526b52 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.connection import Connection, ConnectionError
try:
from ncclient.xml_ import NCElement
HAS_NCCLIENT = True
except ImportError:
HAS_NCCLIENT = False
try:
from lxml.etree import Element, fromstring, XMLSyntaxError
except ImportError:
from xml.etree.ElementTree import Element, fromstring
if sys.version_info < (2, 7):
from xml.parsers.expat import ExpatError as XMLSyntaxError
else:
from xml.etree.ElementTree import ParseError as XMLSyntaxError
NS_MAP = {'nc': "urn:ietf:params:xml:ns:netconf:base:1.0"}
def exec_rpc(module, *args, **kwargs):
connection = NetconfConnection(module._socket_path)
return connection.execute_rpc(*args, **kwargs)
class NetconfConnection(Connection):
def __init__(self, socket_path):
super(NetconfConnection, self).__init__(socket_path)
def __rpc__(self, name, *args, **kwargs):
"""Executes the json-rpc and returns the output received
from remote device.
:name: rpc method to be executed over connection plugin that implements jsonrpc 2.0
:args: Ordered list of params passed as arguments to rpc method
:kwargs: Dict of valid key, value pairs passed as arguments to rpc method
For usage refer the respective connection plugin docs.
"""
self.check_rc = kwargs.pop('check_rc', True)
self.ignore_warning = kwargs.pop('ignore_warning', True)
response = self._exec_jsonrpc(name, *args, **kwargs)
if 'error' in response:
rpc_error = response['error'].get('data')
return self.parse_rpc_error(to_bytes(rpc_error, errors='surrogate_then_replace'))
return fromstring(to_bytes(response['result'], errors='surrogate_then_replace'))
def parse_rpc_error(self, rpc_error):
if self.check_rc:
try:
error_root = fromstring(rpc_error)
root = Element('root')
root.append(error_root)
error_list = root.findall('.//nc:rpc-error', NS_MAP)
if not error_list:
raise ConnectionError(to_text(rpc_error, errors='surrogate_then_replace'))
warnings = []
for error in error_list:
message_ele = error.find('./nc:error-message', NS_MAP)
if message_ele is None:
message_ele = error.find('./nc:error-info', NS_MAP)
message = message_ele.text if message_ele is not None else None
severity = error.find('./nc:error-severity', NS_MAP).text
if severity == 'warning' and self.ignore_warning and message is not None:
warnings.append(message)
else:
raise ConnectionError(to_text(rpc_error, errors='surrogate_then_replace'))
return warnings
except XMLSyntaxError:
raise ConnectionError(rpc_error)
def transform_reply():
return b'''<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/|comment()|processing-instruction()">
<xsl:copy>
<xsl:apply-templates/>
</xsl:copy>
</xsl:template>
<xsl:template match="*">
<xsl:element name="{local-name()}">
<xsl:apply-templates select="@*|node()"/>
</xsl:element>
</xsl:template>
<xsl:template match="@*">
<xsl:attribute name="{local-name()}">
<xsl:value-of select="."/>
</xsl:attribute>
</xsl:template>
</xsl:stylesheet>
'''
# Note: Workaround for ncclient 0.5.3
def remove_namespaces(data):
if not HAS_NCCLIENT:
raise ImportError("ncclient is required but does not appear to be installed. "
"It can be installed using `pip install ncclient`")
return NCElement(data, transform_reply()).data_xml
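# Hypothetical usage sketch (illustrative only; the import path for this module
# and the RPC payload vary by Ansible version and target device):
#   reply_element = exec_rpc(module, '<get-config><source><running/></source></get-config>')
# remove_namespaces() is applied to a raw XML reply string so that
# namespace-qualified tags can be matched with plain tag names.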
|
py | 1a357399576ca29dc3512507d895a7620b77acfd | # pylint: disable=missing-module-docstring (C0114)
import os
from typing import Tuple
import numpy as np
from core import DIM_3
from utils.cs_conversion import ChannelRange
DATA_DIR = 'data'
TEST_DIR = 'tests'
MUNICH_1_PATH = os.path.join(DATA_DIR, 'munich_1.png')
MUNICH_1_GRAY_PATH = os.path.join(DATA_DIR, 'munich_1_gray.png')
MUNICH_2_PATH = os.path.join(DATA_DIR, 'munich_2.png')
MUNICH_2_GRAY_PATH = os.path.join(DATA_DIR, 'munich_2_gray.png')
MUNICH_3_PATH = os.path.join(DATA_DIR, 'munich_3.png')
MUNICH_4_PATH = os.path.join(DATA_DIR, 'munich_4.png')
SNOW_1_PATH = os.path.join(DATA_DIR, 'snow_1.png')
SNOW_2_PATH = os.path.join(DATA_DIR, 'snow_2.png')
HEIGHT = 4
WIDTH = 5
CHANNELS = 3
ONES_IMAGE = np.ones((HEIGHT, WIDTH, CHANNELS), dtype=np.uint8)
ONES_IMAGE_FLOAT = np.ones((HEIGHT, WIDTH, CHANNELS), dtype=np.float32)
ColorType = Tuple[int, int, int]
CHANNELS_DEFAULT = (0, 1, 2)
CHANNEL_RANGES_DEFAULT = tuple([ChannelRange(0., 1.)] * DIM_3)
|
py | 1a3573cc0f67b710d7e2d9b247fb363fc9e9253c | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataset_metadata.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow_transform.saved import saved_transform_io
import unittest
from tensorflow.contrib import lookup
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
from tensorflow.python.util import compat
def _create_test_saved_model():
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_float = tf.placeholder(tf.float32, shape=[1])
output = (input_float - 2.0) / 5.0
inputs = {'x': input_float}
outputs = {'x_scaled': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
return export_path
class SavedTransformIOTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._test_saved_model = _create_test_saved_model()
def test_apply_saved_transform(self):
with tf.Graph().as_default() as graph:
with tf.Session().as_default() as session:
input_floats = tf.constant([1237.0]) # tf.float32
input_features = {'x': input_floats}
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features))
self.assertEqual(['x_scaled'], transformed_features.keys())
result_tensor = transformed_features['x_scaled']
self.assertTrue(isinstance(result_tensor, tf.Tensor))
self.assertAllEqual(session.run(result_tensor), [247.0])
self.assertEqual(graph.get_tensor_by_name('Const:0'), input_floats)
self.assertEqual(
graph.get_tensor_by_name('transform/truediv:0'),
result_tensor)
def test_apply_transform_extra_features_no_passthrough(self):
with self.assertRaises(ValueError):
with tf.Graph().as_default():
with tf.Session().as_default():
input_floats = tf.constant([1234.0]) # tf.float32
input_features = {'x': input_floats,
'extra_1': tf.constant('1'),
'extra_2': tf.constant('2')}
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features)
def test_apply_transform_type_mismatch(self):
with self.assertRaises(ValueError):
with tf.Graph().as_default():
with tf.Session().as_default():
input_strings = tf.constant(['bogus']) # tf.string
input_features = {'x': input_strings}
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features)
def test_apply_transform_shape_mismatch(self):
with self.assertRaises(ValueError):
with tf.Graph().as_default():
with tf.Session().as_default():
input_floats = tf.constant(1234.0) # tf.float32
input_features = {'x': input_floats}
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features)
def test_apply_saved_transform_to_tensor_inside_scope(self):
with tf.Graph().as_default():
with tf.name_scope('my_scope'):
with tf.Session().as_default() as session:
input_floats = tf.constant([1237.0]) # tf.float32
input_features = {'x': input_floats}
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features))
self.assertEqual(['x_scaled'], transformed_features.keys())
result_tensor = transformed_features['x_scaled']
self.assertAllEqual(session.run(result_tensor), [247.0])
def test_apply_saved_transform_to_tensor_outside_scope(self):
with tf.Graph().as_default():
input_floats = tf.constant([1237.0]) # tf.float32
with tf.name_scope('my_scope'):
with tf.Session().as_default() as session:
input_features = {'x': input_floats}
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features))
self.assertEqual(['x_scaled'], transformed_features.keys())
result_tensor = transformed_features['x_scaled']
self.assertAllEqual(session.run(result_tensor), [247.0])
def test_dense_roundtrip(self):
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_float = tf.placeholder(tf.float32)
# show that unrelated & unmapped placeholders do not interfere
tf.placeholder(tf.int64)
output = input_float / 5.0
inputs = {'input': input_float}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
with tf.Graph().as_default():
with tf.Session().as_default() as session:
# Using a computed input gives confidence that the graphs are fused.
input_float = tf.constant(25.0) * 2
inputs = {'input': input_float}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
result = session.run(outputs['output'])
# (25 * 2) / 5 = 10
self.assertEqual(10.0, result)
def test_table_roundtrip(self):
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_string = tf.placeholder(tf.string)
# Map string through a table, in this case based on a constant tensor.
table = lookup.index_table_from_tensor(
tf.constant(['cat', 'dog', 'giraffe']))
output = table.lookup(input_string)
inputs = {'input': input_string}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
with tf.Graph().as_default():
with tf.Session().as_default() as session:
# Using a computed input gives confidence that the graphs are fused.
input_string = tf.constant('dog')
inputs = {'input': input_string}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
session.run(tf.tables_initializer())
result = session.run(outputs['output'])
self.assertEqual(1, result)
def test_sparse_roundtrip(self):
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_float = tf.sparse_placeholder(tf.float32)
output = input_float / 5.0
inputs = {'input': input_float}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
with tf.Graph().as_default():
with tf.Session().as_default() as session:
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
input_sparse = tf.SparseTensor(
indices=indices, values=values, dense_shape=shape)
# Using a computed input gives confidence that the graphs are fused
inputs = {'input': input_sparse * 10}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
output_sparse = outputs['output']
self.assertTrue(isinstance(output_sparse, tf.SparseTensor))
result = session.run(output_sparse)
        # indices and shape unchanged; values scaled by 10, then divided by 5
self.assertEqual(indices.tolist(), result.indices.tolist())
self.assertEqual([2.0, 4.0], result.values.tolist())
self.assertEqual(shape.tolist(), result.dense_shape.tolist())
def test_stale_asset_collections_are_cleaned(self):
vocabulary_file = os.path.join(
compat.as_bytes(test.get_temp_dir()), compat.as_bytes('asset'))
file_io.write_string_to_file(vocabulary_file, 'foo bar baz')
export_path = os.path.join(tempfile.mkdtemp(), 'export')
# create a SavedModel including assets
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_string = tf.placeholder(tf.string)
# Map string through a table loaded from an asset file
table = lookup.index_table_from_file(
vocabulary_file, num_oov_buckets=12, default_value=12)
output = table.lookup(input_string)
inputs = {'input': input_string}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
# Load it and save it again repeatedly, verifying that the asset collections
# remain valid.
for _ in [1, 2, 3]:
with tf.Graph().as_default() as g:
with tf.Session().as_default() as session:
input_string = tf.constant('dog')
inputs = {'input': input_string}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
self.assertEqual(
1, len(g.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
self.assertEqual(
0, len(g.get_collection(tf.saved_model.constants.ASSETS_KEY)))
# Check that every ASSET_FILEPATHS refers to a Tensor in the graph.
# If not, get_tensor_by_name() raises KeyError.
for asset_path in g.get_collection(ops.GraphKeys.ASSET_FILEPATHS):
tensor_name = asset_path.name
g.get_tensor_by_name(tensor_name)
export_path = os.path.join(tempfile.mkdtemp(), 'export')
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
if __name__ == '__main__':
unittest.main()
|
py | 1a3574be60fcbd6dfaab55bf65ce15791195b5b9 | # coding: utf-8
from flask import Blueprint
main = Blueprint('main', __name__)
from . import views, errors # NOQA
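# A minimal, hypothetical registration sketch (the application factory shown is
# illustrative, not part of this package):
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(main)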
|
py | 1a3575b917de1f410da139c54f283460def9f5b9 | #!/usr/bin/env python
# coding: utf-8
import os
import sys
import webbrowser
import pyqrcode
import requests
import json
import xml.dom.minidom
import urllib
import time
import re
import random
from requests.exceptions import ConnectionError, ReadTimeout
import HTMLParser
UNKNOWN = 'unknown'
SUCCESS = '200'
SCANED = '201'
TIMEOUT = '408'
def show_image(file):
if sys.version_info >= (3, 3):
from shlex import quote
else:
from pipes import quote
if sys.platform == "darwin":
command = "open -a /Applications/Preview.app %s&" % quote(file)
os.system(command)
    else:
webbrowser.open(file)
class WXBot:
"""WXBot, a framework to process WeChat messages"""
def __init__(self):
self.DEBUG = False
self.uuid = ''
self.base_uri = ''
self.redirect_uri = ''
self.uin = ''
self.sid = ''
self.skey = ''
self.pass_ticket = ''
self.device_id = 'e' + repr(random.random())[2:17]
self.base_request = {}
self.sync_key_str = ''
self.sync_key = []
self.sync_host = ''
self.session = requests.Session()
self.session.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5'})
self.conf = {'qr': 'png'}
self.my_account = {} # this account
# all kind of accounts: contacts, public accounts, groups, special accounts
self.member_list = []
# members of all groups, {'group_id1': [member1, member2, ...], ...}
self.group_members = {}
# all accounts, {'group_member':{'id':{'type':'group_member', 'info':{}}, ...}, 'normal_member':{'id':{}, ...}}
self.account_info = {'group_member': {}, 'normal_member': {}}
self.contact_list = [] # contact list
self.public_list = [] # public account list
self.group_list = [] # group chat list
self.special_list = [] # special list account
def get_contact(self):
"""Get information of all contacts of current account."""
url = self.base_uri + '/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' \
% (self.pass_ticket, self.skey, int(time.time()))
r = self.session.post(url, data='{}')
r.encoding = 'utf-8'
if self.DEBUG:
with open('contacts.json', 'w') as f:
f.write(r.text.encode('utf-8'))
dic = json.loads(r.text)
self.member_list = dic['MemberList']
special_users = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail',
'fmessage', 'tmessage', 'qmessage', 'qqsync', 'floatbottle',
'lbsapp', 'shakeapp', 'medianote', 'qqfriend', 'readerapp',
'blogapp', 'facebookapp', 'masssendapp', 'meishiapp',
'feedsapp', 'voip', 'blogappweixin', 'weixin', 'brandsessionholder',
'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c',
'officialaccounts', 'notification_messages', 'wxid_novlwrv3lqwv11',
'gh_22b87fa7cb3c', 'wxitil', 'userexperience_alarm', 'notification_messages']
self.contact_list = []
self.public_list = []
self.special_list = []
self.group_list = []
for contact in self.member_list:
if contact['VerifyFlag'] & 8 != 0: # public account
self.public_list.append(contact)
self.account_info['normal_member'][contact['UserName']] = {'type': 'public', 'info': contact}
elif contact['UserName'] in special_users: # special account
self.special_list.append(contact)
self.account_info['normal_member'][contact['UserName']] = {'type': 'special', 'info': contact}
elif contact['UserName'].find('@@') != -1: # group
self.group_list.append(contact)
self.account_info['normal_member'][contact['UserName']] = {'type': 'group', 'info': contact}
elif contact['UserName'] == self.my_account['UserName']: # self
self.account_info['normal_member'][contact['UserName']] = {'type': 'self', 'info': contact}
pass
else:
self.contact_list.append(contact)
self.account_info['normal_member'][contact['UserName']] = {'type': 'contact', 'info': contact}
self.group_members = self.batch_get_group_members()
for group in self.group_members:
for member in self.group_members[group]:
if member['UserName'] not in self.account_info:
self.account_info['group_member'][member['UserName']] = {'type': 'group_member',
'info': member,
'group': group}
if self.DEBUG:
with open('contact_list.json', 'w') as f:
f.write(json.dumps(self.contact_list))
with open('special_list.json', 'w') as f:
f.write(json.dumps(self.special_list))
with open('group_list.json', 'w') as f:
f.write(json.dumps(self.group_list))
with open('public_list.json', 'w') as f:
f.write(json.dumps(self.public_list))
with open('member_list.json', 'w') as f:
f.write(json.dumps(self.member_list))
with open('group_users.json', 'w') as f:
f.write(json.dumps(self.group_members))
with open('account_info.json', 'w') as f:
f.write(json.dumps(self.account_info))
return True
def batch_get_group_members(self):
"""Get information of accounts in all groups at once."""
url = self.base_uri + '/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.base_request,
"Count": len(self.group_list),
"List": [{"UserName": group['UserName'], "EncryChatRoomId": ""} for group in self.group_list]
}
r = self.session.post(url, data=json.dumps(params))
r.encoding = 'utf-8'
dic = json.loads(r.text)
group_members = {}
for group in dic['ContactList']:
gid = group['UserName']
members = group['MemberList']
group_members[gid] = members
return group_members
def get_group_member_name(self, gid, uid):
"""
Get name of a member in a group.
:param gid: group id
:param uid: group member id
:return: names like {"display_name": "test_user", "nickname": "test", "remark_name": "for_test" }
"""
if gid not in self.group_members:
return None
group = self.group_members[gid]
for member in group:
if member['UserName'] == uid:
names = {}
if 'RemarkName' in member and member['RemarkName']:
names['remark_name'] = member['RemarkName']
if 'NickName' in member and member['NickName']:
names['nickname'] = member['NickName']
if 'DisplayName' in member and member['DisplayName']:
names['display_name'] = member['DisplayName']
return names
return None
def get_contact_info(self, uid):
if uid in self.account_info['normal_member']:
return self.account_info['normal_member'][uid]
else:
return None
def get_group_member_info(self, uid):
if uid in self.account_info['group_member']:
return self.account_info['group_member'][uid]
else:
return None
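    # NOTE: the two-argument overloads below shadow the one-argument versions
    # above; Python keeps only the last definition of each method name, so the
    # (uid, gid) variants of get_group_member_info and get_group_member_name
    # are the ones actually bound on the class.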
def get_group_member_info(self, uid, gid):
if gid not in self.group_members:
return None
for member in self.group_members[gid]:
if member['UserName'] == uid:
return {'type': 'group_member', 'info': member}
return None
def get_contact_name(self, uid):
info = self.get_contact_info(uid)
if info is None:
return None
info = info['info']
name = {}
if 'RemarkName' in info and info['RemarkName']:
name['remark_name'] = info['RemarkName']
if 'NickName' in info and info['NickName']:
name['nickname'] = info['NickName']
if 'DisplayName' in info and info['DisplayName']:
name['display_name'] = info['DisplayName']
if len(name) == 0:
return None
else:
return name
def get_group_member_name(self, uid):
info = self.get_group_member_info(uid)
if info is None:
return None
info = info['info']
name = {}
if 'RemarkName' in info and info['RemarkName']:
name['remark_name'] = info['RemarkName']
if 'NickName' in info and info['NickName']:
name['nickname'] = info['NickName']
if 'DisplayName' in info and info['DisplayName']:
name['display_name'] = info['DisplayName']
if len(name) == 0:
return None
else:
return name
def get_group_member_name(self, uid, gid):
info = self.get_group_member_info(uid, gid)
if info is None:
return None
info = info['info']
name = {}
if 'RemarkName' in info and info['RemarkName']:
name['remark_name'] = info['RemarkName']
if 'NickName' in info and info['NickName']:
name['nickname'] = info['NickName']
if 'DisplayName' in info and info['DisplayName']:
name['display_name'] = info['DisplayName']
if len(name) == 0:
return None
else:
return name
@staticmethod
def get_contact_prefer_name(name):
if name is None:
return None
if 'remark_name' in name:
return name['remark_name']
if 'nickname' in name:
return name['nickname']
if 'display_name' in name:
return name['display_name']
return None
@staticmethod
def get_group_member_prefer_name(name):
if name is None:
return None
if 'remark_name' in name:
return name['remark_name']
if 'display_name' in name:
return name['display_name']
if 'nickname' in name:
return name['nickname']
return None
def get_user_type(self, wx_user_id):
"""
        Get the relationship of an account and the current user.
:param wx_user_id:
:return: The type of the account.
"""
for account in self.contact_list:
if wx_user_id == account['UserName']:
return 'contact'
for account in self.public_list:
if wx_user_id == account['UserName']:
return 'public'
for account in self.special_list:
if wx_user_id == account['UserName']:
return 'special'
for account in self.group_list:
if wx_user_id == account['UserName']:
return 'group'
for group in self.group_members:
for member in self.group_members[group]:
if member['UserName'] == wx_user_id:
return 'group_member'
return 'unknown'
def is_contact(self, uid):
for account in self.contact_list:
if uid == account['UserName']:
return True
return False
def is_public(self, uid):
for account in self.public_list:
if uid == account['UserName']:
return True
return False
def is_special(self, uid):
for account in self.special_list:
if uid == account['UserName']:
return True
return False
def handle_msg_all(self, msg):
"""
The function to process all WeChat messages, please override this function.
msg:
msg_id -> id of the received WeChat message
msg_type_id -> the type of the message
            user -> the account that the message is sent from
content -> content of the message
:param msg: The received message.
:return: None
"""
pass
@staticmethod
def proc_at_info(msg):
if not msg:
            return '', '', []
segs = msg.split(u'\u2005')
str_msg_all = ''
str_msg = ''
infos = []
if len(segs) > 1:
for i in range(0, len(segs)-1):
segs[i] += u'\u2005'
                pm = re.search(u'@.*\u2005', segs[i])
                if pm:
                    pm = pm.group()
                    name = pm[1:-1]
                    string = segs[i].replace(pm, '')
str_msg_all += string + '@' + name + ' '
str_msg += string
if string:
infos.append({'type': 'str', 'value': string})
infos.append({'type': 'at', 'value': name})
else:
infos.append({'type': 'str', 'value': segs[i]})
str_msg_all += segs[i]
str_msg += segs[i]
str_msg_all += segs[-1]
str_msg += segs[-1]
infos.append({'type': 'str', 'value': segs[-1]})
else:
infos.append({'type': 'str', 'value': segs[-1]})
str_msg_all = msg
str_msg = msg
return str_msg_all.replace(u'\u2005', ''), str_msg.replace(u'\u2005', ''), infos
def extract_msg_content(self, msg_type_id, msg):
"""
content_type_id:
0 -> Text
1 -> Location
3 -> Image
4 -> Voice
5 -> Recommend
6 -> Animation
7 -> Share
8 -> Video
9 -> VideoCall
10 -> Redraw
11 -> Empty
99 -> Unknown
:param msg_type_id: The type of the received message.
:param msg: The received message.
:return: The extracted content of the message.
"""
mtype = msg['MsgType']
content = HTMLParser.HTMLParser().unescape(msg['Content'])
msg_id = msg['MsgId']
msg_content = {}
if msg_type_id == 0:
return {'type': 11, 'data': ''}
elif msg_type_id == 2: # File Helper
return {'type': 0, 'data': content.replace('<br/>', '\n')}
elif msg_type_id == 3: # Group
sp = content.find('<br/>')
uid = content[:sp]
content = content[sp:]
content = content.replace('<br/>', '')
uid = uid[:-1]
name = self.get_contact_prefer_name(self.get_contact_name(uid))
if not name:
name = self.get_group_member_prefer_name(self.get_group_member_name(uid, msg['FromUserName']))
if not name:
name = 'unknown'
msg_content['user'] = {'id': uid, 'name': name}
else: # Self, Contact, Special, Public, Unknown
pass
msg_prefix = (msg_content['user']['name'] + ':') if 'user' in msg_content else ''
if mtype == 1:
if content.find('http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
r = self.session.get(content)
r.encoding = 'gbk'
data = r.text
pos = self.search_content('title', data, 'xml')
msg_content['type'] = 1
msg_content['data'] = pos
msg_content['detail'] = data
if self.DEBUG:
print ' %s[Location] %s ' % (msg_prefix, pos)
else:
msg_content['type'] = 0
if msg_type_id == 3 or (msg_type_id == 1 and msg['ToUserName'][:2] == '@@'): # Group text message
msg_infos = self.proc_at_info(content)
str_msg_all = msg_infos[0]
str_msg = msg_infos[1]
detail = msg_infos[2]
msg_content['data'] = str_msg_all
msg_content['detail'] = detail
msg_content['desc'] = str_msg
else:
msg_content['data'] = content
if self.DEBUG:
try:
print ' %s[Text] %s' % (msg_prefix, msg_content['data'])
except UnicodeEncodeError:
print ' %s[Text] (illegal text).' % msg_prefix
elif mtype == 3:
msg_content['type'] = 3
msg_content['data'] = self.get_msg_img_url(msg_id)
if self.DEBUG:
image = self.get_msg_img(msg_id)
print ' %s[Image] %s' % (msg_prefix, image)
elif mtype == 34:
msg_content['type'] = 4
msg_content['data'] = self.get_voice_url(msg_id)
if self.DEBUG:
voice = self.get_voice(msg_id)
print ' %s[Voice] %s' % (msg_prefix, voice)
elif mtype == 42:
msg_content['type'] = 5
info = msg['RecommendInfo']
msg_content['data'] = {'nickname': info['NickName'],
'alias': info['Alias'],
'province': info['Province'],
'city': info['City'],
'gender': ['unknown', 'male', 'female'][info['Sex']]}
if self.DEBUG:
print ' %s[Recommend]' % msg_prefix
print ' -----------------------------'
print ' | NickName: %s' % info['NickName']
print ' | Alias: %s' % info['Alias']
print ' | Local: %s %s' % (info['Province'], info['City'])
print ' | Gender: %s' % ['unknown', 'male', 'female'][info['Sex']]
print ' -----------------------------'
elif mtype == 47:
msg_content['type'] = 6
msg_content['data'] = self.search_content('cdnurl', content)
if self.DEBUG:
print ' %s[Animation] %s' % (msg_prefix, msg_content['data'])
elif mtype == 49:
msg_content['type'] = 7
app_msg_type = ''
if msg['AppMsgType'] == 3:
app_msg_type = 'music'
elif msg['AppMsgType'] == 5:
app_msg_type = 'link'
elif msg['AppMsgType'] == 7:
app_msg_type = 'weibo'
else:
app_msg_type = 'unknown'
msg_content['data'] = {'type': app_msg_type,
'title': msg['FileName'],
'desc': self.search_content('des', content, 'xml'),
'url': msg['Url'],
'from': self.search_content('appname', content, 'xml')}
if self.DEBUG:
print ' %s[Share] %s' % (msg_prefix, app_msg_type)
print ' --------------------------'
print ' | title: %s' % msg['FileName']
print ' | desc: %s' % self.search_content('des', content, 'xml')
print ' | link: %s' % msg['Url']
print ' | from: %s' % self.search_content('appname', content, 'xml')
print ' --------------------------'
elif mtype == 62:
msg_content['type'] = 8
msg_content['data'] = content
if self.DEBUG:
print ' %s[Video] Please check on mobiles' % msg_prefix
elif mtype == 53:
msg_content['type'] = 9
msg_content['data'] = content
if self.DEBUG:
print ' %s[Video Call]' % msg_prefix
elif mtype == 10002:
msg_content['type'] = 10
msg_content['data'] = content
if self.DEBUG:
print ' %s[Redraw]' % msg_prefix
elif mtype == 10000: # unknown, maybe red packet, or group invite
msg_content['type'] = 12
msg_content['data'] = msg['Content']
if self.DEBUG:
print ' [Unknown]'
else:
msg_content['type'] = 99
msg_content['data'] = content
if self.DEBUG:
print ' %s[Unknown]' % msg_prefix
return msg_content
def handle_msg(self, r):
"""
The inner function that processes raw WeChat messages.
msg_type_id:
0 -> Init
1 -> Self
2 -> FileHelper
3 -> Group
4 -> Contact
5 -> Public
6 -> Special
99 -> Unknown
:param r: The raw data of the messages.
:return: None
"""
for msg in r['AddMsgList']:
msg_type_id = 99
user = {'id': msg['FromUserName'], 'name': 'unknown'}
if msg['MsgType'] == 51: # init message
msg_type_id = 0
user['name'] = 'system'
elif msg['FromUserName'] == self.my_account['UserName']: # Self
msg_type_id = 1
user['name'] = 'self'
elif msg['ToUserName'] == 'filehelper': # File Helper
msg_type_id = 2
user['name'] = 'file_helper'
elif msg['FromUserName'][:2] == '@@': # Group
msg_type_id = 3
user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
elif self.is_contact(msg['FromUserName']): # Contact
msg_type_id = 4
user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
elif self.is_public(msg['FromUserName']): # Public
msg_type_id = 5
user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
elif self.is_special(msg['FromUserName']): # Special
msg_type_id = 6
user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
else:
msg_type_id = 99
user['name'] = 'unknown'
if not user['name']:
user['name'] = 'unknown'
user['name'] = HTMLParser.HTMLParser().unescape(user['name'])
if self.DEBUG and msg_type_id != 0:
print '[MSG] %s:' % user['name']
content = self.extract_msg_content(msg_type_id, msg)
message = {'msg_type_id': msg_type_id,
'msg_id': msg['MsgId'],
'content': content,
'to_user_id': msg['ToUserName'],
'user': user}
self.handle_msg_all(message)
def schedule(self):
"""
The function to do schedule works.
This function will be called a lot of times.
Please override this if needed.
:return: None
"""
pass
def proc_msg(self):
self.test_sync_check()
while True:
check_time = time.time()
[retcode, selector] = self.sync_check()
if retcode == '1100': # logout from mobile
break
elif retcode == '1101': # login web WeChat from other devide
break
elif retcode == '0':
if selector == '2': # new message
r = self.sync()
if r is not None:
self.handle_msg(r)
elif selector == '7': # Play WeChat on mobile
r = self.sync()
if r is not None:
self.handle_msg(r)
elif selector == '0': # nothing
pass
else:
pass
self.schedule()
check_time = time.time() - check_time
if check_time < 0.8:
time.sleep(1 - check_time)
def send_msg_by_uid(self, word, dst='filehelper'):
url = self.base_uri + '/webwxsendmsg?pass_ticket=%s' % self.pass_ticket
msg_id = str(int(time.time() * 1000)) + str(random.random())[:5].replace('.', '')
        if isinstance(word, str):
word = word.decode('utf-8')
params = {
'BaseRequest': self.base_request,
'Msg': {
"Type": 1,
"Content": word,
"FromUserName": self.my_account['UserName'],
"ToUserName": dst,
"LocalID": msg_id,
"ClientMsgId": msg_id
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
try:
r = self.session.post(url, data=data, headers=headers)
except (ConnectionError, ReadTimeout):
return False
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def get_user_id(self, name):
if name == '':
return ''
for contact in self.contact_list:
if 'RemarkName' in contact and contact['RemarkName'] == name:
return contact['UserName']
elif 'NickName' in contact and contact['NickName'] == name:
return contact['UserName']
elif 'DisplayName' in contact and contact['DisplayName'] == name:
return contact['UserName']
return ''
def send_msg(self, name, word, isfile=False):
uid = self.get_user_id(name)
if uid:
if isfile:
with open(word, 'r') as f:
result = True
for line in f.readlines():
line = line.replace('\n', '')
print '-> ' + name + ': ' + line
if self.send_msg_by_uid(line, uid):
pass
else:
result = False
time.sleep(1)
return result
else:
if self.send_msg_by_uid(word, uid):
return True
else:
return False
        else:
            if self.DEBUG:
                print '[ERROR] This user does not exist.'
            return False
@staticmethod
def search_content(key, content, fmat='attr'):
if fmat == 'attr':
pm = re.search(key + '\s?=\s?"([^"<]+)"', content)
if pm:
return pm.group(1)
elif fmat == 'xml':
pm = re.search('<{0}>([^<]+)</{0}>'.format(key), content)
if pm:
return pm.group(1)
return 'unknown'
def run(self):
self.get_uuid()
self.gen_qr_code('qr.png')
        print '[INFO] Please use WeChat to scan the QR code.'
        result = self.wait4login()
        if result != SUCCESS:
            print '[ERROR] Web WeChat login failed. code=%s' % (result,)
            return
        if self.login():
            print '[INFO] Web WeChat login succeeded.'
        else:
            print '[ERROR] Web WeChat login failed.'
            return
        if self.init():
            print '[INFO] Web WeChat init succeeded.'
        else:
            print '[ERROR] Web WeChat init failed.'
            return
self.status_notify()
self.get_contact()
        print '[INFO] Got %d contacts.' % len(self.contact_list)
        print '[INFO] Starting to process messages.'
self.proc_msg()
def get_uuid(self):
url = 'https://login.weixin.qq.com/jslogin'
params = {
'appid': 'wx782c26e4c19acffb',
'fun': 'new',
'lang': 'zh_CN',
'_': int(time.time()) * 1000 + random.randint(1, 999),
}
r = self.session.get(url, params=params)
r.encoding = 'utf-8'
data = r.text
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
pm = re.search(regx, data)
if pm:
code = pm.group(1)
self.uuid = pm.group(2)
return code == '200'
return False
def gen_qr_code(self, qr_file_path):
string = 'https://login.weixin.qq.com/l/' + self.uuid
qr = pyqrcode.create(string)
if self.conf['qr'] == 'png':
qr.png(qr_file_path, scale=8)
show_image(qr_file_path)
# img = Image.open(qr_file_path)
# img.show()
elif self.conf['qr'] == 'tty':
print(qr.terminal(quiet_zone=1))
def do_request(self, url):
r = self.session.get(url)
r.encoding = 'utf-8'
data = r.text
param = re.search(r'window.code=(\d+);', data)
code = param.group(1)
return code, data
def wait4login(self):
'''
        http comet:
        tip=1, the request waits for the user to scan the QR code,
            201: scanned
            408: timeout
        tip=0, the request waits for the user to confirm the login,
            200: confirmed
'''
LOGIN_TEMPLATE = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s'
tip = 1
try_later_secs = 1
MAX_RETRY_TIMES = 10
        code = UNKNOWN
retry_time = MAX_RETRY_TIMES
while retry_time > 0:
url = LOGIN_TEMPLATE % (tip, self.uuid, int(time.time()))
code, data = self.do_request(url)
if code == SCANED:
                print '[INFO] Please confirm the login on your phone.'
tip = 0
elif code == SUCCESS: #confirmed sucess
param = re.search(r'window.redirect_uri="(\S+?)";', data)
redirect_uri = param.group(1) + '&fun=new'
self.redirect_uri = redirect_uri
self.base_uri = redirect_uri[:redirect_uri.rfind('/')]
return code
elif code == TIMEOUT:
                print '[ERROR] WeChat login timed out. Retrying in %s secs...' % (try_later_secs,)
                tip = 1  # need to reset tip, because the server will reset the peer connection
retry_time -= 1
time.sleep(try_later_secs)
else:
                print ('[ERROR] WeChat login exception, return_code=%s. Retrying in %s secs...' %
                       (code, try_later_secs))
tip = 1
retry_time -= 1
time.sleep(try_later_secs)
return code
def login(self):
if len(self.redirect_uri) < 4:
print '[ERROR] Login failed due to network problem, please try again.'
return False
r = self.session.get(self.redirect_uri)
r.encoding = 'utf-8'
data = r.text
doc = xml.dom.minidom.parseString(data)
root = doc.documentElement
for node in root.childNodes:
if node.nodeName == 'skey':
self.skey = node.childNodes[0].data
elif node.nodeName == 'wxsid':
self.sid = node.childNodes[0].data
elif node.nodeName == 'wxuin':
self.uin = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
self.pass_ticket = node.childNodes[0].data
if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
return False
self.base_request = {
'Uin': self.uin,
'Sid': self.sid,
'Skey': self.skey,
'DeviceID': self.device_id,
}
return True
def init(self):
url = self.base_uri + '/webwxinit?r=%i&lang=en_US&pass_ticket=%s' % (int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.base_request
}
r = self.session.post(url, data=json.dumps(params))
r.encoding = 'utf-8'
dic = json.loads(r.text)
self.sync_key = dic['SyncKey']
self.my_account = dic['User']
self.sync_key_str = '|'.join([str(keyVal['Key']) + '_' + str(keyVal['Val'])
for keyVal in self.sync_key['List']])
return dic['BaseResponse']['Ret'] == 0
def status_notify(self):
url = self.base_uri + '/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % self.pass_ticket
self.base_request['Uin'] = int(self.base_request['Uin'])
params = {
'BaseRequest': self.base_request,
"Code": 3,
"FromUserName": self.my_account['UserName'],
"ToUserName": self.my_account['UserName'],
"ClientMsgId": int(time.time())
}
r = self.session.post(url, data=json.dumps(params))
r.encoding = 'utf-8'
dic = json.loads(r.text)
return dic['BaseResponse']['Ret'] == 0
def test_sync_check(self):
for host in ['webpush', 'webpush2']:
self.sync_host = host
retcode = self.sync_check()[0]
if retcode == '0':
return True
return False
def sync_check(self):
params = {
'r': int(time.time()),
'sid': self.sid,
'uin': self.uin,
'skey': self.skey,
'deviceid': self.device_id,
'synckey': self.sync_key_str,
'_': int(time.time()),
}
url = 'https://' + self.sync_host + '.weixin.qq.com/cgi-bin/mmwebwx-bin/synccheck?' + urllib.urlencode(params)
try:
r = self.session.get(url, timeout=60)
except (ConnectionError, ReadTimeout):
return [-1, -1]
r.encoding = 'utf-8'
data = r.text
pm = re.search(r'window.synccheck=\{retcode:"(\d+)",selector:"(\d+)"\}', data)
retcode = pm.group(1)
selector = pm.group(2)
return [retcode, selector]
def sync(self):
url = self.base_uri + '/webwxsync?sid=%s&skey=%s&lang=en_US&pass_ticket=%s' \
% (self.sid, self.skey, self.pass_ticket)
params = {
'BaseRequest': self.base_request,
'SyncKey': self.sync_key,
'rr': ~int(time.time())
}
try:
r = self.session.post(url, data=json.dumps(params), timeout=60)
except (ConnectionError, ReadTimeout):
return None
r.encoding = 'utf-8'
dic = json.loads(r.text)
if dic['BaseResponse']['Ret'] == 0:
self.sync_key = dic['SyncKey']
self.sync_key_str = '|'.join([str(keyVal['Key']) + '_' + str(keyVal['Val'])
for keyVal in self.sync_key['List']])
return dic
def get_icon(self, uid):
url = self.base_uri + '/webwxgeticon?username=%s&skey=%s' % (uid, self.skey)
r = self.session.get(url)
data = r.content
fn = 'img_' + uid + '.jpg'
with open(fn, 'wb') as f:
f.write(data)
return fn
def get_head_img(self, uid):
url = self.base_uri + '/webwxgetheadimg?username=%s&skey=%s' % (uid, self.skey)
r = self.session.get(url)
data = r.content
fn = 'img_' + uid + '.jpg'
with open(fn, 'wb') as f:
f.write(data)
return fn
def get_msg_img_url(self, msgid):
return self.base_uri + '/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
def get_msg_img(self, msgid):
url = self.base_uri + '/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
r = self.session.get(url)
data = r.content
fn = 'img_' + msgid + '.jpg'
with open(fn, 'wb') as f:
f.write(data)
return fn
def get_voice_url(self, msgid):
return self.base_uri + '/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
def get_voice(self, msgid):
url = self.base_uri + '/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
r = self.session.get(url)
data = r.content
fn = 'voice_' + msgid + '.mp3'
with open(fn, 'wb') as f:
f.write(data)
return fn
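# A minimal, hypothetical bot (not part of the original file) showing how
# handle_msg_all is meant to be overridden: echo plain-text contact messages
# (msg_type_id 4, content type 0) back to the sender.
class EchoBot(WXBot):
    def handle_msg_all(self, msg):
        if msg['msg_type_id'] == 4 and msg['content']['type'] == 0:
            self.send_msg_by_uid(msg['content']['data'], msg['user']['id'])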
|
py | 1a3575eba8a86423be4cce4fde0a8b51b3a98e45 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_template
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower job template.
description:
- Create, update, or destroy Ansible Tower job templates. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the job template.
required: True
type: str
description:
description:
- Description to use for the job template.
type: str
job_type:
description:
- The job type to use for the job template.
required: True
choices: ["run", "check", "scan"]
type: str
inventory:
description:
- Name of the inventory to use for the job template.
type: str
project:
description:
- Name of the project to use for the job template.
required: True
type: str
playbook:
description:
- Path to the playbook to use for the job template within the project provided.
required: True
type: str
credential:
description:
- Name of the credential to use for the job template.
version_added: 2.7
type: str
vault_credential:
description:
- Name of the vault credential to use for the job template.
version_added: 2.7
type: str
forks:
description:
- The number of parallel or simultaneous processes to use while executing the playbook.
type: int
limit:
description:
- A host pattern to further constrain the list of hosts managed or affected by the playbook
type: str
verbosity:
description:
- Control the output level Ansible produces as the playbook runs. 0 - Normal, 1 - Verbose, 2 - More Verbose, 3 - Debug, 4 - Connection Debug.
choices: [0, 1, 2, 3, 4]
default: 0
type: int
extra_vars_path:
description:
- Path to the C(extra_vars) YAML file.
type: path
job_tags:
description:
- Comma separated list of the tags to use for the job template.
type: str
force_handlers_enabled:
description:
- Enable forcing playbook handlers to run even if a task fails.
version_added: 2.7
type: bool
default: 'no'
skip_tags:
description:
- Comma separated list of the tags to skip for the job template.
type: str
start_at_task:
description:
- Start the playbook at the task matching this name.
version_added: 2.7
type: str
diff_mode_enabled:
description:
- Enable diff mode for the job template.
version_added: 2.7
type: bool
default: 'no'
fact_caching_enabled:
description:
- Enable use of fact caching for the job template.
version_added: 2.7
type: bool
default: 'no'
host_config_key:
description:
- Allow provisioning callbacks using this host config key.
type: str
ask_diff_mode:
description:
- Prompt user to enable diff mode (show changes) to files when supported by modules.
version_added: 2.7
type: bool
default: 'no'
ask_extra_vars:
description:
- Prompt user for (extra_vars) on launch.
type: bool
default: 'no'
ask_limit:
description:
- Prompt user for a limit on launch.
version_added: 2.7
type: bool
default: 'no'
ask_tags:
description:
- Prompt user for job tags on launch.
type: bool
default: 'no'
ask_skip_tags:
description:
- Prompt user for job tags to skip on launch.
version_added: 2.7
type: bool
default: 'no'
ask_job_type:
description:
- Prompt user for job type on launch.
type: bool
default: 'no'
ask_verbosity:
description:
- Prompt user to choose a verbosity level on launch.
version_added: 2.7
type: bool
default: 'no'
ask_inventory:
description:
- Prompt user for inventory on launch.
type: bool
default: 'no'
ask_credential:
description:
- Prompt user for credential on launch.
type: bool
default: 'no'
survey_enabled:
description:
- Enable a survey on the job template.
version_added: 2.7
type: bool
default: 'no'
survey_spec:
description:
- JSON/YAML dict formatted survey definition.
version_added: 2.8
type: dict
required: False
become_enabled:
description:
- Activate privilege escalation.
type: bool
default: 'no'
concurrent_jobs_enabled:
description:
- Allow simultaneous runs of the job template.
version_added: 2.7
type: bool
default: 'no'
timeout:
description:
- Maximum time in seconds to wait for a job to finish (server-side).
type: int
custom_virtualenv:
version_added: "2.9"
description:
- Local absolute file path containing a custom Python virtualenv to use.
type: str
required: False
default: ''
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
type: str
extends_documentation_fragment: awx.awx.auth
notes:
- JSON for survey_spec can be found in Tower API Documentation. See
U(https://docs.ansible.com/ansible-tower/latest/html/towerapi/api_ref.html#/Job_Templates/Job_Templates_job_templates_survey_spec_create)
for POST operation payload example.
'''
EXAMPLES = '''
- name: Create tower Ping job template
tower_job_template:
name: "Ping"
job_type: "run"
inventory: "Local"
project: "Demo"
playbook: "ping.yml"
credential: "Local"
state: "present"
tower_config_file: "~/tower_cli.cfg"
survey_enabled: yes
survey_spec: "{{ lookup('file', 'my_survey.json') }}"
custom_virtualenv: "/var/lib/awx/venv/custom-venv/"
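# A second, hypothetical example: removing the template created above.
- name: Remove tower Ping job template
  tower_job_template:
    name: "Ping"
    job_type: "run"
    inventory: "Local"
    project: "Demo"
    playbook: "ping.yml"
    state: "absent"
    tower_config_file: "~/tower_cli.cfg"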
'''
from ..module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def update_fields(p):
'''This updates the module field names
to match the field names tower-cli expects to make
calling of the modify/delete methods easier.
'''
params = p.copy()
field_map = {
'fact_caching_enabled': 'use_fact_cache',
'ask_diff_mode': 'ask_diff_mode_on_launch',
'ask_extra_vars': 'ask_variables_on_launch',
'ask_limit': 'ask_limit_on_launch',
'ask_tags': 'ask_tags_on_launch',
'ask_skip_tags': 'ask_skip_tags_on_launch',
'ask_verbosity': 'ask_verbosity_on_launch',
'ask_inventory': 'ask_inventory_on_launch',
'ask_credential': 'ask_credential_on_launch',
'ask_job_type': 'ask_job_type_on_launch',
'diff_mode_enabled': 'diff_mode',
'concurrent_jobs_enabled': 'allow_simultaneous',
'force_handlers_enabled': 'force_handlers',
}
params_update = {}
for old_k, new_k in field_map.items():
v = params.pop(old_k)
params_update[new_k] = v
extra_vars = params.get('extra_vars_path')
if extra_vars is not None:
params_update['extra_vars'] = ['@' + extra_vars]
params.update(params_update)
return params
def update_resources(module, p):
params = p.copy()
identity_map = {
'project': 'name',
'inventory': 'name',
'credential': 'name',
'vault_credential': 'name',
}
for k, v in identity_map.items():
try:
if params[k]:
key = 'credential' if '_credential' in k else k
result = tower_cli.get_resource(key).get(**{v: params[k]})
params[k] = result['id']
elif k in params:
# unset empty parameters to avoid ValueError: invalid literal for int() with base 10: ''
del(params[k])
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
return params
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(default=''),
job_type=dict(choices=['run', 'check', 'scan'], required=True),
inventory=dict(default=''),
project=dict(required=True),
playbook=dict(required=True),
credential=dict(default=''),
vault_credential=dict(default=''),
custom_virtualenv=dict(type='str', required=False),
forks=dict(type='int'),
limit=dict(default=''),
verbosity=dict(type='int', choices=[0, 1, 2, 3, 4], default=0),
extra_vars_path=dict(type='path', required=False),
job_tags=dict(default=''),
force_handlers_enabled=dict(type='bool', default=False),
skip_tags=dict(default=''),
start_at_task=dict(default=''),
timeout=dict(type='int', default=0),
fact_caching_enabled=dict(type='bool', default=False),
host_config_key=dict(default=''),
ask_diff_mode=dict(type='bool', default=False),
ask_extra_vars=dict(type='bool', default=False),
ask_limit=dict(type='bool', default=False),
ask_tags=dict(type='bool', default=False),
ask_skip_tags=dict(type='bool', default=False),
ask_job_type=dict(type='bool', default=False),
ask_verbosity=dict(type='bool', default=False),
ask_inventory=dict(type='bool', default=False),
ask_credential=dict(type='bool', default=False),
survey_enabled=dict(type='bool', default=False),
survey_spec=dict(type='dict', required=False),
become_enabled=dict(type='bool', default=False),
diff_mode_enabled=dict(type='bool', default=False),
concurrent_jobs_enabled=dict(type='bool', default=False),
state=dict(choices=['present', 'absent'], default='present'),
)
module = TowerModule(argument_spec=argument_spec, supports_check_mode=True)
name = module.params.get('name')
state = module.params.pop('state')
json_output = {'job_template': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
jt = tower_cli.get_resource('job_template')
params = update_resources(module, module.params)
params = update_fields(params)
params['create_on_missing'] = True
try:
if state == 'present':
result = jt.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = jt.delete(**params)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound, exc.AuthError) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
|
py | 1a35762951a1c68a833f6f4ff3ce8fd76d13ed10 | import copy
import numpy as np
import torch
from .utils.utils import get_optimizer_fn
from .utils.schedule import (
PeriodicSchedule,
get_schedule,
)
from .agent import Agent
from .dqn import legal_moves_adapter
from .mlp import DistributionalMLP, ComplexMLP
class RainbowDQNAgent(Agent):
"""An agent implementing the DQN algorithm. Uses an epsilon greedy
exploration policy
"""
def __init__(
self,
*,
obs_dim,
act_dim,
v_min=0,
v_max=200,
atoms=51,
optimizer_fn=None,
id=0,
discount_rate=0.99,
grad_clip=None,
target_net_soft_update=False,
target_net_update_fraction=0.05,
target_net_update_schedule=None,
epsilon_schedule=None,
learn_schedule=None,
lr_schedule=None,
seed=42,
device="cpu",
double=True,
dueling=True,
noisy=True,
distributional=True,
max_replay_buffer_size=50000,
):
"""
Args:
obs_dim: The dimension of the observations.
act_dim: The number of actions available to the agent.
v_min: minimum possible value of the value function
v_max: maximum possible value of the value function
atoms: number of atoms in the distributional DQN context
optimizer_fn: A function that takes in a list of parameters to optimize
and returns the optimizer.
id: ID used to create the timescale in the logger for the agent.
            max_replay_buffer_size: Maximum size of the replay buffer the agent
                pushes observations to and samples from during learning.
discount_rate (float): A number between 0 and 1 specifying how much
future rewards are discounted by the agent.
grad_clip (float): Gradients will be clipped to between
[-grad_clip, gradclip]
target_net_soft_update (bool): Whether the target net parameters are
replaced by the qnet parameters completely or using a weighted
average of the target net parameters and the qnet parameters.
target_net_update_fraction (float): The weight given to the target
net parameters in a soft update.
target_net_update_schedule: Schedule determining how frequently the
target net is updated.
epsilon_schedule: Schedule determining the value of epsilon through
the course of training.
learn_schedule: Schedule determining when the learning process actually
starts.
seed: Seed for numpy random number generator.
            lr_schedule: Schedule determining the learning rate through the
                course of training.
device: Device on which all computations should be run.
            double: whether or not to use the double feature (from double DQN)
            dueling: whether or not to use the dueling feature (from dueling DQN)
            noisy: whether or not to use noisy linear layers (from noisy networks)
            distributional: whether or not to use the distributional feature (from distributional DQN)
"""
super().__init__(
id=id,
seed=seed,
obs_dim=obs_dim,
act_dim=act_dim,
learn_schedule=learn_schedule,
epsilon_schedule=epsilon_schedule,
lr_schedule=lr_schedule,
max_replay_buffer_size=max_replay_buffer_size,
)
self._params["double"] = double
self._params["dueling"] = dueling
self._params["noisy"] = noisy
self._params["distributional"] = distributional
self._params["discount_rate"] = discount_rate
self._params["grad_clip"] = grad_clip
self._params["target_net_soft_update"] = target_net_soft_update
self._params["target_net_update_fraction"] = target_net_update_fraction
self._device = torch.device(device)
if self._params["distributional"]:
self._params["atoms"] = atoms
self._params["v_min"] = v_min
self._params["v_max"] = v_max
self._supports = torch.linspace(self._params["v_min"], self._params["v_max"], self._params["atoms"]).to(
self._device
)
# qnet["kwargs"]["supports"] = self._supports
self._delta = float(self._params["v_max"] - self._params["v_min"]) / (self._params["atoms"] - 1)
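            # The supports are the fixed atom locations z_i of the categorical
            # value distribution; _delta is their spacing, (v_max - v_min) / (atoms - 1).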
self._nsteps = 1
if self._params["distributional"]:
self._qnet = legal_moves_adapter(DistributionalMLP)(
self._params["obs_dim"],
self._params["act_dim"],
self._supports,
hidden_units=256,
num_hidden_layers=2,
noisy=False,
dueling=True,
sigma_init=0.5,
atoms=atoms,
).to(self._device)
else:
self._qnet = legal_moves_adapter(ComplexMLP)(
self._params["obs_dim"],
self._params["act_dim"],
hidden_units=256,
num_hidden_layers=2,
noisy=self._params["noisy"],
dueling=self._params["dueling"],
sigma_init=0.4,
atoms=1,
).to(self._device)
self._target_qnet = copy.deepcopy(self._qnet).requires_grad_(False)
optimizer_fn = get_optimizer_fn(optimizer_fn)
if optimizer_fn is None:
optimizer_fn = torch.optim.Adam
self._optimizer = optimizer_fn(self._qnet.parameters())
self._loss_fn = torch.nn.SmoothL1Loss()
self._id = id
self._target_net_update_schedule = get_schedule(target_net_update_schedule)
if self._target_net_update_schedule is None:
self._target_net_update_schedule = PeriodicSchedule(False, True, 10000)
self._state = {"episode_start": True}
self._training = True
def projection_distribution(self, batch):
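        """Project the target distribution onto the fixed support (C51-style).

        Computes Tz = r + gamma * (1 - done) * z for every atom z, clamps it to
        [v_min, v_max], and splits each atom's probability mass between the two
        nearest support points in proportion to its distance from them.
        """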
batch_obs = batch["observations"]
batch_next_obs = batch["next_observations"]
batch_reward = batch["rewards"].reshape(-1, 1).to(self._device)
batch_not_done = 1 - batch["done"].reshape(-1, 1).to(self._device)
with torch.no_grad():
next_action = self._target_qnet(batch_next_obs).argmax(1)
next_dist = self._target_qnet.dist(batch_next_obs)
next_dist = next_dist[range(batch["observations"].shape[0]), next_action]
t_z = batch_reward + batch_not_done * self._params["discount_rate"] * self._supports
t_z = t_z.clamp(min=self._params["v_min"], max=self._params["v_max"])
b = (t_z - self._params["v_min"]) / self._delta
l = b.floor().long()
u = b.ceil().long()
l[(u > 0) * (l == u)] -= 1
u[(l < (self._params["atoms"] - 1)) * (l == u)] += 1
offset = (
torch.linspace(0, (batch_obs.shape[0] - 1) * self._params["atoms"], batch_obs.shape[0])
.long()
.unsqueeze(1)
.expand(batch_obs.shape[0], self._params["atoms"])
.to(self._device)
)
proj_dist = torch.zeros(next_dist.size(), device=self._device)
proj_dist.view(-1).index_add_(0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1))
proj_dist.view(-1).index_add_(0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1))
return proj_dist
def train(self):
"""Changes the agent to training mode."""
super().train()
self._qnet.train()
self._target_qnet.train()
def eval(self):
"""Changes the agent to evaluation mode."""
super().eval()
self._qnet.eval()
self._target_qnet.eval()
@torch.no_grad()
def act(self, observation, formatted_legal_moves, update_schedule=True):
self.eval()
        # Add a batch dimension first, then move the observation to the device once;
        # calling np.expand_dims on a tensor already moved to a CUDA device would fail.
        observation = torch.tensor(np.expand_dims(observation, axis=0)).to(self._device).float()
        formatted_legal_moves = torch.tensor(formatted_legal_moves).to(self._device).float()
epsilon = self.get_epsilon_schedule(update_schedule)
if self._rng.random() < epsilon:
legal_moves = torch.nonzero(formatted_legal_moves == 0).view(-1).cpu().numpy()
action = self._rng.choice(legal_moves)
else:
a = self._qnet(observation, legal_moves=formatted_legal_moves).cpu()
action = torch.argmax(a).numpy()
return action
def learn(self, batch, update_schedule=True):
info = {}
self.train()
info["lr"] = self._lr_schedule.update()
for grp in self._optimizer.param_groups:
grp["lr"] = info["lr"]
# do not modify batch in-place
batch = {key: torch.tensor(value).to(self._device) for key, value in batch.items()}
# Compute predicted Q values
self._optimizer.zero_grad()
pred_qvals = self._qnet(batch["observations"], legal_moves=batch["legal_moves_as_int"])
actions = batch["actions"].long()
if self._params["distributional"]:
# todo: need legal moves??
current_dist = self._qnet.dist(batch["observations"])
log_p = torch.log(current_dist[range(batch["observations"].shape[0]), actions])
target_prob = self.projection_distribution(batch)
loss = -(target_prob * log_p).sum(1)
loss = loss.mean()
else:
pred_qvals = pred_qvals[torch.arange(pred_qvals.size(0)), actions]
# Compute 1-step Q targets
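            # Double DQN: select the next action with the online network but
            # evaluate it with the target network; otherwise both come from the
            # target network (vanilla DQN).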
if self._params["double"]:
next_action = self._qnet(batch["next_observations"], legal_moves=batch["legal_moves_as_int"])
else:
next_action = self._target_qnet(batch["next_observations"], legal_moves=batch["legal_moves_as_int"])
_, next_action = torch.max(next_action, dim=1)
next_qvals = self._target_qnet(batch["next_observations"])
next_qvals = next_qvals[torch.arange(next_qvals.size(0)), next_action]
q_targets = batch["rewards"] + self._params["discount_rate"] * next_qvals * (1 - batch["done"])
loss = self._loss_fn(pred_qvals, q_targets)
if self._training:
loss.backward()
if self._params["grad_clip"] is not None:
torch.nn.utils.clip_grad_value_(self._qnet.parameters(), self._params["grad_clip"])
self._optimizer.step()
# Update target network
if self._training and self._target_net_update_schedule.update():
self._update_target()
if update_schedule:
self.get_epsilon_schedule(update_schedule)
# Return loss
info["loss"] = loss.item()
return info
def _update_target(self):
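        """Sync the target network: either a Polyak soft update,
        theta_target <- (1 - tau) * theta_target + tau * theta_online
        with tau = target_net_update_fraction, or a hard copy of the online weights.
        """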
if self._params["target_net_soft_update"]:
target_params = self._target_qnet.state_dict()
current_params = self._qnet.state_dict()
for key in list(target_params.keys()):
target_params[key] = (1 - self._params["target_net_update_fraction"]) * target_params[
key
] + self._params["target_net_update_fraction"] * current_params[key]
self._target_qnet.load_state_dict(target_params)
else:
self._target_qnet.load_state_dict(self._qnet.state_dict())
def save(self, f):
torch.save(
{
"id": self._id,
"params": self._params,
"qnet": self._qnet.state_dict(),
"target_qnet": self._target_qnet.state_dict(),
"optimizer": self._optimizer.state_dict(),
"learn_schedule": self._learn_schedule,
"epsilon_schedule": self._epsilon_schedule,
"target_net_update_schedule": self._target_net_update_schedule,
"rng": self._rng,
"lr_schedule": self._lr_schedule,
},
f,
)
def load(self, f):
super().load(f)
checkpoint = torch.load(f)
self._id = checkpoint["id"]
self._params = checkpoint["params"]
self._qnet.load_state_dict(checkpoint["qnet"])
self._target_qnet.load_state_dict(checkpoint["target_qnet"])
self._optimizer.load_state_dict(checkpoint["optimizer"])
self._learn_schedule = checkpoint["learn_schedule"]
self._epsilon_schedule = checkpoint["epsilon_schedule"]
self._target_net_update_schedule = checkpoint["target_net_update_schedule"]
self._rng = checkpoint["rng"]
self._lr_schedule = checkpoint["lr_schedule"]
|
py | 1a3576c38a4372623c6649d719600ee9507b359e | import os
import subprocess
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from joblib import Parallel, delayed
def main():
file_sra, in_dir, out_dir, n_j = getArgs()
sra_list = loadAccessions(file_sra)
Parallel(n_jobs=n_j, prefer="threads")(
delayed(runAriba)(sra, in_dir, out_dir) for sra in sra_list
)
def loadAccessions(file_sra):
"""
Loads in list of SRA accession numbers from file_sra
There is no jason model in ariba docker, otherwise, we can use:
sra_list = json.load(open(file_sra, "r"))
"""
    with open(file_sra) as f:
        text = f.read()
tmp = text.split("\n")
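    # Assumes each data line looks like '  "SRR1234567",' (the first and last
    # lines are skipped as header/footer); the slice grabs the text between the
    # first pair of double quotes to recover the accession.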
sra_list = [k[(k.index('"') + 1) : k.index('"', 8)] for k in tmp[1:-1]]
return sra_list
def getArgs():
parser = ArgumentParser(
formatter_class=RawTextHelpFormatter,
prog="runAribaInLoop_withBam.py",
description="Run Ariba for isolates to output variant report files and intermediate results",
)
parser.add_argument("-f", "--fSRAs", dest="fileSRAs")
parser.add_argument("-i", "--iDir", dest="inDir")
parser.add_argument("-o", "--oDir", dest="outDir")
parser.add_argument("-n", "--n_jobs", dest="nJobs")
args = parser.parse_args()
f_sra = args.fileSRAs
i_dir = args.inDir
o_dir = args.outDir
n_job = args.nJobs
return f_sra, i_dir, o_dir, n_job
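# Example invocation (hypothetical paths):
#   python runAribaInLoop_withBam.py -f sra_list.json -i fastq_dir -o out_dir -n 4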
def runAriba(sra, in_dir, out_dir):
# print (sra)
fastq_dir = in_dir + "/"
reads1 = fastq_dir + sra + "_1.fastq"
reads2 = fastq_dir + sra + "_2.fastq"
if os.path.isfile(reads1) and os.path.isfile(reads2):
out_dir = out_dir + "/outRun_" + sra
if not (os.path.isfile(out_dir + "/report.tsv")):
if os.path.isdir(out_dir):
subprocess.run(["rm", "-r", out_dir])
cmd = [
"ariba",
"run",
"--noclean",
"out.card.prepareref",
reads1,
reads2,
out_dir,
]
with open("./aribaRunLog.txt", "a+") as f:
subprocess.call(cmd, stdout=f)
else:
print("UGH! invalid path " + reads1 + " or " + reads2)
with open("./sra_paired_read_notFound.txt", "a+") as l:
l.write(sra + "\n")
if __name__ == "__main__":
main()
|
py | 1a35783c32729c16c315e6d77906217727a64e32 | import math
import torch.nn as nn
from mmcv.runner import ModuleList
from mmocr.models.builder import ENCODERS
from mmocr.models.textrecog.layers import (Adaptive2DPositionalEncoding,
SatrnEncoderLayer)
from .base_encoder import BaseEncoder
@ENCODERS.register_module()
class SatrnEncoder(BaseEncoder):
"""Implement encoder for SATRN, see `SATRN.
<https://arxiv.org/abs/1910.04396>`_.
"""
def __init__(self,
n_layers=12,
n_head=8,
d_k=64,
d_v=64,
d_model=512,
n_position=100,
d_inner=256,
dropout=0.1,
init_cfg=None,
**kwargs):
super().__init__(init_cfg=init_cfg)
self.d_model = d_model
self.position_enc = Adaptive2DPositionalEncoding(
d_hid=d_model,
n_height=n_position,
n_width=n_position,
dropout=dropout)
self.layer_stack = ModuleList([
SatrnEncoderLayer(
d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
for _ in range(n_layers)
])
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, feat, img_metas=None):
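        """
        Args:
            feat (Tensor): Backbone feature map of shape :math:`(N, D_m, H, W)`,
                where :math:`D_m` is ``d_model``.
            img_metas (list[dict], optional): Per-image metadata; only the
                ``valid_ratio`` key is used here, to mask out padded width.

        Returns:
            Tensor: Encoded feature map of shape :math:`(N, D_m, H, W)`.
        """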
valid_ratios = [1.0 for _ in range(feat.size(0))]
if img_metas is not None:
valid_ratios = [
img_meta.get('valid_ratio', 1.0) for img_meta in img_metas
]
feat += self.position_enc(feat)
n, c, h, w = feat.size()
mask = feat.new_zeros((n, h, w))
for i, valid_ratio in enumerate(valid_ratios):
valid_width = min(w, math.ceil(w * valid_ratio))
mask[i, :, :valid_width] = 1
mask = mask.view(n, h * w)
feat = feat.view(n, c, h * w)
output = feat.permute(0, 2, 1).contiguous()
for enc_layer in self.layer_stack:
output = enc_layer(output, h, w, mask)
output = self.layer_norm(output)
output = output.permute(0, 2, 1).contiguous()
output = output.view(n, self.d_model, h, w)
return output
|
py | 1a3578a56a4bccb214d3e2c35a83b6e6b51851e2 | """
Basis Theory API
## Getting Started * Sign-in to [Basis Theory](https://basistheory.com) and go to [Applications](https://portal.basistheory.com/applications) * Create a Basis Theory Server to Server Application * All permissions should be selected * Paste the API Key into the `BT-API-KEY` variable # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from basistheory.api_client import ApiClient, Endpoint as _Endpoint
from basistheory.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types,
set_request_options
)
from basistheory.model.create_tenant_invitation_request import CreateTenantInvitationRequest
from basistheory.model.problem_details import ProblemDetails
from basistheory.model.tenant import Tenant
from basistheory.model.tenant_invitation_response import TenantInvitationResponse
from basistheory.model.tenant_invitation_response_paginated_list import TenantInvitationResponsePaginatedList
from basistheory.model.tenant_invitation_status import TenantInvitationStatus
from basistheory.model.tenant_member_response_paginated_list import TenantMemberResponsePaginatedList
from basistheory.model.tenant_usage_report import TenantUsageReport
from basistheory.model.update_tenant_request import UpdateTenantRequest
from basistheory.model.validation_problem_details import ValidationProblemDetails
class TenantsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.create_invitation_endpoint = _Endpoint(
settings={
'response_type': (TenantInvitationResponse,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations',
'operation_id': 'create_invitation',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'create_tenant_invitation_request',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'create_tenant_invitation_request':
(CreateTenantInvitationRequest,),
},
'attribute_map': {
},
'location_map': {
'create_tenant_invitation_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.delete_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self',
'operation_id': 'delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.delete_invitation_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations/{invitationId}',
'operation_id': 'delete_invitation',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'invitation_id',
'request_options'
],
'required': [
'invitation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'invitation_id':
(str,),
},
'attribute_map': {
'invitation_id': 'invitationId',
},
'location_map': {
'invitation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.delete_member_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/members/{memberId}',
'operation_id': 'delete_member',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'member_id',
'request_options'
],
'required': [
'member_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'member_id':
(str,),
},
'attribute_map': {
'member_id': 'memberId',
},
'location_map': {
'member_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_endpoint = _Endpoint(
settings={
'response_type': (Tenant,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self',
'operation_id': 'get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_invitations_endpoint = _Endpoint(
settings={
'response_type': (TenantInvitationResponsePaginatedList,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations',
'operation_id': 'get_invitations',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'status',
'page',
'size',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'status':
(TenantInvitationStatus,),
'page':
(int,),
'size':
(int,),
},
'attribute_map': {
'status': 'status',
'page': 'page',
'size': 'size',
},
'location_map': {
'status': 'query',
'page': 'query',
'size': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_members_endpoint = _Endpoint(
settings={
'response_type': (TenantMemberResponsePaginatedList,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/members',
'operation_id': 'get_members',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'user_id',
'page',
'size',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'user_id':
([str],),
'page':
(int,),
'size':
(int,),
},
'attribute_map': {
'user_id': 'user_id',
'page': 'page',
'size': 'size',
},
'location_map': {
'user_id': 'query',
'page': 'query',
'size': 'query',
},
'collection_format_map': {
'user_id': 'multi',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_tenant_operation_report_endpoint = _Endpoint(
settings={
'response_type': (TenantUsageReport,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/reports/operations',
'operation_id': 'get_tenant_operation_report',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_tenant_usage_report_endpoint = _Endpoint(
settings={
'response_type': (TenantUsageReport,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/reports/usage',
'operation_id': 'get_tenant_usage_report',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.resend_invitation_endpoint = _Endpoint(
settings={
'response_type': (TenantInvitationResponse,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations/{invitationId}/resend',
'operation_id': 'resend_invitation',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'invitation_id',
'request_options'
],
'required': [
'invitation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'invitation_id':
(str,),
},
'attribute_map': {
'invitation_id': 'invitationId',
},
'location_map': {
'invitation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.update_endpoint = _Endpoint(
settings={
'response_type': (Tenant,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self',
'operation_id': 'update',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'update_tenant_request',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'update_tenant_request':
(UpdateTenantRequest,),
},
'attribute_map': {
},
'location_map': {
'update_tenant_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def create_invitation(
self,
**kwargs
):
"""create_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_invitation(async_req=True)
>>> result = thread.get()
Keyword Args:
create_tenant_invitation_request (CreateTenantInvitationRequest): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data without the
                HTTP status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                a single number is provided, it will be the total request timeout. It
                can also be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponse
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.create_invitation_endpoint.call_with_http_info(**kwargs)
def delete(
self,
**kwargs
):
"""delete # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data without the
                HTTP status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                a single number is provided, it will be the total request timeout. It
                can also be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.delete_endpoint.call_with_http_info(**kwargs)
def delete_invitation(
self,
invitation_id,
**kwargs
):
"""delete_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_invitation(invitation_id, async_req=True)
>>> result = thread.get()
Args:
invitation_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data without the
                HTTP status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                a single number is provided, it will be the total request timeout. It
                can also be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['invitation_id'] = \
invitation_id
return self.delete_invitation_endpoint.call_with_http_info(**kwargs)
def delete_member(
self,
member_id,
**kwargs
):
"""delete_member # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_member(member_id, async_req=True)
>>> result = thread.get()
Args:
member_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data without the
                HTTP status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                a single number is provided, it will be the total request timeout. It
                can also be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['member_id'] = \
member_id
return self.delete_member_endpoint.call_with_http_info(**kwargs)
def get(
self,
**kwargs
):
"""get # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data without the
                HTTP status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                a single number is provided, it will be the total request timeout. It
                can also be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
Tenant
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_endpoint.call_with_http_info(**kwargs)
def get_invitations(
self,
**kwargs
):
"""get_invitations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_invitations(async_req=True)
>>> result = thread.get()
Keyword Args:
status (TenantInvitationStatus): [optional]
page (int): [optional]
size (int): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data without the
                HTTP status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                a single number is provided, it will be the total request timeout. It
                can also be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponsePaginatedList
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_invitations_endpoint.call_with_http_info(**kwargs)
def get_members(
self,
**kwargs
):
"""get_members # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_members(async_req=True)
>>> result = thread.get()
Keyword Args:
user_id ([str]): [optional]
page (int): [optional]
size (int): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data without the
                HTTP status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                a single number is provided, it will be the total request timeout. It
                can also be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantMemberResponsePaginatedList
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_members_endpoint.call_with_http_info(**kwargs)
def get_tenant_operation_report(
self,
**kwargs
):
"""get_tenant_operation_report # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_operation_report(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data without the
                HTTP status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                a single number is provided, it will be the total request timeout. It
                can also be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantUsageReport
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_tenant_operation_report_endpoint.call_with_http_info(**kwargs)
def get_tenant_usage_report(
self,
**kwargs
):
"""get_tenant_usage_report # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_usage_report(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data without the
                HTTP status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                a single number is provided, it will be the total request timeout. It
                can also be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantUsageReport
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_tenant_usage_report_endpoint.call_with_http_info(**kwargs)
def resend_invitation(
self,
invitation_id,
**kwargs
):
"""resend_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.resend_invitation(invitation_id, async_req=True)
>>> result = thread.get()
Args:
invitation_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data without the
                HTTP status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                a single number is provided, it will be the total request timeout. It
                can also be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponse
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['invitation_id'] = \
invitation_id
return self.resend_invitation_endpoint.call_with_http_info(**kwargs)
def update(
self,
**kwargs
):
"""update # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update(async_req=True)
>>> result = thread.get()
Keyword Args:
update_tenant_request (UpdateTenantRequest): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data without the
                HTTP status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                a single number is provided, it will be the total request timeout. It
                can also be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
Tenant
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.update_endpoint.call_with_http_info(**kwargs)
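# Minimal usage sketch (not part of the generated client; the host URL and API
# key below are placeholders, and Configuration is the standard
# openapi-generator config object assumed to be exposed by the package):
#
#   import basistheory
#   config = basistheory.Configuration(
#       host="https://api.basistheory.com",
#       api_key={"apiKey": "<BT-API-KEY>"})
#   with basistheory.ApiClient(config) as api_client:
#       tenant = TenantsApi(api_client).get()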
|
py | 1a3578cb16cf233e652177dc8e57e483f31e2d5a | import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "edalize",
version = "0.2.3",
packages=['edalize'],
package_data = {'edalize' : [
'templates/yosys/yosys-makefile.j2',
'templates/yosys/yosys-script-tcl.j2',
'templates/icestorm/icestorm-makefile.j2',
'templates/spyglass/Makefile.j2',
'templates/spyglass/spyglass-project.prj.j2',
'templates/spyglass/spyglass-run-goal.tcl.j2',
'templates/vcs/Makefile.j2',
'templates/vivado/vivado-makefile.j2',
'templates/vivado/vivado-program.tcl.j2',
'templates/vivado/vivado-project.tcl.j2',
'templates/vivado/vivado-project-yosys.tcl.j2',
'templates/vivado/vivado-run.tcl.j2',
'templates/vivado/vivado-run-yosys.tcl.j2',
'templates/vivado/vivado-synth.tcl.j2',
'templates/vunit/run.py.j2',
'templates/quartus/quartus-project.tcl.j2',
'templates/quartus/quartus-std-makefile.j2',
'templates/quartus/quartus-pro-makefile.j2',
'templates/trellis/trellis-makefile.j2',
'templates/ascentlint/Makefile.j2',
'templates/ascentlint/run-ascentlint.tcl.j2',
'templates/symbiflow/symbiflow-vpr-makefile.j2',
'templates/libero/libero-project.tcl.j2',
'templates/libero/libero-run.tcl.j2',
'templates/libero/libero-syn-user.tcl.j2',
]},
author = "Olof Kindgren",
author_email = "[email protected]",
description = ("Edalize is a library for interfacing EDA tools, primarily for FPGA development"),
license = "BSD-2-Clause",
keywords = ["VHDL", "verilog", "EDA", "hdl", "rtl", "synthesis", "FPGA", "simulation", "Xilinx", "Altera"],
url = "https://github.com/olofk/edalize",
long_description=read('README.rst'),
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
"Topic :: Utilities",
],
install_requires=[
# 2.11.0 and .1 introduced an incompatible change in template output,
# which was fixed in 2.11.2 and later.
# https://github.com/pallets/jinja/issues/1138
'Jinja2 >=2.8, !=2.11.0, !=2.11.1',
],
tests_require=[
'pytest>=3.3.0',
'vunit_hdl>=4.0.8'
],
# The reporting modules have dependencies that shouldn't be required for
# all Edalize users.
extras_require={
"reporting": ["pyparsing", "numpy", "pandas"],
},
# Supported Python versions: 3.5+
python_requires=">=3.5, <4",
)
|
py | 1a3578ebd10324296ca038215244731fc1ecb6ce | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SqueezeNet implementation with TPU support.
Training loop and input pipeline.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
from hyperparameters import common_hparams_flags
from hyperparameters import common_tpu_flags
from hyperparameters import flags_to_params
from hyperparameters import params_dict
import data_pipeline
import squeezenet_model
from configs import squeezenet_config
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
common_tpu_flags.define_common_tpu_flags()
common_hparams_flags.define_common_hparams_flags()
flags.DEFINE_integer("num_examples_per_epoch", None,
"Number of examples to train per epoch.")
flags.DEFINE_integer("num_eval_examples", None,
"Number of examples to evaluate per run.")
flags.DEFINE_float("init_learning_rate", None, "Learning rate.")
flags.DEFINE_float("end_learning_rate", None, "The minimal end learning rate.")
flags.DEFINE_integer("num_epochs", None,
"Number of epochs of the training set to process.")
flags.DEFINE_integer("num_evals", None,
"How many times to run an evaluation during training.")
flags.DEFINE_integer(
"num_cores_per_replica", default=None,
help=("Number of TPU cores in total. For a single TPU device, this is 8"
" because each TPU has 4 chips each with 2 cores."))
flags.DEFINE_bool(
"use_async_checkpointing", default=None, help=("Enable async checkpoint"))
flags.DEFINE_integer(
"num_classes", default=None, help="Number of classes, at least 2")
FLAGS = flags.FLAGS
def main(unused_argv):
params = params_dict.ParamsDict(
squeezenet_config.SQUEEZENET_CFG,
squeezenet_config.SQUEEZENET_RESTRICTIONS)
params = params_dict.override_params_dict(
params, FLAGS.config_file, is_strict=True)
params = params_dict.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params = flags_to_params.override_params_from_input_flags(params, FLAGS)
total_steps = ((params.train.num_epochs * params.train.num_examples_per_epoch)
// params.train.train_batch_size)
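  # Worked example with hypothetical numbers: 10 epochs of 1,281,167 examples
  # at batch size 1024 gives (10 * 1281167) // 1024 = 12511 total training
  # steps.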
params.override({
"train": {
"total_steps": total_steps
},
"eval": {
"num_steps_per_eval": (total_steps // params.eval.num_evals)
},
}, is_strict=False)
params.validate()
params.lock()
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  save_checkpoints_steps = None
  if not params.use_async_checkpointing:
    save_checkpoints_steps = max(5000, params.train.iterations_per_loop)
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=params.model_dir,
save_checkpoints_steps=save_checkpoints_steps,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False),
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=params.train.iterations_per_loop,
num_shards=params.train.num_cores_per_replica,
),
)
estimator = contrib_tpu.TPUEstimator(
model_fn=squeezenet_model.model_fn,
use_tpu=params.use_tpu,
config=run_config,
train_batch_size=params.train.train_batch_size,
eval_batch_size=params.eval.eval_batch_size,
params=params.as_dict(),
)
for eval_cycle in range(params.eval.num_evals):
current_cycle_last_train_step = ((eval_cycle + 1) *
params.eval.num_steps_per_eval)
estimator.train(
input_fn=data_pipeline.InputReader(FLAGS.data_dir, is_training=True),
steps=current_cycle_last_train_step)
tf.logging.info("Running evaluation")
tf.logging.info("%s",
estimator.evaluate(
input_fn=data_pipeline.InputReader(
FLAGS.data_dir, is_training=False),
steps=(params.eval.num_eval_examples //
params.eval.eval_batch_size)
))
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
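# Example invocation (a sketch -- the file name "squeezenet_main.py" and the
# GCS paths are hypothetical; --tpu/--tpu_zone/--gcp_project come from
# common_tpu_flags, and --data_dir/--model_dir are assumed to be defined by
# the common hparams flags imported above):
#
#   python squeezenet_main.py --tpu=my-tpu \
#       --data_dir=gs://my-bucket/imagenet --model_dir=gs://my-bucket/model \
#       --num_epochs=10 --num_evals=5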
|
py | 1a3579089cc99e91421195a0f9a892a223acafd6 | #!/usr/bin/env python
'''
Color handling
Uses python colorsys module and code from:
http://code.activestate.com/recipes/266466/
'''
import re
import numpy as np
import colorsys
reg_html = re.compile(r"""^#?([0-9a-fA-F]|[0-9a-fA-F]{2})([0-9a-fA-F]|[0-9a-fA-F]{2})([0-9a-fA-F]|[0-9a-fA-F]{2})?$""")
def color_rgb_html(r, g, b):
"""
R, G, B values to #RRGGBB hex string.
Returns None on invalid input
"""
rgb = (r, g, b)
if max(rgb) <= 255 and min(rgb) >= 0:
htmlcolor = '#%02x%02x%02x' % rgb
return htmlcolor
return None
def color_html_rgb(colorstring):
"""#RRGGBB or #RGB hex string to (R,G,B)"""
colorstring = colorstring.strip()
    if len(colorstring) in (3, 6):
reg_m = reg_html.match(colorstring)
if reg_m:
return tuple([int(n, 16) for n in reg_m.groups()])
return None
def color_rgb_hsv(r, g, b):
'''R, G, B [0-255] to HSV [angle, %, %]'''
rgb = (r, g, b)
if max(rgb) <= 255 and min(rgb) >= 0:
r, g, b = [float(x) / 255. for x in rgb]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
return (h * 360., s, v)
return None
def color_hsv_rgb(h, s, v):
'''HSV [angle, %, %] to r, g, b [0-255]'''
if ((h >= 0. and h <= 360.) and
(min((s, v)) >= 0. and max((s, v)) <= 1.0)):
rgb = colorsys.hsv_to_rgb(h / 360., s, v)
return tuple([int(x * 255.) for x in rgb])
return None
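# Round-trip sketch: pure red survives the RGB -> HSV -> RGB conversion
# (hue 0 degrees, full saturation and value):
#
#   >>> color_rgb_hsv(255, 0, 0)
#   (0.0, 1.0, 1.0)
#   >>> color_hsv_rgb(0.0, 1.0, 1.0)
#   (255, 0, 0)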
if __name__ == "__main__":
tests_good = ["#FFF", "#FFFFFF", "FFF", "FFFFFF", "#003333"]
tests_bad = ["#GGG", "00H", "A", "FFF0", "GFGFGF", "FFFFFF00"]
print("test valid codes:")
for tst in tests_good:
        print(tst, color_html_rgb(tst))
print("\ntest invalid codes:")
for tst in tests_bad:
        print(tst, color_html_rgb(tst))
|
py | 1a357a3ced93edf8f65b21dde8f04b08849a4669 | import os
from .base import NullBrowser, ExecutorBrowser, require_arg
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorservo import ServoTestharnessExecutor, ServoRefTestExecutor, ServoWdspecExecutor # noqa: F401
here = os.path.join(os.path.split(__file__)[0])
__wptrunner__ = {
"product": "servo",
"check_args": "check_args",
"browser": "ServoBrowser",
"executor": {
"testharness": "ServoTestharnessExecutor",
"reftest": "ServoRefTestExecutor",
"wdspec": "ServoWdspecExecutor",
},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"update_properties": "update_properties",
}
def check_args(**kwargs):
require_arg(kwargs, "binary")
def browser_kwargs(test_type, run_info_data, config, **kwargs):
return {
"binary": kwargs["binary"],
"debug_info": kwargs["debug_info"],
"binary_args": kwargs["binary_args"],
"user_stylesheets": kwargs.get("user_stylesheets"),
"ca_certificate_path": config.ssl_config["ca_cert_path"],
}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
rv = base_executor_kwargs(test_type, server_config,
cache_manager, run_info_data, **kwargs)
rv["pause_after_test"] = kwargs["pause_after_test"]
if test_type == "wdspec":
rv["capabilities"] = {}
return rv
def env_extras(**kwargs):
return []
def env_options():
return {"server_host": "127.0.0.1",
"bind_address": False,
"testharnessreport": "testharnessreport-servo.js",
"supports_debugger": True}
def update_properties():
return ["debug", "os", "version", "processor", "bits"], None
class ServoBrowser(NullBrowser):
def __init__(self, logger, binary, debug_info=None, binary_args=None,
user_stylesheets=None, ca_certificate_path=None):
NullBrowser.__init__(self, logger)
self.binary = binary
self.debug_info = debug_info
self.binary_args = binary_args or []
self.user_stylesheets = user_stylesheets or []
self.ca_certificate_path = ca_certificate_path
def executor_browser(self):
return ExecutorBrowser, {
"binary": self.binary,
"debug_info": self.debug_info,
"binary_args": self.binary_args,
"user_stylesheets": self.user_stylesheets,
"ca_certificate_path": self.ca_certificate_path,
}
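# A rough sketch of how wptrunner is expected to consume the __wptrunner__
# registry above (the getattr-based resolution is an assumption inferred from
# the keys, not a verbatim copy of wptrunner internals):
#
#   import importlib
#   module = importlib.import_module("wptrunner.browsers.servo")
#   meta = module.__wptrunner__
#   check_args = getattr(module, meta["check_args"])
#   browser_cls = getattr(module, meta["browser"])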
|
py | 1a357aab456b81950da64e63a2e846655efc5fe6 | from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User, Group
from optparse import make_option
from sys import stdout
from csv import writer
FORMATS = [
'address',
'google',
'outlook',
'linkedin',
'vcard',
]
def full_name(first_name, last_name, username, **extra):
name = u" ".join(n for n in [first_name, last_name] if n)
if not name: return username
return name
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--group', '-g', action='store', dest='group', default=None,
help='Limit to users which are part of the supplied group name'),
make_option('--format', '-f', action='store', dest='format', default=FORMATS[0],
help="output format. May be one of '" + "', '".join(FORMATS) + "'."),
)
help = ("Export user email address list in one of a number of formats.")
args = "[output file]"
label = 'filename to save to'
requires_model_validation = True
can_import_settings = True
encoding = 'utf-8' # RED_FLAG: add as an option -DougN
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("extra arguments supplied")
group = options['group']
        if group and Group.objects.filter(name=group).count() != 1:
names = u"', '".join(g['name'] for g in Group.objects.values('name')).encode('utf-8')
if names: names = "'" + names + "'."
raise CommandError("Unknown group '" + group + "'. Valid group names are: " + names)
if len(args) and args[0] != '-':
            outfile = open(args[0], 'w')
else:
outfile = stdout
qs = User.objects.all().order_by('last_name', 'first_name', 'username', 'email')
if group: qs = qs.filter(group__name=group).distinct()
qs = qs.values('last_name', 'first_name', 'username', 'email')
getattr(self, options['format'])(qs, outfile)
def address(self, qs, out):
"""simple single entry per line in the format of:
"full name" <[email protected]>;
"""
out.write(u"\n".join(u'"%s" <%s>;' % (full_name(**ent), ent['email'])
for ent in qs).encode(self.encoding))
out.write("\n")
def google(self, qs, out):
"""CSV format suitable for importing into google GMail
"""
csvf = writer(out)
csvf.writerow(['Name', 'Email'])
for ent in qs:
csvf.writerow([full_name(**ent).encode(self.encoding),
ent['email'].encode(self.encoding)])
def outlook(self, qs, out):
"""CSV format suitable for importing into outlook
"""
csvf = writer(out)
columns = ['Name','E-mail Address','Notes','E-mail 2 Address','E-mail 3 Address',
'Mobile Phone','Pager','Company','Job Title','Home Phone','Home Phone 2',
'Home Fax','Home Address','Business Phone','Business Phone 2',
'Business Fax','Business Address','Other Phone','Other Fax','Other Address']
csvf.writerow(columns)
empty = [''] * (len(columns) - 2)
for ent in qs:
csvf.writerow([full_name(**ent).encode(self.encoding),
ent['email'].encode(self.encoding)] + empty)
def linkedin(self, qs, out):
"""CSV format suitable for importing into linkedin Groups.
perfect for pre-approving members of a linkedin group.
"""
csvf = writer(out)
csvf.writerow(['First Name', 'Last Name', 'Email'])
for ent in qs:
csvf.writerow([ent['first_name'].encode(self.encoding),
ent['last_name'].encode(self.encoding),
ent['email'].encode(self.encoding)])
def vcard(self, qs, out):
try:
import vobject
except ImportError:
            print(self.style.ERROR_OUTPUT("Please install python-vobject to use the vcard export format."))
import sys
sys.exit(1)
for ent in qs:
card = vobject.vCard()
card.add('fn').value = full_name(**ent)
if not ent['last_name'] and not ent['first_name']:
# fallback to fullname, if both first and lastname are not declared
card.add('n').value = vobject.vcard.Name(full_name(**ent))
else:
card.add('n').value = vobject.vcard.Name(ent['last_name'], ent['first_name'])
emailpart = card.add('email')
emailpart.value = ent['email']
emailpart.type_param = 'INTERNET'
out.write(card.serialize().encode(self.encoding))
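# Example invocations (a sketch; the management command name is assumed to
# match this module's file name, e.g. "export_emails"):
#
#   python manage.py export_emails                       # address format to stdout
#   python manage.py export_emails -f vcard contacts.vcf
#   python manage.py export_emails -g staff -f google staff.csv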
|
py | 1a357af87975544b92d50e26b4a15ad43338f4c6 | import _plotly_utils.basevalidators
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="yanchor", parent_name="layout.legend", **kwargs):
super(YanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["auto", "top", "middle", "bottom"]),
**kwargs
)
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="y", parent_name="layout.legend", **kwargs):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
max=kwargs.pop("max", 3),
min=kwargs.pop("min", -2),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="xanchor", parent_name="layout.legend", **kwargs):
super(XanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["auto", "left", "center", "right"]),
**kwargs
)
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="x", parent_name="layout.legend", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
max=kwargs.pop("max", 3),
min=kwargs.pop("min", -2),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ValignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="valign", parent_name="layout.legend", **kwargs):
super(ValignValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["top", "middle", "bottom"]),
**kwargs
)
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="uirevision", parent_name="layout.legend", **kwargs):
super(UirevisionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TraceorderValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="traceorder", parent_name="layout.legend", **kwargs):
super(TraceorderValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
extras=kwargs.pop("extras", ["normal"]),
flags=kwargs.pop("flags", ["reversed", "grouped"]),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class TracegroupgapValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="tracegroupgap", parent_name="layout.legend", **kwargs
):
super(TracegroupgapValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(self, plotly_name="title", parent_name="layout.legend", **kwargs):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Title"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this legend's title font.
side
Determines the location of legend's title with
respect to the legend items. Defaulted to "top"
with `orientation` is "h". Defaulted to "left"
with `orientation` is "v". The *top left*
options could be used to expand legend area in
both x and y sides.
text
Sets the title of the legend.
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class OrientationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="orientation", parent_name="layout.legend", **kwargs
):
super(OrientationValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["v", "h"]),
**kwargs
)
import _plotly_utils.basevalidators
class ItemsizingValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="itemsizing", parent_name="layout.legend", **kwargs):
super(ItemsizingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["trace", "constant"]),
**kwargs
)
import _plotly_utils.basevalidators
class ItemdoubleclickValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="itemdoubleclick", parent_name="layout.legend", **kwargs
):
super(ItemdoubleclickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["toggle", "toggleothers", False]),
**kwargs
)
import _plotly_utils.basevalidators
class ItemclickValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="itemclick", parent_name="layout.legend", **kwargs):
super(ItemclickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["toggle", "toggleothers", False]),
**kwargs
)
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="font", parent_name="layout.legend", **kwargs):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="borderwidth", parent_name="layout.legend", **kwargs
):
super(BorderwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="layout.legend", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="bgcolor", parent_name="layout.legend", **kwargs):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
py | 1a357b0fbe0665e9fcf0c229739eaccdd28cb15f | # -*- coding: utf-8 -*-
"""Implementation of a trie data structure.
`Trie data structure <http://en.wikipedia.org/wiki/Trie>`_, also known as radix
or prefix tree, is a tree associating keys to values where all the descendants
of a node have a common prefix (associated with that node).
The trie module contains :class:`pygtrie.Trie`, :class:`pygtrie.CharTrie` and
:class:`pygtrie.StringTrie` classes each implementing a mutable mapping
interface, i.e. :class:`dict` interface. As such, in most circumstances,
:class:`pygtrie.Trie` could be used as a drop-in replacement for
a :class:`dict`, but the prefix nature of the data structure is trie’s real
strength.
The module also contains :class:`pygtrie.PrefixSet` class which uses a trie to
store a set of prefixes such that a key is contained in the set if it or its
prefix is stored in the set.
Features
--------
- A full mutable mapping implementation.
- Supports iterating over as well as deleting a subtrie.
- Supports prefix checking as well as shortest and longest prefix
look-up.
- Extensible for any kind of user-defined keys.
- A PrefixSet supports “all keys starting with given prefix” logic.
- Can store any value including None.
For some simple examples see ``example.py`` file.
"""
__author__ = 'Michal Nazarewicz <[email protected]>'
__copyright__ = 'Copyright 2014 Google Inc.'
try:
import collections.abc as _collections
except ImportError:
import collections as _collections
# Python 2.x and 3.x compatibility stuff
if hasattr(dict, 'iteritems'):
# pylint: disable=invalid-name
_iteritems = lambda d: d.iteritems()
_iterkeys = lambda d: d.iterkeys()
def _sorted_iteritems(d):
"""Returns d's items in sorted order."""
items = d.items()
items.sort()
return iter(items)
else:
_sorted_iteritems = lambda d: sorted(d.items()) # pylint: disable=invalid-name
_iteritems = lambda d: iter(d.items()) # pylint: disable=invalid-name
_iterkeys = lambda d: iter(d.keys()) # pylint: disable=invalid-name
try:
_basestring = basestring
except NameError:
_basestring = str
class ShortKeyError(KeyError):
"""Raised when given key is a prefix of a longer key."""
pass
_SENTINEL = object()
class _Node(object):
"""A single node of a trie.
Stores value associated with the node and dictionary of children.
"""
__slots__ = ('children', 'value')
def __init__(self):
self.children = {}
self.value = _SENTINEL
def iterate(self, path, shallow, iteritems):
"""Yields all the nodes with values associated to them in the trie.
Args:
path: Path leading to this node. Used to construct the key when
returning value of this node and as a prefix for children.
shallow: Perform a shallow traversal, i.e. do not yield nodes if
their prefix has been yielded.
iteritems: A function taking dictionary as argument and returning
iterator over its items. Something other than dict.iteritems
may be given to enable sorting.
Yields:
``(path, value)`` tuples.
"""
# Use iterative function with stack on the heap so we don't hit Python's
# recursion depth limits.
node = self
stack = []
while True:
if node.value is not _SENTINEL:
yield path, node.value
if (not shallow or node.value is _SENTINEL) and node.children:
stack.append(iter(iteritems(node.children)))
path.append(None)
while True:
try:
step, node = next(stack[-1])
path[-1] = step
break
except StopIteration:
stack.pop()
path.pop()
except IndexError:
return
def traverse(self, node_factory, path_conv, path, iteritems):
"""Traverses the node and returns another type of node from factory.
Args:
node_factory: Callable function to construct new nodes.
path_conv: Callable function to convert node path to a key.
path: Current path for this node.
iteritems: A function taking dictionary as argument and returning
iterator over its items. Something other than dict.iteritems
may be given to enable sorting.
Returns:
An object constructed by calling node_factory(path_conv, path,
children, value=...), where children are constructed by node_factory
from the children of this node. There doesn't need to be 1:1
correspondence between original nodes in the trie and constructed
nodes (see make_test_node_and_compress in test.py).
"""
def children():
"""Recursively traverses all of node's children."""
for step, node in iteritems(self.children):
yield node.traverse(node_factory, path_conv, path + [step],
iteritems)
args = [path_conv, tuple(path), children()]
if self.value is not _SENTINEL:
args.append(self.value)
return node_factory(*args)
def __eq__(self, other):
# Like iterate, we don't recurse so this works on deep tries.
a, b = self, other
stack = []
while True:
if a.value != b.value or len(a.children) != len(b.children):
return False
if a.children:
stack.append((_iteritems(a.children), b.children))
while True:
try:
key, a = next(stack[-1][0])
b = stack[-1][1].get(key)
if b is None:
return False
break
except StopIteration:
stack.pop()
except IndexError:
return True
return self.value == other.value and self.children == other.children
def __ne__(self, other):
return not self.__eq__(other)
def __bool__(self):
return bool(self.value is not _SENTINEL or self.children)
__nonzero__ = __bool__
__hash__ = None
def __getstate__(self):
"""Get state used for pickling.
The state is encoded as a list of simple commands which consist of an
integer and some command-dependent number of arguments. The commands
modify what the current node is by navigating the trie up and down and
setting node values. Possible commands are:
* [n, step0, step1, ..., stepn-1, value], for n >= 0, specifies step
needed to reach the next current node as well as its new value. There
is no way to create a child node without setting its (or its
descendant's) value.
* [-n], for -n < 0, specifies to go up n steps in the trie.
When encoded as a state, the commands are flattened into a single list.
For example::
[ 0, 'Root',
2, 'Foo', 'Bar', 'Root/Foo/Bar Node',
-1,
1, 'Baz', 'Root/Foo/Baz Node',
-2,
1, 'Qux', 'Root/Qux Node' ]
Creates the following hierarchy::
-* value: Root
+-- Foo --* no value
| +-- Bar -- * value: Root/Foo/Bar Node
| +-- Baz -- * value: Root/Foo/Baz Node
+-- Qux -- * value: Root/Qux Node
Returns:
A pickable state which can be passed to :func:`_Node.__setstate__`
to reconstruct the node and its full hierarchy.
"""
# Like iterate, we don't recurse so pickling works on deep tries.
state = [] if self.value is _SENTINEL else [0]
last_cmd = 0
node = self
stack = []
while True:
if node.value is not _SENTINEL:
last_cmd = 0
state.append(node.value)
stack.append(_iteritems(node.children))
while True:
try:
step, node = next(stack[-1])
except StopIteration:
if last_cmd < 0:
state[-1] -= 1
else:
last_cmd = -1
state.append(-1)
stack.pop()
continue
except IndexError:
if last_cmd < 0:
state.pop()
return state
if last_cmd > 0:
last_cmd += 1
state[-last_cmd] += 1
else:
last_cmd = 1
state.append(1)
state.append(step)
break
def __setstate__(self, state):
"""Unpickles node. See :func:`_Node.__getstate__`."""
self.__init__()
state = iter(state)
stack = [self]
for cmd in state:
if cmd < 0:
del stack[cmd:]
else:
while cmd > 0:
stack.append(type(self)())
stack[-2].children[next(state)] = stack[-1]
cmd -= 1
stack[-1].value = next(state)
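# Pickling sketch: the command-list encoding implemented by __getstate__ and
# __setstate__ above means tries survive a round trip through the standard
# pickle module, e.g.
#
#   import pickle
#   t = Trie()
#   t['foo'] = 42
#   assert pickle.loads(pickle.dumps(t)) == t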
_NONE_PAIR = type('NonePair', (tuple,), {
'__nonzero__': lambda _: False,
'__bool__': lambda _: False,
'__slots__': (),
})((None, None))
class Trie(_collections.MutableMapping):
"""A trie implementation with dict interface plus some extensions.
Keys used with the :class:`pygtrie.Trie` must be iterable, yielding hashable
objects. In other words, for a given key, ``dict.fromkeys(key)`` must be
valid.
In particular, strings work fine as trie keys, however when getting keys
back from iterkeys() method for example, instead of strings, tuples of
characters are produced. For that reason, :class:`pygtrie.CharTrie` or
:class:`pygtrie.StringTrie` may be preferred when using
:class:`pygtrie.Trie` with string keys.
"""
def __init__(self, *args, **kwargs):
"""Initialises the trie.
Arguments are interpreted the same way :func:`Trie.update` interprets
them.
"""
self._root = _Node()
self._sorted = False
self.update(*args, **kwargs)
@property
def _iteritems(self):
"""Returns function yielding over dict's items possibly in sorted order.
Returns:
A function iterating over items of a dictionary given as an
argument. If child nodes sorting has been enabled (via
:func:`Trie.enable_sorting` method), returned function will go
            through the items in sorted order.
"""
return _sorted_iteritems if self._sorted else _iteritems
def enable_sorting(self, enable=True):
"""Enables sorting of child nodes when iterating and traversing.
Normally, child nodes are not sorted when iterating or traversing over
the trie (just like dict elements are not sorted). This method allows
sorting to be enabled (which was the behaviour prior to pygtrie 2.0
release).
For Trie class, enabling sorting of children is identical to simply
sorting the list of items since Trie returns keys as tuples. However,
        for other implementations such as StringTrie the two may behave subtly
        differently. For example, sorting items might produce::
root/foo-bar
root/foo/baz
even though foo comes before foo-bar.
Args:
enable: Whether to enable sorting of child nodes.
"""
self._sorted = enable
def clear(self):
"""Removes all the values from the trie."""
self._root = _Node()
def update(self, *args, **kwargs):
"""Updates stored values. Works like :func:`dict.update`."""
if len(args) > 1:
raise ValueError('update() takes at most one positional argument, '
'%d given.' % len(args))
# We have this here instead of just letting MutableMapping.update()
# handle things because it will iterate over keys and for each key
# retrieve the value. With Trie, this may be expensive since the path
# to the node would have to be walked twice. Instead, we have our own
# implementation where iteritems() is used avoiding the unnecessary
# value look-up.
if args and isinstance(args[0], Trie):
for key, value in _iteritems(args[0]):
self[key] = value
args = ()
super(Trie, self).update(*args, **kwargs)
def copy(self):
"""Returns a shallow copy of the trie."""
return self.__class__(self)
@classmethod
def fromkeys(cls, keys, value=None):
"""Creates a new trie with given keys set.
This is roughly equivalent to calling the constructor with a ``(key,
value) for key in keys`` generator.
Args:
keys: An iterable of keys that should be set in the new trie.
value: Value to associate with given keys.
Returns:
A new trie where each key from ``keys`` has been set to the given
value.
"""
trie = cls()
for key in keys:
trie[key] = value
return trie
def _get_node(self, key, create=False):
"""Returns node for given key. Creates it if requested.
Args:
key: A key to look for.
create: Whether to create the node if it does not exist.
Returns:
``(node, trace)`` tuple where ``node`` is the node for given key and
``trace`` is a list specifying path to reach the node including all
the encountered nodes. Each element of trace is a ``(step, node)``
tuple where ``step`` is a step from parent node to given node and
``node`` is node on the path. The first element of the path is
always ``(None, self._root)``.
Raises:
KeyError: If there is no node for the key and ``create`` is
``False``.
"""
node = self._root
trace = [(None, node)]
for step in self.__path_from_key(key):
if create:
node = node.children.setdefault(step, _Node())
else:
node = node.children.get(step)
if not node:
raise KeyError(key)
trace.append((step, node))
return node, trace
def __iter__(self):
return self.iterkeys()
# pylint: disable=arguments-differ
def iteritems(self, prefix=_SENTINEL, shallow=False):
"""Yields all nodes with associated values with given prefix.
Only nodes with values are output. For example::
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo'] = 'Foo'
>>> t['foo/bar/baz'] = 'Baz'
>>> t['qux'] = 'Qux'
>>> t.items()
[('qux', 'Qux'), ('foo', 'Foo'), ('foo/bar/baz', 'Baz')]
Items are generated in topological order but the order of siblings is
unspecified by default. In other words, in the above example, the
``('qux', 'Qux')`` pair might have been at the end of the list. At an
expense of efficiency, this can be changed via
:func:`Trie.enable_sorting`.
With ``prefix`` argument, only items with specified prefix are generated
(i.e. only given subtrie is traversed) as demonstrated by::
>>> t.items(prefix='foo/bar')
[('foo/bar/baz', 'Baz')]
With ``shallow`` argument, if a node has value associated with it, it's
children are not traversed even if they exist which can be seen in::
>>> t.items(shallow=True)
[('qux', 'Qux'), ('foo', 'Foo')]
Args:
prefix: Prefix to limit iteration to.
shallow: Perform a shallow traversal, i.e. do not yield items if
their prefix has been yielded.
Yields:
``(key, value)`` tuples.
Raises:
KeyError: If ``prefix`` does not match any node.
"""
node, _ = self._get_node(prefix)
for path, value in node.iterate(list(self.__path_from_key(prefix)),
shallow, self._iteritems):
yield (self._key_from_path(path), value)
def iterkeys(self, prefix=_SENTINEL, shallow=False):
"""Yields all keys having associated values with given prefix.
This is equivalent to taking first element of tuples generated by
:func:`Trie.iteritems` which see for more detailed documentation.
Args:
prefix: Prefix to limit iteration to.
shallow: Perform a shallow traversal, i.e. do not yield keys if
their prefix has been yielded.
Yields:
All the keys (with given prefix) with associated values in the trie.
Raises:
KeyError: If ``prefix`` does not match any node.
"""
for key, _ in self.iteritems(prefix=prefix, shallow=shallow):
yield key
def itervalues(self, prefix=_SENTINEL, shallow=False):
"""Yields all values associated with keys with given prefix.
This is equivalent to taking second element of tuples generated by
:func:`Trie.iteritems` which see for more detailed documentation.
Args:
prefix: Prefix to limit iteration to.
shallow: Perform a shallow traversal, i.e. do not yield values if
their prefix has been yielded.
Yields:
All the values associated with keys (with given prefix) in the trie.
Raises:
KeyError: If ``prefix`` does not match any node.
"""
node, _ = self._get_node(prefix)
for _, value in node.iterate(list(self.__path_from_key(prefix)),
shallow, self._iteritems):
yield value
def items(self, prefix=_SENTINEL, shallow=False):
"""Returns a list of ``(key, value)`` pairs in given subtrie.
This is equivalent to constructing a list from generator returned by
:func:`Trie.iteritems` which see for more detailed documentation.
"""
return list(self.iteritems(prefix=prefix, shallow=shallow))
def keys(self, prefix=_SENTINEL, shallow=False):
"""Returns a list of all the keys, with given prefix, in the trie.
This is equivalent to constructing a list from generator returned by
:func:`Trie.iterkeys` which see for more detailed documentation.
"""
return list(self.iterkeys(prefix=prefix, shallow=shallow))
def values(self, prefix=_SENTINEL, shallow=False):
"""Returns a list of values in given subtrie.
This is equivalent to constructing a list from generator returned by
        :func:`Trie.itervalues` which see for more detailed documentation.
"""
return list(self.itervalues(prefix=prefix, shallow=shallow))
# pylint: enable=arguments-differ
def __len__(self):
"""Returns number of values in a trie.
Note that this method is expensive as it iterates over the whole trie.
"""
return sum(1 for _ in self.itervalues())
def __nonzero__(self):
return bool(self._root)
HAS_VALUE = 1
HAS_SUBTRIE = 2
def has_node(self, key):
"""Returns whether given node is in the trie.
Return value is a bitwise or of ``HAS_VALUE`` and ``HAS_SUBTRIE``
constants indicating node has a value associated with it and that it is
a prefix of another existing key respectively. Both of those are
independent of each other and all of the four combinations are possible.
For example::
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo/bar'] = 'Bar'
>>> t['foo/bar/baz'] = 'Baz'
>>> t.has_node('qux') == 0
True
>>> t.has_node('foo/bar/baz') == pygtrie.Trie.HAS_VALUE
True
>>> t.has_node('foo') == pygtrie.Trie.HAS_SUBTRIE
True
>>> t.has_node('foo/bar') == (pygtrie.Trie.HAS_VALUE |
... pygtrie.Trie.HAS_SUBTRIE)
True
        There are two higher level methods built on top of this one which give
        an easier interface for the information. :func:`Trie.has_key` returns
        whether a node has a value associated with it and :func:`Trie.has_subtrie`
checks whether node is a prefix. Continuing previous example::
>>> t.has_key('qux'), t.has_subtrie('qux')
False, False
>>> t.has_key('foo/bar/baz'), t.has_subtrie('foo/bar/baz')
True, False
>>> t.has_key('foo'), t.has_subtrie('foo')
False, True
>>> t.has_key('foo/bar'), t.has_subtrie('foo/bar')
True, True
Args:
key: A key to look for.
Returns:
Non-zero if node exists and if it does a bit-field denoting whether
it has a value associated with it and whether it has a subtrie.
"""
try:
node, _ = self._get_node(key)
except KeyError:
return 0
return ((self.HAS_VALUE * int(node.value is not _SENTINEL)) |
(self.HAS_SUBTRIE * int(bool(node.children))))
def has_key(self, key):
"""Indicates whether given key has value associated with it.
See :func:`Trie.has_node` for more detailed documentation.
"""
return bool(self.has_node(key) & self.HAS_VALUE)
def has_subtrie(self, key):
"""Returns whether given key is a prefix of another key in the trie.
See :func:`Trie.has_node` for more detailed documentation.
"""
return bool(self.has_node(key) & self.HAS_SUBTRIE)
@staticmethod
def _slice_maybe(key_or_slice):
"""Checks whether argument is a slice or a plain key.
Args:
key_or_slice: A key or a slice to test.
Returns:
``(key, is_slice)`` tuple. ``is_slice`` indicates whether
``key_or_slice`` is a slice and ``key`` is either ``key_or_slice``
itself (if it's not a slice) or slice's start position.
Raises:
TypeError: If ``key_or_slice`` is a slice whose stop or step are not
``None`` In other words, only ``[key:]`` slices are valid.
"""
if isinstance(key_or_slice, slice):
if key_or_slice.stop is not None or key_or_slice.step is not None:
raise TypeError(key_or_slice)
return key_or_slice.start, True
return key_or_slice, False
def __getitem__(self, key_or_slice):
"""Returns value associated with given key or raises KeyError.
When argument is a single key, value for that key is returned (or
:class:`KeyError` exception is thrown if the node does not exist or has
no value associated with it).
When argument is a slice, it must be one with only `start` set in which
case the access is identical to :func:`Trie.itervalues` invocation with
prefix argument.
Example:
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo/bar'] = 'Bar'
>>> t['foo/baz'] = 'Baz'
>>> t['qux'] = 'Qux'
>>> t['foo/bar']
'Bar'
>>> list(t['foo':])
['Baz', 'Bar']
>>> t['foo']
Traceback (most recent call last):
...
pygtrie.ShortKeyError: 'foo'
Args:
key_or_slice: A key or a slice to look for.
Returns:
If a single key is passed, a value associated with given key. If
a slice is passed, a generator of values in specified subtrie.
Raises:
ShortKeyError: If the key has no value associated with it but is
a prefix of some key with a value. Note that
:class:`ShortKeyError` is subclass of :class:`KeyError`.
KeyError: If key has no value associated with it nor is a prefix of
an existing key.
TypeError: If ``key_or_slice`` is a slice but it's stop or step are
not ``None``.
"""
if self._slice_maybe(key_or_slice)[1]:
return self.itervalues(key_or_slice.start)
node, _ = self._get_node(key_or_slice)
if node.value is _SENTINEL:
raise ShortKeyError(key_or_slice)
return node.value
def _set(self, key, value, only_if_missing=False, clear_children=False):
"""Sets value for a given key.
Args:
key: Key to set value of.
value: Value to set to.
only_if_missing: If ``True``, value won't be changed if the key is
already associated with a value.
clear_children: If ``True``, all children of the node, if any, will
be removed.
Returns:
Value of the node.
"""
node, _ = self._get_node(key, create=True)
if not only_if_missing or node.value is _SENTINEL:
node.value = value
if clear_children:
node.children.clear()
return node.value
def __setitem__(self, key_or_slice, value):
"""Sets value associated with given key.
If `key_or_slice` is a key, simply associate it with given value. If it
is a slice (which must have `start` set only), it in addition clears any
subtrie that might have been attached to particular key. For example::
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo/bar'] = 'Bar'
>>> t['foo/baz'] = 'Baz'
>>> t.keys()
['foo/baz', 'foo/bar']
>>> t['foo':] = 'Foo'
>>> t.keys()
['foo']
Args:
key_or_slice: A key to look for or a slice. If it is a slice, the
whole subtrie (if present) will be replaced by a single node
with given value set.
value: Value to set.
Raises:
TypeError: If key is a slice whose stop or step are not None.
"""
key, is_slice = self._slice_maybe(key_or_slice)
self._set(key, value, clear_children=is_slice)
def setdefault(self, key, value=None):
"""Sets value of a given node if not set already. Also returns it.
In contrast to :func:`Trie.__setitem__`, this method does not accept
slice as a key.
"""
return self._set(key, value, only_if_missing=True)
@staticmethod
def _cleanup_trace(trace):
"""Removes empty nodes present on specified trace.
Args:
trace: Trace to the node to cleanup as returned by
:func:`Trie._get_node`.
"""
i = len(trace) - 1 # len(path) >= 1 since root is always there
step, node = trace[i]
while i and not node:
i -= 1
parent_step, parent = trace[i]
del parent.children[step]
step, node = parent_step, parent
def _pop_from_node(self, node, trace, default=_SENTINEL):
"""Removes a value from given node.
Args:
node: Node to get value of.
trace: Trace to that node as returned by :func:`Trie._get_node`.
default: A default value to return if node has no value set.
Returns:
Value of the node or ``default``.
Raises:
ShortKeyError: If the node has no value associated with it and
``default`` has not been given.
"""
if node.value is not _SENTINEL:
value = node.value
node.value = _SENTINEL
self._cleanup_trace(trace)
return value
elif default is _SENTINEL:
raise ShortKeyError()
else:
return default
def pop(self, key, default=_SENTINEL):
"""Deletes value associated with given key and returns it.
Args:
key: A key to look for.
default: If specified, value that will be returned if given key has
no value associated with it. If not specified, method will
throw KeyError in such cases.
Returns:
Removed value, if key had value associated with it, or ``default``
(if given).
Raises:
ShortKeyError: If ``default`` has not been specified and the key has
no value associated with it but is a prefix of some key with
a value. Note that :class:`ShortKeyError` is subclass of
:class:`KeyError`.
KeyError: If default has not been specified and key has no value
associated with it nor is a prefix of an existing key.
"""
try:
return self._pop_from_node(*self._get_node(key))
except KeyError:
if default is not _SENTINEL:
return default
raise
def popitem(self):
"""Deletes an arbitrary value from the trie and returns it.
There is no guarantee as to which item is deleted and returned. Neither
in respect to its lexicographical nor topological order.
Returns:
``(key, value)`` tuple indicating deleted key.
Raises:
KeyError: If the trie is empty.
"""
if not self:
raise KeyError()
node = self._root
trace = [(None, node)]
while node.value is _SENTINEL:
step = next(_iterkeys(node.children))
node = node.children[step]
trace.append((step, node))
return (self._key_from_path((step for step, _ in trace[1:])),
self._pop_from_node(node, trace))
def __delitem__(self, key_or_slice):
"""Deletes value associated with given key or raises KeyError.
If argument is a key, value associated with it is deleted. If the key
is also a prefix, its descendents are not affected. On the other hand,
if the argument is a slice (in which case it must have only start set),
the whole subtrie is removed. For example::
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo'] = 'Foo'
>>> t['foo/bar'] = 'Bar'
>>> t['foo/bar/baz'] = 'Baz'
>>> del t['foo/bar']
>>> t.keys()
['foo', 'foo/bar/baz']
>>> del t['foo':]
>>> t.keys()
[]
Args:
key_or_slice: A key to look for or a slice. If key is a slice, the
whole subtrie will be removed.
Raises:
ShortKeyError: If the key has no value associated with it but is
            a prefix of some key with a value. This is not thrown if
key_or_slice is a slice -- in such cases, the whole subtrie is
removed. Note that :class:`ShortKeyError` is subclass of
:class:`KeyError`.
KeyError: If key has no value associated with it nor is a prefix of
an existing key.
TypeError: If key is a slice whose stop or step are not ``None``.
"""
key, is_slice = self._slice_maybe(key_or_slice)
node, trace = self._get_node(key)
if is_slice:
node.children.clear()
elif node.value is _SENTINEL:
raise ShortKeyError(key)
node.value = _SENTINEL
self._cleanup_trace(trace)
def prefixes(self, key):
"""Walks towards the node specified by key and yields all found items.
Example:
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo'] = 'Foo'
>>> t['foo/bar/baz'] = 'Baz'
>>> list(t.prefixes('foo/bar/baz/qux'))
[('foo', 'Foo'), ('foo/bar/baz', 'Baz')]
>>> list(t.prefixes('does/not/exist'))
[]
Args:
key: Key to look for.
Yields:
``(k, value)`` pairs denoting keys with associated values
encountered on the way towards the specified key.
"""
node = self._root
path = self.__path_from_key(key)
pos = 0
while True:
if node.value is not _SENTINEL:
yield self._key_from_path(path[:pos]), node.value
if pos == len(path):
break
node = node.children.get(path[pos])
if not node:
break
pos += 1
def shortest_prefix(self, key):
"""Finds the shortest prefix of a key with a value.
This is equivalent to taking the first object yielded by
:func:`Trie.prefixes` with a default of `(None, None)` if said method
yields no items. As an added bonus, the pair in that case will be
a falsy value (as opposed to regular two-element tuple of ``None``
values).
Example:
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo'] = 'Foo'
>>> t['foo/bar/baz'] = 'Baz'
>>> t.shortest_prefix('foo/bar/baz/qux')
('foo', 'Foo')
>>> t.shortest_prefix('does/not/exist')
(None, None)
>>> bool(t.shortest_prefix('does/not/exist'))
False
Args:
key: Key to look for.
Returns:
``(k, value)`` where ``k`` is the shortest prefix of ``key`` (it may
equal ``key``) and ``value`` is a value associated with that key.
If no node is found, ``(None, None)`` is returned.
"""
return next(self.prefixes(key), _NONE_PAIR)
def longest_prefix(self, key):
"""Finds the longest prefix of a key with a value.
This is equivalent to taking the last object yielded by
:func:`Trie.prefixes` with a default of `(None, None)` if said method
yields no items. As an added bonus, the pair in that case will be
a falsy value (as opposed to regular two-element tuple of ``None``
values).
Example:
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo'] = 'Foo'
>>> t['foo/bar/baz'] = 'Baz'
>>> t.longest_prefix('foo/bar/baz/qux')
('foo/bar/baz', 'Baz')
>>> t.longest_prefix('does/not/exist')
(None, None)
>>> bool(t.longest_prefix('does/not/exist'))
False
Args:
key: Key to look for.
Returns:
``(k, value)`` where ``k`` is the longest prefix of ``key`` (it may
equal ``key``) and ``value`` is a value associated with that key.
If no node is found, ``(None, None)`` is returned.
"""
ret = _NONE_PAIR
for ret in self.prefixes(key):
pass
return ret
def __eq__(self, other):
return self._root == other._root # pylint: disable=protected-access
def __ne__(self, other):
return self._root != other._root # pylint: disable=protected-access
def __str__(self):
return 'Trie(%s)' % (
', '.join('%s: %s' % item for item in self.iteritems()))
def __repr__(self):
if self:
return 'Trie((%s,))' % (
', '.join('(%r, %r)' % item for item in self.iteritems()))
else:
return 'Trie()'
def __path_from_key(self, key):
"""Converts a user visible key object to internal path representation.
Args:
key: User supplied key or ``_SENTINEL``.
Returns:
An empty tuple if ``key`` was ``_SENTINEL``, otherwise whatever
:func:`Trie._path_from_key` returns.
Raises:
TypeError: If ``key`` is of invalid type.
"""
return () if key is _SENTINEL else self._path_from_key(key)
def _path_from_key(self, key): # pylint: disable=no-self-use
"""Converts a user visible key object to internal path representation.
The default implementation simply returns key.
Args:
key: User supplied key.
Returns:
A path, which is an iterable of steps. Each step must be hashable.
Raises:
TypeError: If key is of invalid type.
"""
return key
def _key_from_path(self, path): # pylint: disable=no-self-use
"""Converts an internal path into a user visible key object.
The default implementation creates a tuple from the path.
Args:
path: Internal path representation.
Returns:
A user visible key object.
"""
return tuple(path)
def traverse(self, node_factory, prefix=_SENTINEL):
"""Traverses the tree using node_factory object.
node_factory is a callable function which accepts (path_conv, path,
children, value=...) arguments, where path_conv is a lambda converting
path representation to key, path is the path to this node, children is
an iterable of children nodes constructed by node_factory, optional
value is the value associated with the path.
node_factory's children argument is a generator which has a few
consequences:
* To traverse into node's children, the generator must be iterated over.
This can by accomplished by a simple "children = list(children)"
statement.
* Ignoring the argument allows node_factory to stop the traversal from
going into the children of the node. In other words, whole subtrie
can be removed from traversal if node_factory chooses so.
* If children is stored as is (i.e. as a generator) when it is iterated
over later on it will see state of the trie as it is during the
iteration and not when traverse method was called.
:func:`Trie.traverse` has two advantages over :func:`Trie.iteritems` and
similar methods:
1. it allows subtries to be skipped completely when going through the
list of nodes based on the property of the parent node; and
2. it represents structure of the trie directly making it easy to
convert structure into a different representation.
For example, the below snippet prints all files in current directory
counting how many HTML files were found but ignores hidden files and
directories (i.e. those whose names start with a dot)::
import os
import pygtrie
t = pygtrie.StringTrie(separator=os.sep)
# Construct a trie with all files in current directory and all
# of its sub-directories. Files get set a True value.
# Directories are represented implicitly by being prefixes of
# files.
for root, _, files in os.walk('.'):
for name in files: t[os.path.join(root, name)] = True
def traverse_callback(path_conv, path, children, is_file=False):
if path and path[-1] != '.' and path[-1][0] == '.':
# Ignore hidden directory (but accept root node and '.')
return 0
elif is_file:
print path_conv(path)
return int(path[-1].endswith('.html'))
else:
# Otherwise, it's a directory. Traverse into children.
return sum(int(is_html) for is_html in children)
print t.traverse(traverse_callback)
As documented, ignoring the children argument causes subtrie to be
omitted and not walked into.
In the next example, the trie is converted to a tree representation
where child nodes include a pointer to their parent. As before, hidden
files and directories are ignored::
import os
import pygtrie
t = pygtrie.StringTrie(separator=os.sep)
for root, _, files in os.walk('.'):
for name in files: t[os.path.join(root, name)] = True
class File(object):
def __init__(self, name):
self.name = name
self.parent = None
class Directory(File):
def __init__(self, name, children):
super(Directory, self).__init__(name)
self._children = children
for child in children:
child.parent = self
def traverse_callback(path_conv, path, children, is_file=False):
if not path or path[-1] == '.' or path[-1][0] != '.':
if is_file:
return File(path[-1])
children = filter(None, children)
return Directory(path[-1] if path else '', children)
root = t.traverse(traverse_callback)
Note: Unlike iterators, traverse method uses stack recursion which means
that using it on deep tries may lead to a RuntimeError exception thrown
once Python's maximum recursion depth is reached.
Args:
node_factory: Makes opaque objects from the keys and values of the
trie.
prefix: Prefix for node to start traversal, by default starts at
root.
Returns:
Node object constructed by node_factory corresponding to the root
node.
"""
node, _ = self._get_node(prefix)
return node.traverse(node_factory, self._key_from_path,
list(self.__path_from_key(prefix)),
self._iteritems)
class CharTrie(Trie):
"""A variant of a :class:`pygtrie.Trie` which accepts strings as keys.
The only difference between :class:`pygtrie.CharTrie` and
:class:`pygtrie.Trie` is that when :class:`pygtrie.CharTrie` returns keys
back to the client (for instance in keys() method is called), those keys are
returned as strings.
Canonical example where this class can be used is a dictionary of words in
a natural language. For example::
>>> import pygtrie
>>> t = pygtrie.CharTrie()
>>> t['wombat'] = True
>>> t['woman'] = True
>>> t['man'] = True
>>> t['manhole'] = True
>>> t.has_subtrie('wo')
True
>>> t.has_key('man')
True
>>> t.has_subtrie('man')
True
>>> t.has_subtrie('manhole')
False
"""
def _key_from_path(self, path):
return ''.join(path)
class StringTrie(Trie):
""":class:`pygtrie.Trie` variant accepting strings with a separator as keys.
The trie accepts strings as keys which are split into components using
a separator specified during initialisation ("/" by default).
Canonical example where this class can be used is when keys are paths. For
example, it could map from a path to a request handler::
import pygtrie
def handle_root(): pass
def handle_admin(): pass
def handle_admin_images(): pass
handlers = pygtrie.StringTrie()
handlers[''] = handle_root
handlers['/admin'] = handle_admin
handlers['/admin/images'] = handle_admin_images
request_path = '/admin/images/foo'
handler = handlers.longest_prefix(request_path)
"""
def __init__(self, *args, **kwargs):
"""Initialises the trie.
Except for a ``separator`` named argument, all other arguments are
interpreted the same way :func:`Trie.update` interprets them.
Args:
*args: Passed to super class initialiser.
**kwargs: Passed to super class initialiser.
separator: A separator to use when splitting keys into paths used by
the trie. "/" is used if this argument is not specified. This
named argument is not specified on the function's prototype
because of Python's limitations.
"""
separator = kwargs.pop('separator', '/')
if not isinstance(separator, _basestring):
raise TypeError('separator must be a string')
if not separator:
raise ValueError('separator can not be empty')
self._separator = separator
super(StringTrie, self).__init__(*args, **kwargs)
@classmethod
def fromkeys(cls, keys, value=None, separator='/'): # pylint: disable=arguments-differ
trie = cls(separator=separator)
for key in keys:
trie[key] = value
return trie
def _path_from_key(self, key):
return key.split(self._separator)
def _key_from_path(self, path):
return self._separator.join(path)
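# Conversion sketch: with the default "/" separator, StringTrie stores the
# key 'foo/bar' internally as the path ['foo', 'bar'] and joins it back on
# the way out:
#
#   t = StringTrie()
#   t['foo/bar'] = 1
#   list(t.iterkeys())  # -> ['foo/bar']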
class PrefixSet(_collections.MutableSet): # pylint: disable=abstract-class-not-used
"""A set of prefixes.
:class:`pygtrie.PrefixSet` works similar to a normal set except it is said
    to contain a key if the key or its prefix is stored in the set. For
instance, if "foo" is added to the set, the set contains "foo" as well as
"foobar".
The set supports addition of elements but does *not* support removal of
elements. This is because there's no obvious consistent and intuitive
behaviour for element deletion.
"""
def __init__(self, iterable=None, factory=Trie, **kwargs):
"""Initialises the prefix set.
Args:
iterable: A sequence of keys to add to the set.
factory: A function used to create a trie used by the
:class:`pygtrie.PrefixSet`.
kwargs: Additional keyword arguments passed to the factory function.
"""
super(PrefixSet, self).__init__()
trie = factory(**kwargs)
if iterable:
trie.update((key, True) for key in iterable)
self._trie = trie
def copy(self):
"""Returns a copy of the prefix set."""
return self.__class__(self._trie)
def clear(self):
"""Removes all keys from the set."""
self._trie.clear()
def __contains__(self, key):
"""Checks whether set contains key or its prefix."""
return bool(self._trie.shortest_prefix(key)[1])
def __iter__(self):
"""Return iterator over all prefixes in the set.
See :func:`PrefixSet.iter` method for more info.
"""
return self._trie.iterkeys()
def iter(self, prefix=_SENTINEL):
"""Iterates over all keys in the set optionally starting with a prefix.
Since a key does not have to be explicitly added to the set to be an
element of the set, this method does not iterate over all possible keys
that the set contains, but only over the shortest set of prefixes of all
the keys the set contains.
For example, if "foo" has been added to the set, the set contains also
"foobar", but this method will *not* iterate over "foobar".
If ``prefix`` argument is given, method will iterate over keys with
        given prefix only. The keys yielded by the function when a prefix is
        given do not have to be a subset (in the mathematical sense) of the
        keys yielded when there is no prefix. This happens if the set contains
a prefix of the given prefix.
For example, if only "foo" has been added to the set, iter method called
with no arguments will yield "foo" only. However, when called with
"foobar" argument, it will yield "foobar" only.
"""
if prefix is _SENTINEL:
return iter(self)
elif self._trie.has_node(prefix):
return self._trie.iterkeys(prefix=prefix)
elif prefix in self:
# Make sure the type of returned keys is consistent.
# pylint: disable=protected-access
return self._trie._key_from_path(self._trie._path_from_key(prefix)),
else:
return ()
def __len__(self):
"""Returns number of keys stored in the set.
Since a key does not have to be explicitly added to the set to be an
element of the set, this method does not count all possible keys that the
set contains (since that would be infinite), but only the shortest set of
prefixes of all the keys the set contains.
For example, if "foo" has been added to the set, the set contains also
"foobar", but this method will *not* count "foobar".
"""
return len(self._trie)
def add(self, key):
"""Adds given key to the set.
If the set already contains a prefix of the key being added, this
operation has no effect. If the key being added is a prefix of some
existing keys in the set, those keys are deleted and replaced by
a single entry for the key being added.
For example, if the set contains key "foo" adding a key "foobar" does
not change anything. On the other hand, if the set contains keys
"foobar" and "foobaz", adding a key "foo" will replace those two keys
with a single key "foo".
This makes a difference when iterating over the keys or counting the
number of keys. Counterintuitively, adding a key can *decrease* the size
of the set.
Args:
key: Key to add.
"""
if key not in self:
self._trie[key:] = True
def discard(self, key):
raise NotImplementedError(
'Removing keys from PrefixSet is not implemented.')
def remove(self, key):
raise NotImplementedError(
'Removing keys from PrefixSet is not implemented.')
def pop(self):
raise NotImplementedError(
'Removing keys from PrefixSet is not implemented.')
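# --- Illustrative sketch (not part of the original module): a tiny demo of
# the prefix semantics documented above, using StringTrie as the factory so
# keys are split on '/'.
if __name__ == '__main__':
    ps = PrefixSet(factory=StringTrie)
    ps.add('/foo/bar')
    ps.add('/foo/baz')
    assert len(ps) == 2
    assert '/foo/bar/quux' in ps  # contained via its prefix '/foo/bar'
    ps.add('/foo')                # replaces both stored keys
    assert len(ps) == 1           # adding a key *decreased* the set's size
    assert '/foo/anything' in ps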
|
py | 1a357b4bed719209bffbbab7e424bf6fe129067c | #Navraj Chohan
import sys
import math
import appscale_datastore
import helper_functions
import os
import time
hf = helper_functions
if "LOCAL_DB_IP" not in os.environ:
os.environ["LOCAL_DB_IP"] = "localhost"
datastore_type = "xxx"
def usage():
print " -t for type of datastore"
for ii in range(1,len(sys.argv)):
if sys.argv[ii] in ("-h", "--help"):
print "help menu:"
usage()
sys.exit()
elif sys.argv[ii] in ('-a', "--apps"):
print "apps location set to ",sys.argv[ii+ 1]
app_location = sys.argv[ii + 1]
ii += 1
elif sys.argv[ii] in ('-t', "--type"):
print "setting datastore type to ",sys.argv[ii+1]
datastore_type = sys.argv[ii + 1]
ii += 1
else:
pass
NUM_COLUMNS = 1
def err(test_num, code):
print "Failed for test at " + sys.argv[0] + ":" + str(test_num) \
+ " with a return of: " + str(code)
exit(1)
def getTotal(points):
total = 0
for ii in points:
total += float(ii)
return total
def getAverage(points, total = None):
if total is None:
total = getTotal(points)
if len(points) == 0:
return 0
return total/len(points)
def getStDev(points, average=None):
total = 0
if average is None:
average = getAverage(points)
for ii in points:
total += (float(ii) - average) * (float(ii) - average)
if len(points) == 0:
return 0
return math.sqrt(total/len(points))
def createRandomList(number_of_columns, column_name_len):
columns = []
for ii in range(0, number_of_columns):
columns += [hf.random_string(column_name_len)]
return columns
columns = createRandomList(NUM_COLUMNS, 10)
data = createRandomList(NUM_COLUMNS, 100)
table_name = hf.random_string(10)
NUM_ACC = 10001
print "table= " + table_name
#print "columns= " + str(columns)
#print "data= " + str(data)
app_datastore = appscale_datastore.DatastoreFactory.getDatastore(datastore_type)
ERROR_CODES = appscale_datastore.DatastoreFactory.error_codes()
VALID_DATASTORES = appscale_datastore.DatastoreFactory.valid_datastores()
if datastore_type not in VALID_DATASTORES:
print "Bad selection for datastore. Valid selections are:"
print app_datastore.valid_datastores()
exit(1)
# Prime datastore
ret = app_datastore.put_entity(table_name, "000", columns, data)
if ret[0] not in ERROR_CODES or ret[1] != "0":
err(hf.lineno(),ret)
putArray = []
for ii in range(1, NUM_ACC):
start = time.time()
app_datastore.put_entity(table_name, str(ii), columns, data)
end = time.time()
putArray.append(end - start)
print getAverage(putArray),"\t",getStDev(putArray)
getArray = []
for ii in range(1, NUM_ACC):
start = time.time()
app_datastore.get_entity(table_name, str(ii), columns)
end = time.time()
getArray.append(end - start)
print getAverage(getArray),"\t",getStDev(getArray)
exit(0)
|
py | 1a357c7f90576686b69ad178345649dda4660cd1 | # Circuit Diagram:
# Components Required: Buzzer -1, Resistance 47K – 1, Reed Switch – 1
# Program:
# Raspberry Pi Connect
import RPi.GPIO as GPIO
import time
# Setup Part
reed = 18  # reed switch input pin
buz = 22   # buzzer output pin (pin number assumed; the original listed 22 as an output)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(reed, GPIO.IN)
GPIO.setup(buz, GPIO.OUT)
# Infinite Loop
try:
    while True:
        # Check if Reed Sensor Pin is Low
        reed_state = GPIO.input(reed)
        if reed_state == False:
            print('Now door is open..')
            GPIO.output(buz, True)  # Switch on buzzer
        else:
            # Reed switch is closed so stop buzzer
            GPIO.output(buz, False)
        time.sleep(0.5)  # Wait for half a second before next check
except KeyboardInterrupt:
    print("CTRL + C Pressed")
    GPIO.output(buz, False)  # Switch off the buzzer
    GPIO.cleanup()  # Clean up and release all GPIO pins
|
py | 1a357ca2fb28ff1924dcd738a56ba3ec6386954d | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
A convenience script engine to read Gaussian output in a directory tree.
"""
import argparse
import logging
import multiprocessing
import os
import re
from tabulate import tabulate
from pymatgen.apps.borg.hive import GaussianToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
save_file = "gau_data.gz"
def get_energies(rootdir, reanalyze, verbose):
"""
:param rootdir:
:param reanalyze:
:param verbose:
:return:
"""
if verbose:
FORMAT = "%(relativeCreated)d msecs : %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT)
drone = GaussianToComputedEntryDrone(inc_structure=True, parameters=["filename"])
ncpus = multiprocessing.cpu_count()
logging.info("Detected {} cpus".format(ncpus))
queen = BorgQueen(drone, number_of_drones=ncpus)
if os.path.exists(save_file) and not reanalyze:
msg = (
"Using previously assimilated data from {}.".format(save_file)
+ " Use -f to force re-analysis."
)
queen.load_data(save_file)
else:
queen.parallel_assimilate(rootdir)
msg = "Results saved to {} for faster reloading.".format(save_file)
queen.save_data(save_file)
entries = queen.get_data()
entries = sorted(entries, key=lambda x: x.parameters["filename"])
all_data = [
(
e.parameters["filename"].replace("./", ""),
re.sub(r"\s+", "", e.composition.formula),
"{}".format(e.parameters["charge"]),
"{}".format(e.parameters["spin_mult"]),
"{:.5f}".format(e.energy),
"{:.5f}".format(e.energy_per_atom),
)
for e in entries
]
headers = ("Directory", "Formula", "Charge", "Spin Mult.", "Energy", "E/Atom")
print(tabulate(all_data, headers=headers))
print("")
print(msg)
def main():
"""
Main function
"""
desc = """
Convenient Gaussian run analyzer which can recursively go into a directory
to search results.
Author: Shyue Ping Ong
Version: 1.0
Last updated: Jul 6 2012"""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"directories",
metavar="dir",
default=".",
type=str,
nargs="*",
help="directory to process",
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_const",
const=True,
help="Verbose mode. Provides detailed output on progress.",
)
parser.add_argument(
"-f",
"--force",
dest="reanalyze",
action="store_const",
const=True,
help="Force reanalysis, instead of reusing gaussian_analyzer_data.gz.",
)
args = parser.parse_args()
for d in args.directories:
get_energies(d, args.reanalyze, args.verbose)
if __name__ == "__main__":
main()
|
py | 1a357d583327a72cee2db7dc2a2a3d636c1d66f5 | """Auto-generated file, do not edit by hand. CU metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_CU = PhoneMetadata(id='CU', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,5}', possible_number_pattern='\\d{3,6}', possible_length=(3, 6)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(),
emergency=PhoneNumberDesc(national_number_pattern='10[456]', possible_number_pattern='\\d{3}', example_number='106', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:0[456]|1(?:6111|8)|40)', possible_number_pattern='\\d{3,6}', example_number='140', possible_length=(3, 6)),
standard_rate=PhoneNumberDesc(),
carrier_specific=PhoneNumberDesc(),
short_data=True)
|
py | 1a357f7396a850790105fd10c3e2bce180ca3fb6 | from __future__ import print_function
from tornado import ioloop, gen
from tornado_mysql import pools
pools.DEBUG = True
POOL = pools.Pool(
dict(host='127.0.0.1', port=3306, user='test', passwd='', db='mysql'),
max_idle_connections=1,
max_recycle_sec=3)
@gen.coroutine
def worker(n):
for _ in range(10):
t = 1
print(n, "sleeping", t, "seconds")
cur = yield POOL.execute("SELECT SLEEP(%s)", (t,))
print(n, cur.fetchall())
@gen.coroutine
def main():
workers = [worker(i) for i in range(10)]
yield workers
ioloop.IOLoop.current().run_sync(main)
print(POOL._opened_conns) |
py | 1a357f82d5e8ba6013d3417f82955f64a72c637a | """
@brief test log(time=1s)
You should indicate a time in seconds. The program ``run_unittests.py``
will sort all test files by increasing time and run them.
"""
import unittest
import itertools
from teachpyx.examples.construction_classique import enumerate_permutations_recursive, enumerate_permutations
class TestClassiquesPermutation (unittest.TestCase):
def test_permutation(self):
self.maxDiff = None
ens = list(range(5))
lt = list(tuple(p) for p in enumerate_permutations_recursive(ens))
self.assertEqual(len(lt), 120)
res = list(tuple(p) for p in itertools.permutations(ens))
self.assertEqual(len(res), 120)
self.assertEqual(set(res), set(lt))
res = list(tuple(p) for p in enumerate_permutations(ens))
self.assertEqual(len(res), 120)
self.assertEqual(set(res), set(lt))
res = list(tuple(p) for p in enumerate_permutations([1]))
self.assertEqual(res, [(1,)])
if __name__ == "__main__":
unittest.main()
|
py | 1a357ffa70ca323babd72930d113b00bbd2a3384 | import tensorflow as tf
class Load_Data:
def __init__(self,MAX_LENGTH,tokenizer_en,tokenizer_pt):
self.MAX_LENGTH = MAX_LENGTH
self.tokenizer_pt = tokenizer_pt
self.tokenizer_en = tokenizer_en
def encode(self,lang1, lang2):
lang1 = [self.tokenizer_pt.vocab_size] + self.tokenizer_pt.encode(
lang1.numpy()) + [self.tokenizer_pt.vocab_size+1]
lang2 = [self.tokenizer_en.vocab_size] + self.tokenizer_en.encode(
lang2.numpy()) + [self.tokenizer_en.vocab_size+1]
return lang1, lang2
def tf_encode(self,pt, en):
result_pt, result_en = tf.py_function(self.encode, [pt, en], [tf.int64, tf.int64])
result_pt.set_shape([None])
result_en.set_shape([None])
return result_pt, result_en
def filter_max_length(self,x, y):
return tf.logical_and(tf.size(x) <= self.MAX_LENGTH,
tf.size(y) <= self.MAX_LENGTH)
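# --- Hypothetical usage sketch (not from the original file): how these
# methods are typically wired into a tf.data pipeline. `train_examples`,
# `tokenizer_pt` and `tokenizer_en` are assumed to come from
# tensorflow_datasets (e.g. the ted_hrlr_translate/pt_to_en dataset with
# SubwordTextEncoder tokenizers).
#
# loader = Load_Data(MAX_LENGTH=40, tokenizer_en=tokenizer_en,
#                    tokenizer_pt=tokenizer_pt)
# train_dataset = (train_examples
#                  .map(loader.tf_encode)
#                  .filter(loader.filter_max_length)
#                  .cache()
#                  .padded_batch(64))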
|
py | 1a3580c582b2b8ae9867f5b4d9e76f6e6cfbce67 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Tea Sampling",
"color": "Orange",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Tea Sampling")
}
]
|
py | 1a35813584c37fe3df23389052a12ea2d14e7a4e | import redis
import urllib.parse as parse
local_redis = redis.Redis(host='127.0.0.1', port=6379, db=0)
all_keys = local_redis.keys()
for bt_key in all_keys:
bt_key = bt_key.decode('utf-8')
bt_str = '\n{}\n'.format(parse.unquote(local_redis.get(bt_key).decode('utf-8')))
try:
    # the with-statement closes the file, so no explicit close is needed
    with open('./User_Method/user_magnet_bt.txt', 'a') as sf:
        sf.write(bt_str)
except Exception as error:
    print(error)
    continue
else:
    print(bt_str)
|
py | 1a3582ad4471ca72d9388e2dc7b6e3fd7f60c10d | import os
import logging
from threading import Thread, Event, Lock
from time import sleep, time
import serial
# for python 2/3 compatibility
try:
reduce
except NameError:
# In python 3, reduce is no longer imported by default.
from functools import reduce
try:
isinstance("", basestring)
def is_str(s):
return isinstance(s, basestring)
def encode2To3(s):
return s
def decode2To3(s):
return s
except NameError:
def is_str(s):
return isinstance(s, str)
def encode2To3(s):
return bytes(s, 'UTF-8')
def decode2To3(s):
return s.decode('UTF-8')
HERE = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.addHandler(logging.StreamHandler())
#fh = logging.FileHandler(os.path.join(HERE, 'voxelface.log'))
#fh.setFormatter(logging.Formatter('%(asctime)s - %(threadName)s - %(levelname)s - %(message)s'))
#logger.addHandler(fh)
class Printer(object):
""" The Printer object is responsible for serial communications with a
printer. The printer is expected to be running Marlin firmware.
"""
def __init__(self, port='/dev/tty.usbmodem1421', baudrate=250000):
# USB port and baudrate for communication with the printer.
self.port = port
self.baudrate = baudrate
# The Serial object that the printer is communicating on.
self.s = None
# List of the responses from the printer.
self.responses = []
# List of lines that were sent to the printer.
self.sentlines = []
# True if the print thread is alive and sending lines.
self.printing = False
# Set to True to pause the print.
self.paused = False
# If set to True, the read_thread will be closed as soon as possible.
self.stop_reading = False
# If set to True, the print_thread will be closed as soon as possible.
self.stop_printing = False
# List of all temperature string responses from the printer.
self.temp_readings = []
### Private Attributes ################################################
# List of all lines to be sent to the printer.
self._buffer = []
# Index into the _buffer of the next line to send to the printer.
self._current_line_idx = 0
# This thread continuously sends lines as they appear in self._buffer.
self._print_thread = None
# This thread continuously reads lines as they appear from the printer.
self._read_thread = None
# Flag used to synchronize the print_thread and the read_thread. An 'ok'
# needs to be returned for every line sent. When the print_thread sends
# a line this flag is cleared, and when an 'ok' is received it is set.
self._ok_received = Event()
self._ok_received.set()
# Lock used to ensure serial send/receive events are atomic with the
# setting/clearing of the `_ok_received` flag.
self._communication_lock = Lock()
# Lock used to ensure connecting and disconnecting is atomic.
self._connection_lock = Lock()
# If False the Printer instance does not own the serial object passed
# in and it should not be closed when finished with.
self._owns_serial = True
# This is set to true when a disconnect was requested. If a sendline is
# called while this is true an error is raised.
self._disconnect_pending = False
# When we reset the line number Marlin's internal number will differ
# from our own _current_line_idx. This offset is used to keep those two
# in sync.
self._reset_offset = 0
### Printer Interface ###################################################
def connect(self, s=None):
""" Instantiate a Serial object using the stored port and baudrate.
Parameters
----------
s : serial.Serial
If a serial object is passed in then it will be used instead of
creating a new one.
"""
with self._connection_lock:
if s is None:
self.s = serial.Serial(self.port, self.baudrate, timeout=3)
else:
self.s = s
self._owns_serial = False
self._ok_received.set()
self._current_line_idx = 0
self._buffer = []
self.responses = []
self.sentlines = []
self._disconnect_pending = False
self._start_read_thread()
if s is None:
while len(self.responses) == 0:
sleep(0.01)  # wait until the start message is received.
self.responses = []
logger.debug('Connected to {}'.format(self.s))
def disconnect(self, wait=False):
""" Disconnect from the printer by stopping threads and closing the port
Parameters
----------
wait : Bool (default: False)
If true, this method waits until all lines in the buffer have been
sent and acknowledged before disconnecting. Clearing the buffer
isn't guaranteed. If the read thread isn't running for some reason,
this function may return without waiting even when wait is set to
True.
"""
with self._connection_lock:
self._disconnect_pending = True
if wait:
buf_len = len(self._buffer)
while buf_len > len(self.responses) and \
self._is_read_thread_running():
sleep(0.01) # wait until all lines in the buffer are sent
if self._print_thread is not None:
self.stop_printing = True
if self.s is not None and self.s.writeTimeout is not None:
timeout = self.s.writeTimeout + 1
else:
timeout = 10
self._print_thread.join(timeout)
if self._read_thread is not None:
self.stop_reading = True
if self.s is not None and self.s.timeout is not None:
timeout = self.s.timeout + 1
else:
timeout = 10
self._read_thread.join(timeout)
if self.s is not None and self._owns_serial is True:
self.s.close()
self.s = None
self.printing = False
self._current_line_idx = 0
self._buffer = []
self.responses = []
self.sentlines = []
logger.debug('Disconnected from printer')
def load_file(self, filepath):
""" Load the given file into an internal _buffer. The lines will not be
send until `self._start_print_thread()` is called.
Parameters
----------
filepath : str
The path to a text file containing lines of GCode to be printed.
"""
lines = []
with open(filepath) as f:
for line in f:
line = line.strip()
if ';' in line: # clear out the comments
line = line.split(';')[0]
if line:
lines.append(line)
self._buffer.extend(lines)
def start(self):
""" Starts the read_thread and the _print_thread.
"""
self._start_read_thread()
self._start_print_thread()
def sendline(self, line):
""" Send the given line over serial by appending it to the send buffer
Parameters
----------
line : str
A line of GCode to send to the printer.
"""
if self._disconnect_pending:
msg = 'Attempted to send line after a disconnect was requested: {}'
raise RuntimeError(msg.format(line))
if line:
line = str(line).strip()
if ';' in line: # clear out the comments
line = line.split(';')[0]
if line:
self._buffer.append(line)
def get_response(self, line, timeout=0):
""" Send the given line and return the response from the printer.
Parameters
----------
line : str
The line to send to the printer
Returns
-------
r : str
The response from the printer.
"""
buf_len = len(self._buffer) + 1
self.sendline(line)
start_time = time()
while len(self.responses) != buf_len:
if len(self.responses) > buf_len:
msg = "Received more responses than lines sent"
raise RuntimeError(msg)
if timeout > 0 and (time() - start_time) > timeout:
return '' # return blank string on timeout.
if not self._is_read_thread_running():
raise RuntimeError("can't get response from serial since read thread isn't running")
sleep(0.01)
return self.responses[-1]
def current_position(self):
""" Get the current postion of the printer.
Returns
-------
pos : dict
Dict with keys of 'X', 'Y', 'Z', and 'E' and values of their
positions
"""
# example r: X:0.00 Y:0.00 Z:0.00 E:0.00 Count X: 0.00 Y:0.00 Z:0.00
r = self.get_response("M114")
r = r.split(' Count')[0].strip().split()
r = [x.split(':') for x in r]
pos = dict([(k, float(v)) for k, v in r])
return pos
def reset_linenumber(self, number=0):
line = "M110 N{}".format(number)
self.sendline(line)
### Private Methods ######################################################
def _start_print_thread(self):
""" Spawns a new thread that will send all lines in the _buffer over
serial to the printer. This thread can be stopped by setting
`stop_printing` to True. If a print_thread already exists and is alive,
this method does nothing.
"""
if self._is_print_thread_running():
return
self.printing = True
self.stop_printing = False
self._print_thread = Thread(target=self._print_worker_entrypoint, name='Print')
self._print_thread.setDaemon(True)
self._print_thread.start()
logger.debug('print_thread started')
def _start_read_thread(self):
""" Spawns a new thread that will continuously read lines from the
printer. This thread can be stopped by setting `stop_reading` to True.
If a print_thread already exists and is alive, this method does
nothing.
"""
if self._is_read_thread_running():
return
self.stop_reading = False
self._read_thread = Thread(target=self._read_worker_entrypoint, name='Read')
self._read_thread.setDaemon(True)
self._read_thread.start()
logger.debug('read_thread started')
def _print_worker_entrypoint(self):
try:
self._print_worker()
except Exception as e:
logger.exception("Exception running print worker: " + str(e))
def _read_worker_entrypoint(self):
try:
self._read_worker()
except Exception as e:
logger.exception("Exception running read worker: " + str(e))
def _is_print_thread_running(self):
return self._print_thread is not None and self._print_thread.is_alive()
def _is_read_thread_running(self):
return self._read_thread is not None and self._read_thread.is_alive()
def _print_worker(self):
""" This method is spawned in the print thread. It loops over every line
in the _buffer and sends it over serial to the printer.
"""
while not self.stop_printing:
_paused = False
while self.paused is True and not self.stop_printing:
if _paused is False:
logger.debug('Printer.paused is True, waiting...')
_paused = True
sleep(0.01)
if _paused is True:
logger.debug('Printer.paused is now False, resuming.')
if self._current_line_idx < len(self._buffer):
self.printing = True
while not self._ok_received.is_set() and not self.stop_printing:
self._ok_received.wait(1)
line = self._next_line()
with self._communication_lock:
self.s.write(encode2To3(line))
self._ok_received.clear()
self._current_line_idx += 1
# Grab the just sent line without line numbers or checksum
plain_line = self._buffer[self._current_line_idx - 1].strip()
self.sentlines.append(plain_line)
else: # if there aren't new lines wait 10ms and check again
sleep(0.01)
self.printing = False
def _read_worker(self):
""" This method is spawned in the read thread. It continuously reads
from the printer over serial and checks for 'ok's.
"""
full_resp = ''
while not self.stop_reading:
if self.s is not None:
line = decode2To3(self.s.readline())
if line.startswith('Resend: '): # example line: "Resend: 143"
self._current_line_idx = int(line.split()[1]) - 1 + self._reset_offset
logger.debug('Resend Requested - {}'.format(line.strip()))
with self._communication_lock:
self._ok_received.set()
continue
if line.startswith('T:'):
self.temp_readings.append(line)
if line:
full_resp += line
# If there is no newline char in the response that means
# serial.readline() hit the timeout before a full line. This
# means communication has broken down so both threads need
# to be closed down.
if '\n' not in line:
self.printing = False
self.stop_printing = True
self.stop_reading = True
with self._communication_lock:
self._ok_received.set()
msg = """readline timed out mid-line.
last sentline: {}
response: {}
"""
raise RuntimeError(msg.format(self.sentlines[-1:],
full_resp))
if 'ok' in line:
with self._communication_lock:
self._ok_received.set()
self.responses.append(full_resp)
full_resp = ''
else: # if no printer is attached, wait 10ms to check again.
sleep(0.01)
def _next_line(self):
""" Prepares the next line to be sent to the printer by prepending the
line number and appending a checksum and newline character.
"""
line = self._buffer[self._current_line_idx].strip()
if line.startswith('M110 N'):
new_number = int(line[6:])
self._reset_offset = self._current_line_idx + 1 - new_number
elif line.startswith('M110'):
self._reset_offset = self._current_line_idx + 1
idx = self._current_line_idx + 1 - self._reset_offset
line = 'N{} {}'.format(idx, line)
checksum = self._checksum(line)
return '{}*{}\n'.format(line, checksum)
def _checksum(self, line):
""" Calclate the checksum by xor'ing all characters together.
"""
if not line:
raise RuntimeError("cannot compute checksum of an empty string")
return reduce(lambda a, b: a ^ b, [ord(char) for char in line])
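# --- Worked example (illustrative, not part of the original file): the
# line-numbering/checksum framing produced by _next_line() for the first
# line of a print. The checksum is the XOR of every character's ordinal.
if __name__ == '__main__':
    line = 'N1 G28'
    checksum = reduce(lambda a, b: a ^ b, [ord(char) for char in line])
    print('{}*{}'.format(line, checksum))  # -> N1 G28*18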
|
py | 1a35837bc3ebc5f4a0689190c501d2206b5e27d2 | #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name = "netblocks"
from .netblocks import * |
py | 1a3583e03866bb33ac4636b200747c61ebd573bf | from django.conf.urls import url
from django.contrib.auth.decorators import login_required, permission_required
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
# terminal urls
url(r'^$', views.terminals, name='terminals'),
url(r'^add/$', views.terminal_add, name='terminal-add'),
url(r'^terminal_process/$', views.terminal_process, name='terminal_process'),
url(r'^edit/(?P<pk>[0-9]+)/$', views.terminal_edit, name='terminal-edit'),
url(r'^terminal/history/(?P<pk>[0-9]+)/$', views.terminal_history, name='terminal-history'),
url(r'^terminal_update(?P<pk>[0-9]+)/$', views.terminal_update, name='terminal-update'),
url(r'^detail/(?P<pk>[0-9]+)/$', views.terminal_detail, name='terminal-detail'),
url(r'^delete/(?P<pk>[0-9]+)/$', views.terminal_delete, name='terminal-delete'),
# cashmovement urls
url(r'^transactions/$', views.transactions, name='transactions'),
]
if settings.DEBUG:
# urlpatterns += [ url(r'^static/(?P<path>.*)$', serve)] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
py | 1a35840ac6fd6ed094fc7c7a4aeb310dd6d0f5e2 | """Skolemizer package.
Modules:
skolemizer
"""
try:
from importlib.metadata import version, PackageNotFoundError # type: ignore
except ImportError: # pragma: no cover
from importlib_metadata import version, PackageNotFoundError # type: ignore
try:
__version__ = version(__name__)
except PackageNotFoundError: # pragma: no cover
__version__ = "unknown"
from .skolemizer import Skolemizer
|
py | 1a35842f444f4f49f866e8b218bee8d73f1d9fb6 | import graphene
from graphene import AbstractType, Field, Node, ClientIDMutation
from graphene import ID, Boolean, Float, Int, List, String
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.types import DjangoObjectType
from graphene_django.converter import convert_django_field
from graphql_relay.node.node import from_global_id
from .custom_filter_fields import date_types, string_types, number_types
from .helper_methods import not_none, set_and_save
from virtualization.models import ClusterType, ClusterGroup, Cluster, VirtualMachine
from tenancy.models import Tenant
from dcim.models import Site, Interface, Platform, DeviceRole
from ipam.models import IPAddress
# Nodes
class ClusterTypeNode(DjangoObjectType):
class Meta:
model = ClusterType
interfaces = (Node, )
filter_fields = {
'id': ['exact'],
'name': string_types,
'slug': ['exact'],
}
class ClusterGroupNode(DjangoObjectType):
class Meta:
model = ClusterGroup
interfaces = (Node, )
filter_fields = {
'id': ['exact'],
'name': string_types,
'slug': ['exact'],
}
class ClusterNode(DjangoObjectType):
class Meta:
model = Cluster
interfaces = (Node, )
filter_fields = {
'id': ['exact'],
'name': string_types,
}
class VirtualMachineNode(DjangoObjectType):
class Meta:
model = VirtualMachine
interfaces = (Node, )
filter_fields = {
'id': ['exact'],
'name': string_types,
}
# Queries
class VirtualizationQuery(AbstractType):
cluster_types = DjangoFilterConnectionField(ClusterTypeNode)
cluster_groups = DjangoFilterConnectionField(ClusterGroupNode)
clusters = DjangoFilterConnectionField(ClusterNode)
virtual_machines = DjangoFilterConnectionField(VirtualMachineNode)
# Mutations
class NewClusterType(ClientIDMutation):
cluster_type = Field(ClusterTypeNode)
class Input:
name = String()
slug = String()
@classmethod
def mutate_and_get_payload(cls, input, context, info):
temp = ClusterType()
fields = [ 'name', 'slug' ]
return NewClusterType(cluster_type=set_and_save(fields, input, temp))
class UpdateClusterType(ClientIDMutation):
cluster_type = Field(ClusterTypeNode)
class Input:
id = String()
name = String()
slug = String()
@classmethod
def mutate_and_get_payload(cls, input, context, info):
temp = ClusterType.objects.get(pk=from_global_id(input.get('id'))[1])
fields = [ 'name', 'slug' ]
return UpdateClusterType(cluster_type=set_and_save(fields, input, temp))
class DeleteClusterType(ClientIDMutation):
cluster_type = Field(ClusterTypeNode)
class Input:
id = String()
@classmethod
def mutate_and_get_payload(cls, input, context, info):
temp = ClusterType.objects.get(pk=from_global_id(input.get('id'))[1])
temp.delete()
return DeleteClusterType(cluster_type=temp)
# Cluster Group
class NewClusterGroup(ClientIDMutation):
cluster_group = Field(ClusterGroupNode)
class Input:
name = String()
slug = String()
@classmethod
def mutate_and_get_payload(cls, input, context, info):
temp = ClusterGroup()
fields = [ 'name', 'slug' ]
return NewClusterGroup(cluster_group=set_and_save(fields, input, temp))
class UpdateClusterGroup(ClientIDMutation):
cluster_group = Field(ClusterGroupNode)
class Input:
id = String()
name = String()
slug = String()
@classmethod
def mutate_and_get_payload(cls, input, context, info):
temp = ClusterGroup.objects.get(pk=from_global_id(input.get('id'))[1])
fields = [ 'name', 'slug' ]
return UpdateClusterGroup(cluster_group=set_and_save(fields, input, temp))
class DeleteClusterGroup(ClientIDMutation):
cluster_group = Field(ClusterGroupNode)
class Input:
id = String()
@classmethod
def mutate_and_get_payload(cls, input, context, info):
temp = ClusterGroup.objects.get(pk=from_global_id(input.get('id'))[1])
temp.delete()
return DeleteClusterGroup(cluster_group=temp)
### Cluster
class NewCluster(ClientIDMutation):
cluster = Field(ClusterNode)
class Input:
name = String(default_value=None)
type = String(default_value=None)
group = String(default_value=None)
site = String(default_value=None)
comments = String(default_value=None)
@classmethod
def mutate_and_get_payload(cls, input, context, info):
type = input.get('type')
group = input.get('group')
site = input.get('site')
temp = Cluster()
if not_none(type):
temp.type = ClusterType.objects.get(pk=from_global_id(type)[1])
if not_none(group):
temp.group = ClusterGroup.objects.get(pk=from_global_id(group)[1])
if not_none(site):
temp.site = Site.objects.get(pk=from_global_id(site)[1])
fields = ['name', 'comments']
return NewCluster(cluster=set_and_save(fields, input, temp))
class UpdateCluster(ClientIDMutation):
cluster = Field(ClusterNode)
class Input:
id = String()
name = String(default_value=None)
type = String(default_value=None)
group = String(default_value=None)
site = String(default_value=None)
comments = String(default_value=None)
@classmethod
def mutate_and_get_payload(cls, input, context, info):
temp = Cluster.objects.get(pk=from_global_id(input.get('id'))[1])
type = input.get('type')
group = input.get('group')
site = input.get('site')
if not_none(type):
temp.type = ClusterType.objects.get(pk=from_global_id(type)[1])
if not_none(group):
temp.group = ClusterGroup.objects.get(pk=from_global_id(group)[1])
if not_none(site):
temp.site = Site.objects.get(pk=from_global_id(site)[1])
fields = ['name', 'comments']
return UpdateCluster(cluster=set_and_save(fields, input, temp))
class DeleteCluster(ClientIDMutation):
cluster = Field(ClusterNode)
class Input:
id = String()
@classmethod
def mutate_and_get_payload(cls, input, context, info):
temp = Cluster.objects.get(pk=from_global_id(input.get('id'))[1])
temp.delete()
return DeleteCluster(cluster=temp)
### Virtual machine
class NewVirtualMachine(ClientIDMutation):
virtual_machine = Field(VirtualMachineNode)
class Input:
cluster = String(default_value=None)
tenant = String(default_value=None)
platform = String(default_value=None)
name = String(default_value=None)
status = Int(default_value=None)
role = String(default_value=None)
primary_ip4 = String(default_value=None)
primary_ip6 = String(default_value=None)
vcpus = Int(default_value=None)
memory = Int(default_value=None)
disk = Int(default_value=None)
comments = String(default_value=None)
@classmethod
def mutate_and_get_payload(cls, input, context, info):
cluster = input.get('cluster')
tenant = input.get('tenant')
platform = input.get('platform')
role = input.get('role')
primary_ip4 = input.get('primary_ip4')
primary_ip6 = input.get('primary_ip6')
temp = VirtualMachine()
if not_none(cluster):
temp.cluster = Cluster.objects.get(pk=from_global_id(cluster)[1])
if not_none(tenant):
temp.tenant = Tenant.objects.get(pk=from_global_id(tenant)[1])
if not_none(platform):
temp.platform = Platform.objects.get(pk=from_global_id(platform)[1])
if not_none(role):
temp.role = DeviceRole.objects.get(pk=from_global_id(role)[1])
if not_none(primary_ip4):
temp.primary_ip4 = IPAddress.objects.get(pk=from_global_id(primary_ip4)[1])
if not_none(primary_ip6):
temp.primary_ip6 = IPAddress.objects.get(pk=from_global_id(primary_ip6)[1])
fields = ['name', 'status', 'vcpus', 'memory', 'disk', 'comments']
return NewVirtualMachine(virtual_machine=set_and_save(fields, input, temp))
class UpdateVirtualMachine(ClientIDMutation):
virtual_machine = Field(VirtualMachineNode)
class Input:
id = String()
cluster = String(default_value=None)
tenant = String(default_value=None)
platform = String(default_value=None)
name = String(default_value=None)
status = Int(default_value=None)
role = String(default_value=None)
primary_ip4 = String(default_value=None)
primary_ip6 = String(default_value=None)
vcpus = Int(default_value=None)
memory = Int(default_value=None)
disk = Int(default_value=None)
comments = String(default_value=None)
@classmethod
def mutate_and_get_payload(cls, input, context, info):
temp = VirtualMachine.objects.get(pk=from_global_id(input.get('id'))[1])
cluster = input.get('cluster')
tenant = input.get('tenant')
platform = input.get('platform')
role = input.get('role')
primary_ip4 = input.get('primary_ip4')
primary_ip6 = input.get('primary_ip6')
if not_none(cluster):
temp.cluster = Cluster.objects.get(pk=from_global_id(cluster)[1])
if not_none(tenant):
temp.tenant = Tenant.objects.get(pk=from_global_id(tenant)[1])
if not_none(platform):
temp.platform = Platform.objects.get(pk=from_global_id(platform)[1])
if not_none(role):
temp.role = DeviceRole.objects.get(pk=from_global_id(role)[1])
if not_none(primary_ip4):
temp.primary_ip4 = IPAddress.objects.get(pk=from_global_id(primary_ip4)[1])
if not_none(primary_ip6):
temp.primary_ip6 = IPAddress.objects.get(pk=from_global_id(primary_ip6)[1])
fields = ['name', 'status', 'vcpus', 'memory', 'disk', 'comments']
return UpdateVirtualMachine(virtual_machine=set_and_save(fields, input, temp))
class DeleteVirtualMachine(ClientIDMutation):
virtual_machine = Field(VirtualMachineNode)
class Input:
id = String()
@classmethod
def mutate_and_get_payload(cls, input, context, info):
temp = VirtualMachine.objects.get(pk=from_global_id(input.get('id'))[1])
temp.delete()
return DeleteVirtualMachine(virtual_machine=temp)
class VirtualizationMutations(AbstractType):
# Cluster Type
new_cluster_type = NewClusterType.Field()
update_cluster_type = UpdateClusterType.Field()
delete_cluster_type = DeleteClusterType.Field()
# Cluster Group
new_cluster_group = NewClusterGroup.Field()
update_cluster_group = UpdateClusterGroup.Field()
delete_cluster_group = DeleteClusterGroup.Field()
# Cluster
new_cluster = NewCluster.Field()
update_cluster = UpdateCluster.Field()
delete_cluster = DeleteCluster.Field()
# Virtual Machine
new_virtual_machine = NewVirtualMachine.Field()
update_virtual_machine = UpdateVirtualMachine.Field()
delete_virtual_machine = DeleteVirtualMachine.Field()
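# --- Illustrative sketch (not from the original file): once these mutation
# fields are mounted on a schema, a client creates a cluster type with a
# relay-style mutation such as the following (field names assumed to follow
# graphene's default camel-casing):
#
# mutation {
#   newClusterType(input: {name: "QEMU/KVM", slug: "qemu-kvm"}) {
#     clusterType { id name slug }
#   }
# }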
|
py | 1a358441c694c541ac6eeb2b22bb14f0c1664278 | import config
import io
import tensorflow as tf
import joblib
def save_weights(weights, reverse_word_index):
out_v = io.open(f'{config.MODEL_PATH}/vecs.tsv', 'w', encoding='utf-8')
out_m = io.open(f'{config.MODEL_PATH}/meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, config.VOCAB_SIZE):
word = reverse_word_index[word_num]
embeddings = weights[word_num]
out_m.write(word + '\n')
out_v.write('\t'.join([str(x) for x in embeddings]) + '\n')
out_v.close()
out_m.close()
if __name__ == '__main__':
load_model = tf.keras.models.load_model(f"{config.MODEL_PATH}my_model.h5")
reverse_word_index = joblib.load(f"{config.MODEL_PATH}rev_word_ind.pkl")
e = load_model.layers[0]
weights = e.get_weights()[0]
save_weights(weights, reverse_word_index)
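    # The two TSV files written above follow the format expected by the
    # TensorFlow Embedding Projector (https://projector.tensorflow.org):
    # vecs.tsv holds the embedding vectors, meta.tsv the word labels.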
|
py | 1a3584eea22b124006df58c5b66450a6fbc902d8 | """The DeepFool attack
"""
import copy
import logging
import warnings
import numpy as np
import tensorflow as tf
from cleverhans.attacks.attack import Attack
from cleverhans.model import Model, wrapper_warning_logits, CallableModelWrapper
from cleverhans import utils
from cleverhans import utils_tf
np_dtype = np.dtype('float32')
_logger = utils.create_logger("cleverhans.attacks.deep_fool")
_logger.setLevel(logging.INFO)
class DeepFool(Attack):
"""
DeepFool is an untargeted & iterative attack which is based on an
iterative linearization of the classifier. The implementation here
is w.r.t. the L2 norm.
Paper link: "https://arxiv.org/pdf/1511.04599.pdf"
:param model: cleverhans.model.Model
:param sess: tf.Session
:param dtypestr: dtype of the data
:param kwargs: passed through to super constructor
"""
def __init__(self, model, sess, dtypestr='float32', **kwargs):
"""
Create a DeepFool instance.
"""
if not isinstance(model, Model):
wrapper_warning_logits()
model = CallableModelWrapper(model, 'logits')
super(DeepFool, self).__init__(model, sess, dtypestr, **kwargs)
self.structural_kwargs = [
'overshoot', 'max_iter', 'clip_max', 'clip_min', 'nb_candidate'
]
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
assert self.sess is not None, \
'Cannot use `generate` when no `sess` was provided'
from cleverhans.utils_tf import jacobian_graph
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
# Define graph wrt to this input placeholder
logits = self.model.get_logits(x)
self.nb_classes = logits.get_shape().as_list()[-1]
assert self.nb_candidate <= self.nb_classes, \
'nb_candidate should not be greater than nb_classes'
preds = tf.reshape(
tf.nn.top_k(logits, k=self.nb_candidate)[0],
[-1, self.nb_candidate])
# grads will be the shape [batch_size, nb_candidate, image_size]
grads = tf.stack(jacobian_graph(preds, x, self.nb_candidate), axis=1)
# Define graph
def deepfool_wrap(x_val):
"""deepfool function for py_func"""
return deepfool_batch(self.sess, x, preds, logits, grads, x_val,
self.nb_candidate, self.overshoot,
self.max_iter, self.clip_min, self.clip_max,
self.nb_classes)
wrap = tf.compat.v1.py_func(deepfool_wrap, [x], self.tf_dtype)
wrap.set_shape(x.get_shape())
return wrap
def parse_params(self,
nb_candidate=10,
overshoot=0.02,
max_iter=50,
clip_min=0.,
clip_max=1.,
**kwargs):
"""
:param nb_candidate: The number of classes to test against, i.e.,
deepfool only considers nb_candidate classes when
attacking (thus accelerating speed). The nb_candidate
classes are chosen according to the prediction
confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for deepfool
:param clip_min: Minimum component value for clipping
:param clip_max: Maximum component value for clipping
"""
self.nb_candidate = nb_candidate
self.overshoot = overshoot
self.max_iter = max_iter
self.clip_min = clip_min
self.clip_max = clip_max
if len(kwargs.keys()) > 0:
warnings.warn("kwargs is unused and will be removed on or after "
"2019-04-26.")
return True
def deepfool_batch(sess,
x,
pred,
logits,
grads,
X,
nb_candidate,
overshoot,
max_iter,
clip_min,
clip_max,
nb_classes,
feed=None):
"""
Applies DeepFool to a batch of inputs
:param sess: TF session
:param x: The input placeholder
:param pred: The model's sorted symbolic output of logits, only the top
nb_candidate classes are contained
:param logits: The model's unnormalized output tensor (the input to
the softmax layer)
:param grads: Symbolic gradients of the top nb_candidate classes, produced
from gradient_graph
:param X: Numpy array with sample inputs
:param nb_candidate: The number of classes to test against, i.e.,
deepfool only considers nb_candidate classes when
attacking (thus accelerating speed). The nb_candidate
classes are chosen according to the prediction
confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for DeepFool
:param clip_min: Minimum value for components of the example returned
:param clip_max: Maximum value for components of the example returned
:param nb_classes: Number of model output classes
:return: Adversarial examples
"""
X_adv = deepfool_attack(
sess,
x,
pred,
logits,
grads,
X,
nb_candidate,
overshoot,
max_iter,
clip_min,
clip_max,
feed=feed)
return np.asarray(X_adv, dtype=np_dtype)
def deepfool_attack(sess,
x,
predictions,
logits,
grads,
sample,
nb_candidate,
overshoot,
max_iter,
clip_min,
clip_max,
feed=None):
"""
TensorFlow implementation of DeepFool.
Paper link: see https://arxiv.org/pdf/1511.04599.pdf
:param sess: TF session
:param x: The input placeholder
:param predictions: The model's sorted symbolic output of logits, only the
top nb_candidate classes are contained
:param logits: The model's unnormalized output tensor (the input to
the softmax layer)
:param grads: Symbolic gradients of the top nb_candidate classes, produced
from gradient_graph
:param sample: Numpy array with sample input
:param nb_candidate: The number of classes to test against, i.e.,
deepfool only considers nb_candidate classes when
attacking (thus accelerating speed). The nb_candidate
classes are chosen according to the prediction
confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for DeepFool
:param clip_min: Minimum value for components of the example returned
:param clip_max: Maximum value for components of the example returned
:return: Adversarial examples
"""
adv_x = copy.copy(sample)
# Initialize the loop variables
iteration = 0
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
w = np.squeeze(np.zeros(sample.shape[1:])) # same shape as original image
r_tot = np.zeros(sample.shape)
original = current # use original label as the reference
_logger.debug(
"Starting DeepFool attack up to %s iterations", max_iter)
# Repeat this main loop until we have achieved misclassification
while (np.any(current == original) and iteration < max_iter):
if iteration % 5 == 0 and iteration > 0:
_logger.info("Attack result at iteration %s is %s", iteration, current)
gradients = sess.run(grads, feed_dict={x: adv_x})
predictions_val = sess.run(predictions, feed_dict={x: adv_x})
for idx in range(sample.shape[0]):
pert = np.inf
if current[idx] != original[idx]:
continue
for k in range(1, nb_candidate):
w_k = gradients[idx, k, ...] - gradients[idx, 0, ...]
f_k = predictions_val[idx, k] - predictions_val[idx, 0]
# adding value 0.00001 to prevent f_k = 0
pert_k = (abs(f_k) + 0.00001) / np.linalg.norm(w_k.flatten())
if pert_k < pert:
pert = pert_k
w = w_k
r_i = pert * w / np.linalg.norm(w)
r_tot[idx, ...] = r_tot[idx, ...] + r_i
adv_x = np.clip(r_tot + sample, clip_min, clip_max)
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
# Update loop variables
iteration = iteration + 1
# need more revision, including info like how many succeed
_logger.info("Attack result at iteration %s is %s", iteration, current)
_logger.info("%s out of %s become adversarial examples at iteration %s",
sum(current != original),
sample.shape[0],
iteration)
# need to clip this image into the given range
adv_x = np.clip((1 + overshoot) * r_tot + sample, clip_min, clip_max)
return adv_x
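# --- Hypothetical usage sketch (not part of the original file): attacking a
# Keras model wrapped for cleverhans. `model` and `x_test` are assumed to
# exist; KerasModelWrapper would come from cleverhans.utils_keras.
#
# sess = tf.compat.v1.Session()
# attack = DeepFool(KerasModelWrapper(model), sess=sess)
# x_adv = attack.generate_np(x_test, nb_candidate=10, overshoot=0.02,
#                            max_iter=50, clip_min=0., clip_max=1.)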
|
py | 1a35865c966ff89b9cc5705c1191e40f05eea4d4 | ############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Z3 Python interface
#
# Author: Leonardo de Moura (leonardo)
############################################
"""Z3 is a high performance theorem prover developed at Microsoft Research.
Z3 is used in many applications such as: software/hardware verification and testing,
constraint solving, analysis of hybrid systems, security, biology (in silico analysis),
and geometrical problems.
Several online tutorials for Z3Py are available at:
http://rise4fun.com/Z3Py/tutorial/guide
Please send feedback, comments and/or corrections on the Issue tracker for
https://github.com/Z3prover/z3.git. Your comments are very valuable.
Small example:
>>> x = Int('x')
>>> y = Int('y')
>>> s = Solver()
>>> s.add(x > 0)
>>> s.add(x < 2)
>>> s.add(y == x + 1)
>>> s.check()
sat
>>> m = s.model()
>>> m[x]
1
>>> m[y]
2
Z3 exceptions:
>>> try:
... x = BitVec('x', 32)
... y = Bool('y')
... # the expression x + y is type incorrect
... n = x + y
... except Z3Exception as ex:
... print("failed: %s" % ex)
failed: sort mismatch
"""
from . import z3core
from .z3core import *
from .z3types import *
from .z3consts import *
from .z3printer import *
from fractions import Fraction
import sys
import io
import math
import copy
if sys.version_info.major >= 3:
from typing import Iterable
Z3_DEBUG = __debug__
def z3_debug():
global Z3_DEBUG
return Z3_DEBUG
if sys.version_info.major < 3:
def _is_int(v):
return isinstance(v, (int, long))
else:
def _is_int(v):
return isinstance(v, int)
def enable_trace(msg):
Z3_enable_trace(msg)
def disable_trace(msg):
Z3_disable_trace(msg)
def get_version_string():
major = ctypes.c_uint(0)
minor = ctypes.c_uint(0)
build = ctypes.c_uint(0)
rev = ctypes.c_uint(0)
Z3_get_version(major, minor, build, rev)
return "%s.%s.%s" % (major.value, minor.value, build.value)
def get_version():
major = ctypes.c_uint(0)
minor = ctypes.c_uint(0)
build = ctypes.c_uint(0)
rev = ctypes.c_uint(0)
Z3_get_version(major, minor, build, rev)
return (major.value, minor.value, build.value, rev.value)
def get_full_version():
return Z3_get_full_version()
# We use _z3_assert instead of the assert command because we want to
# produce nice error messages in Z3Py at rise4fun.com
def _z3_assert(cond, msg):
if not cond:
raise Z3Exception(msg)
def _z3_check_cint_overflow(n, name):
_z3_assert(ctypes.c_int(n).value == n, name + " is too large")
def open_log(fname):
"""Log interaction to a file. This function must be invoked immediately after init(). """
Z3_open_log(fname)
def append_log(s):
"""Append user-defined string to interaction log. """
Z3_append_log(s)
def to_symbol(s, ctx=None):
"""Convert an integer or string into a Z3 symbol."""
if _is_int(s):
return Z3_mk_int_symbol(_get_ctx(ctx).ref(), s)
else:
return Z3_mk_string_symbol(_get_ctx(ctx).ref(), s)
def _symbol2py(ctx, s):
"""Convert a Z3 symbol back into a Python object. """
if Z3_get_symbol_kind(ctx.ref(), s) == Z3_INT_SYMBOL:
return "k!%s" % Z3_get_symbol_int(ctx.ref(), s)
else:
return Z3_get_symbol_string(ctx.ref(), s)
# Hack for having nary functions that can receive one argument that is the
# list of arguments.
# Use this when function takes a single list of arguments
def _get_args(args):
try:
if len(args) == 1 and (isinstance(args[0], tuple) or isinstance(args[0], list)):
return args[0]
elif len(args) == 1 and (isinstance(args[0], set) or isinstance(args[0], AstVector)):
return [arg for arg in args[0]]
else:
return args
except TypeError: # len is not necessarily defined when args is not a sequence (use reflection?)
return args
# Use this when function takes multiple arguments
def _get_args_ast_list(args):
try:
if isinstance(args, (set, AstVector, tuple)):
return [arg for arg in args]
else:
return args
except Exception:
return args
def _to_param_value(val):
if isinstance(val, bool):
return "true" if val else "false"
return str(val)
def z3_error_handler(c, e):
# Do nothing error handler, just avoid exit(0)
# The wrappers in z3core.py will raise a Z3Exception if an error is detected
return
class Context:
"""A Context manages all other Z3 objects, global configuration options, etc.
Z3Py uses a default global context. For most applications this is sufficient.
An application may use multiple Z3 contexts. Objects created in one context
cannot be used in another one. However, several objects may be "translated" from
one context to another. It is not safe to access Z3 objects from multiple threads.
The only exception is the method `interrupt()` that can be used to interrupt a long
computation.
The initialization method receives global configuration options for the new context.
"""
def __init__(self, *args, **kws):
if z3_debug():
_z3_assert(len(args) % 2 == 0, "Argument list must have an even number of elements.")
conf = Z3_mk_config()
for key in kws:
value = kws[key]
Z3_set_param_value(conf, str(key).upper(), _to_param_value(value))
prev = None
for a in args:
if prev is None:
prev = a
else:
Z3_set_param_value(conf, str(prev), _to_param_value(a))
prev = None
self.ctx = Z3_mk_context_rc(conf)
self.eh = Z3_set_error_handler(self.ctx, z3_error_handler)
Z3_set_ast_print_mode(self.ctx, Z3_PRINT_SMTLIB2_COMPLIANT)
Z3_del_config(conf)
def __del__(self):
Z3_del_context(self.ctx)
self.ctx = None
self.eh = None
def ref(self):
"""Return a reference to the actual C pointer to the Z3 context."""
return self.ctx
def interrupt(self):
"""Interrupt a solver performing a satisfiability test, a tactic processing a goal, or simplify functions.
This method can be invoked from a thread different from the one executing the
interruptible procedure.
"""
Z3_interrupt(self.ref())
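# --- Illustrative sketch (not part of the original file): objects live in a
# single context, so a private Context must be threaded through every
# constructor that would otherwise default to the global one.
#
# c = Context()
# s = Solver(ctx=c)        # Solver bound to the private context
# s.add(Int('x', c) > 0)   # the constant must be created in the same context
# print(s.check())         # -> sat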
# Global Z3 context
_main_ctx = None
def main_ctx():
"""Return a reference to the global Z3 context.
>>> x = Real('x')
>>> x.ctx == main_ctx()
True
>>> c = Context()
>>> c == main_ctx()
False
>>> x2 = Real('x', c)
>>> x2.ctx == c
True
>>> eq(x, x2)
False
"""
global _main_ctx
if _main_ctx is None:
_main_ctx = Context()
return _main_ctx
def _get_ctx(ctx):
if ctx is None:
return main_ctx()
else:
return ctx
def get_ctx(ctx):
return _get_ctx(ctx)
def set_param(*args, **kws):
"""Set Z3 global (or module) parameters.
>>> set_param(precision=10)
"""
if z3_debug():
_z3_assert(len(args) % 2 == 0, "Argument list must have an even number of elements.")
new_kws = {}
for k in kws:
v = kws[k]
if not set_pp_option(k, v):
new_kws[k] = v
for key in new_kws:
value = new_kws[key]
Z3_global_param_set(str(key).upper(), _to_param_value(value))
prev = None
for a in args:
if prev is None:
prev = a
else:
Z3_global_param_set(str(prev), _to_param_value(a))
prev = None
def reset_params():
"""Reset all global (or module) parameters.
"""
Z3_global_param_reset_all()
def set_option(*args, **kws):
"""Alias for 'set_param' for backward compatibility.
"""
return set_param(*args, **kws)
def get_param(name):
"""Return the value of a Z3 global (or module) parameter
>>> get_param('nlsat.reorder')
'true'
"""
ptr = (ctypes.c_char_p * 1)()
if Z3_global_param_get(str(name), ptr):
r = z3core._to_pystr(ptr[0])
return r
raise Z3Exception("failed to retrieve value for '%s'" % name)
#########################################
#
# ASTs base class
#
#########################################
# Mark objects that use pretty printer
class Z3PPObject:
"""Superclass for all Z3 objects that have support for pretty printing."""
def use_pp(self):
return True
def _repr_html_(self):
in_html = in_html_mode()
set_html_mode(True)
res = repr(self)
set_html_mode(in_html)
return res
class AstRef(Z3PPObject):
"""AST are Direct Acyclic Graphs (DAGs) used to represent sorts, declarations and expressions."""
def __init__(self, ast, ctx=None):
self.ast = ast
self.ctx = _get_ctx(ctx)
Z3_inc_ref(self.ctx.ref(), self.as_ast())
def __del__(self):
if self.ctx.ref() is not None and self.ast is not None:
Z3_dec_ref(self.ctx.ref(), self.as_ast())
self.ast = None
def __deepcopy__(self, memo={}):
return _to_ast_ref(self.ast, self.ctx)
def __str__(self):
return obj_to_string(self)
def __repr__(self):
return obj_to_string(self)
def __eq__(self, other):
return self.eq(other)
def __hash__(self):
return self.hash()
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
if is_true(self):
return True
elif is_false(self):
return False
elif is_eq(self) and self.num_args() == 2:
return self.arg(0).eq(self.arg(1))
else:
raise Z3Exception("Symbolic expressions cannot be cast to concrete Boolean values.")
def sexpr(self):
"""Return a string representing the AST node in s-expression notation.
>>> x = Int('x')
>>> ((x + 1)*x).sexpr()
'(* (+ x 1) x)'
"""
return Z3_ast_to_string(self.ctx_ref(), self.as_ast())
def as_ast(self):
"""Return a pointer to the corresponding C Z3_ast object."""
return self.ast
def get_id(self):
"""Return unique identifier for object. It can be used for hash-tables and maps."""
return Z3_get_ast_id(self.ctx_ref(), self.as_ast())
def ctx_ref(self):
"""Return a reference to the C context where this AST node is stored."""
return self.ctx.ref()
def eq(self, other):
"""Return `True` if `self` and `other` are structurally identical.
>>> x = Int('x')
>>> n1 = x + 1
>>> n2 = 1 + x
>>> n1.eq(n2)
False
>>> n1 = simplify(n1)
>>> n2 = simplify(n2)
>>> n1.eq(n2)
True
"""
if z3_debug():
_z3_assert(is_ast(other), "Z3 AST expected")
return Z3_is_eq_ast(self.ctx_ref(), self.as_ast(), other.as_ast())
def translate(self, target):
"""Translate `self` to the context `target`. That is, return a copy of `self` in the context `target`.
>>> c1 = Context()
>>> c2 = Context()
>>> x = Int('x', c1)
>>> y = Int('y', c2)
>>> # Nodes in different contexts can't be mixed.
>>> # However, we can translate nodes from one context to another.
>>> x.translate(c2) + y
x + y
"""
if z3_debug():
_z3_assert(isinstance(target, Context), "argument must be a Z3 context")
return _to_ast_ref(Z3_translate(self.ctx.ref(), self.as_ast(), target.ref()), target)
def __copy__(self):
return self.translate(self.ctx)
def hash(self):
"""Return a hashcode for the `self`.
>>> n1 = simplify(Int('x') + 1)
>>> n2 = simplify(2 + Int('x') - 1)
>>> n1.hash() == n2.hash()
True
"""
return Z3_get_ast_hash(self.ctx_ref(), self.as_ast())
def is_ast(a):
"""Return `True` if `a` is an AST node.
>>> is_ast(10)
False
>>> is_ast(IntVal(10))
True
>>> is_ast(Int('x'))
True
>>> is_ast(BoolSort())
True
>>> is_ast(Function('f', IntSort(), IntSort()))
True
>>> is_ast("x")
False
>>> is_ast(Solver())
False
"""
return isinstance(a, AstRef)
def eq(a, b):
"""Return `True` if `a` and `b` are structurally identical AST nodes.
>>> x = Int('x')
>>> y = Int('y')
>>> eq(x, y)
False
>>> eq(x + 1, x + 1)
True
>>> eq(x + 1, 1 + x)
False
>>> eq(simplify(x + 1), simplify(1 + x))
True
"""
if z3_debug():
_z3_assert(is_ast(a) and is_ast(b), "Z3 ASTs expected")
return a.eq(b)
def _ast_kind(ctx, a):
if is_ast(a):
a = a.as_ast()
return Z3_get_ast_kind(ctx.ref(), a)
def _ctx_from_ast_arg_list(args, default_ctx=None):
ctx = None
for a in args:
if is_ast(a) or is_probe(a):
if ctx is None:
ctx = a.ctx
else:
if z3_debug():
_z3_assert(ctx == a.ctx, "Context mismatch")
if ctx is None:
ctx = default_ctx
return ctx
def _ctx_from_ast_args(*args):
return _ctx_from_ast_arg_list(args)
def _to_func_decl_array(args):
sz = len(args)
_args = (FuncDecl * sz)()
for i in range(sz):
_args[i] = args[i].as_func_decl()
return _args, sz
def _to_ast_array(args):
sz = len(args)
_args = (Ast * sz)()
for i in range(sz):
_args[i] = args[i].as_ast()
return _args, sz
def _to_ref_array(ref, args):
sz = len(args)
_args = (ref * sz)()
for i in range(sz):
_args[i] = args[i].as_ast()
return _args, sz
def _to_ast_ref(a, ctx):
k = _ast_kind(ctx, a)
if k == Z3_SORT_AST:
return _to_sort_ref(a, ctx)
elif k == Z3_FUNC_DECL_AST:
return _to_func_decl_ref(a, ctx)
else:
return _to_expr_ref(a, ctx)
#########################################
#
# Sorts
#
#########################################
def _sort_kind(ctx, s):
return Z3_get_sort_kind(ctx.ref(), s)
class SortRef(AstRef):
"""A Sort is essentially a type. Every Z3 expression has a sort. A sort is an AST node."""
def as_ast(self):
return Z3_sort_to_ast(self.ctx_ref(), self.ast)
def get_id(self):
return Z3_get_ast_id(self.ctx_ref(), self.as_ast())
def kind(self):
"""Return the Z3 internal kind of a sort.
This method can be used to test if `self` is one of the Z3 builtin sorts.
>>> b = BoolSort()
>>> b.kind() == Z3_BOOL_SORT
True
>>> b.kind() == Z3_INT_SORT
False
>>> A = ArraySort(IntSort(), IntSort())
>>> A.kind() == Z3_ARRAY_SORT
True
>>> A.kind() == Z3_INT_SORT
False
"""
return _sort_kind(self.ctx, self.ast)
def subsort(self, other):
"""Return `True` if `self` is a subsort of `other`.
>>> IntSort().subsort(RealSort())
True
"""
return False
def cast(self, val):
"""Try to cast `val` as an element of sort `self`.
This method is used in Z3Py to convert Python objects such as integers,
floats, longs and strings into Z3 expressions.
>>> x = Int('x')
>>> RealSort().cast(x)
ToReal(x)
"""
if z3_debug():
_z3_assert(is_expr(val), "Z3 expression expected")
_z3_assert(self.eq(val.sort()), "Sort mismatch")
return val
def name(self):
"""Return the name (string) of sort `self`.
>>> BoolSort().name()
'Bool'
>>> ArraySort(IntSort(), IntSort()).name()
'Array'
"""
return _symbol2py(self.ctx, Z3_get_sort_name(self.ctx_ref(), self.ast))
def __eq__(self, other):
"""Return `True` if `self` and `other` are the same Z3 sort.
>>> p = Bool('p')
>>> p.sort() == BoolSort()
True
>>> p.sort() == IntSort()
False
"""
if other is None:
return False
return Z3_is_eq_sort(self.ctx_ref(), self.ast, other.ast)
def __ne__(self, other):
"""Return `True` if `self` and `other` are not the same Z3 sort.
>>> p = Bool('p')
>>> p.sort() != BoolSort()
False
>>> p.sort() != IntSort()
True
"""
return not Z3_is_eq_sort(self.ctx_ref(), self.ast, other.ast)
def __hash__(self):
""" Hash code. """
return AstRef.__hash__(self)
def is_sort(s):
"""Return `True` if `s` is a Z3 sort.
>>> is_sort(IntSort())
True
>>> is_sort(Int('x'))
False
>>> is_expr(Int('x'))
True
"""
return isinstance(s, SortRef)
def _to_sort_ref(s, ctx):
if z3_debug():
_z3_assert(isinstance(s, Sort), "Z3 Sort expected")
k = _sort_kind(ctx, s)
if k == Z3_BOOL_SORT:
return BoolSortRef(s, ctx)
elif k == Z3_INT_SORT or k == Z3_REAL_SORT:
return ArithSortRef(s, ctx)
elif k == Z3_BV_SORT:
return BitVecSortRef(s, ctx)
elif k == Z3_ARRAY_SORT:
return ArraySortRef(s, ctx)
elif k == Z3_DATATYPE_SORT:
return DatatypeSortRef(s, ctx)
elif k == Z3_FINITE_DOMAIN_SORT:
return FiniteDomainSortRef(s, ctx)
elif k == Z3_FLOATING_POINT_SORT:
return FPSortRef(s, ctx)
elif k == Z3_ROUNDING_MODE_SORT:
return FPRMSortRef(s, ctx)
elif k == Z3_RE_SORT:
return ReSortRef(s, ctx)
elif k == Z3_SEQ_SORT:
return SeqSortRef(s, ctx)
elif k == Z3_CHAR_SORT:
return CharSortRef(s, ctx)
return SortRef(s, ctx)
def _sort(ctx, a):
return _to_sort_ref(Z3_get_sort(ctx.ref(), a), ctx)
def DeclareSort(name, ctx=None):
"""Create a new uninterpreted sort named `name`.
If `ctx=None`, then the new sort is declared in the global Z3Py context.
>>> A = DeclareSort('A')
>>> a = Const('a', A)
>>> b = Const('b', A)
>>> a.sort() == A
True
>>> b.sort() == A
True
>>> a == b
a == b
"""
ctx = _get_ctx(ctx)
return SortRef(Z3_mk_uninterpreted_sort(ctx.ref(), to_symbol(name, ctx)), ctx)
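# Illustrative sketch, not upstream: elements of an uninterpreted sort carry
# no built-in structure, but equality over them is still an equivalence
# relation, so transitivity can be refuted only at the cost of unsatisfiability.
def _demo_uninterpreted_sort():
    """Equality over an uninterpreted sort is transitive.

    >>> A = DeclareSort('A')
    >>> a, b, c = Consts('a b c', A)
    >>> s = Solver()
    >>> s.add(a == b, b == c, a != c)
    >>> s.check()
    unsat
    """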
#########################################
#
# Function Declarations
#
#########################################
class FuncDeclRef(AstRef):
"""Function declaration. Every constant and function have an associated declaration.
The declaration assigns a name, a sort (i.e., type), and for function
the sort (i.e., type) of each of its arguments. Note that, in Z3,
a constant is a function with 0 arguments.
"""
def as_ast(self):
return Z3_func_decl_to_ast(self.ctx_ref(), self.ast)
def get_id(self):
return Z3_get_ast_id(self.ctx_ref(), self.as_ast())
def as_func_decl(self):
return self.ast
def name(self):
"""Return the name of the function declaration `self`.
>>> f = Function('f', IntSort(), IntSort())
>>> f.name()
'f'
>>> isinstance(f.name(), str)
True
"""
return _symbol2py(self.ctx, Z3_get_decl_name(self.ctx_ref(), self.ast))
def arity(self):
"""Return the number of arguments of a function declaration.
If `self` is a constant, then `self.arity()` is 0.
>>> f = Function('f', IntSort(), RealSort(), BoolSort())
>>> f.arity()
2
"""
return int(Z3_get_arity(self.ctx_ref(), self.ast))
def domain(self, i):
"""Return the sort of the argument `i` of a function declaration.
This method assumes that `0 <= i < self.arity()`.
>>> f = Function('f', IntSort(), RealSort(), BoolSort())
>>> f.domain(0)
Int
>>> f.domain(1)
Real
"""
if z3_debug():
_z3_assert(i < self.arity(), "Index out of bounds")
return _to_sort_ref(Z3_get_domain(self.ctx_ref(), self.ast, i), self.ctx)
def range(self):
"""Return the sort of the range of a function declaration.
For constants, this is the sort of the constant.
>>> f = Function('f', IntSort(), RealSort(), BoolSort())
>>> f.range()
Bool
"""
return _to_sort_ref(Z3_get_range(self.ctx_ref(), self.ast), self.ctx)
def kind(self):
"""Return the internal kind of a function declaration.
It can be used to identify Z3 built-in functions such as addition, multiplication, etc.
>>> x = Int('x')
>>> d = (x + 1).decl()
>>> d.kind() == Z3_OP_ADD
True
>>> d.kind() == Z3_OP_MUL
False
"""
return Z3_get_decl_kind(self.ctx_ref(), self.ast)
def params(self):
ctx = self.ctx
n = Z3_get_decl_num_parameters(self.ctx_ref(), self.ast)
result = [None for i in range(n)]
for i in range(n):
k = Z3_get_decl_parameter_kind(self.ctx_ref(), self.ast, i)
if k == Z3_PARAMETER_INT:
result[i] = Z3_get_decl_int_parameter(self.ctx_ref(), self.ast, i)
elif k == Z3_PARAMETER_DOUBLE:
result[i] = Z3_get_decl_double_parameter(self.ctx_ref(), self.ast, i)
elif k == Z3_PARAMETER_RATIONAL:
result[i] = Z3_get_decl_rational_parameter(self.ctx_ref(), self.ast, i)
elif k == Z3_PARAMETER_SYMBOL:
result[i] = Z3_get_decl_symbol_parameter(self.ctx_ref(), self.ast, i)
elif k == Z3_PARAMETER_SORT:
result[i] = SortRef(Z3_get_decl_sort_parameter(self.ctx_ref(), self.ast, i), ctx)
elif k == Z3_PARAMETER_AST:
result[i] = ExprRef(Z3_get_decl_ast_parameter(self.ctx_ref(), self.ast, i), ctx)
elif k == Z3_PARAMETER_FUNC_DECL:
result[i] = FuncDeclRef(Z3_get_decl_func_decl_parameter(self.ctx_ref(), self.ast, i), ctx)
else:
assert(False)
return result
def __call__(self, *args):
"""Create a Z3 application expression using the function `self`, and the given arguments.
The arguments must be Z3 expressions. This method assumes that
the sorts of the elements in `args` match the sorts of the
domain. Limited coercion is supported. For example, if
args[0] is a Python integer, and the function expects a Z3
integer, then the argument is automatically converted into a
Z3 integer.
>>> f = Function('f', IntSort(), RealSort(), BoolSort())
>>> x = Int('x')
>>> y = Real('y')
>>> f(x, y)
f(x, y)
>>> f(x, x)
f(x, ToReal(x))
"""
args = _get_args(args)
num = len(args)
if z3_debug():
_z3_assert(num == self.arity(), "Incorrect number of arguments to %s" % self)
_args = (Ast * num)()
saved = []
for i in range(num):
# self.domain(i).cast(args[i]) may create a new Z3 expression,
# then we must save in 'saved' to prevent it from being garbage collected.
tmp = self.domain(i).cast(args[i])
saved.append(tmp)
_args[i] = tmp.as_ast()
return _to_expr_ref(Z3_mk_app(self.ctx_ref(), self.ast, len(args), _args), self.ctx)
def is_func_decl(a):
"""Return `True` if `a` is a Z3 function declaration.
>>> f = Function('f', IntSort(), IntSort())
>>> is_func_decl(f)
True
>>> x = Real('x')
>>> is_func_decl(x)
False
"""
return isinstance(a, FuncDeclRef)
def Function(name, *sig):
"""Create a new Z3 uninterpreted function with the given sorts.
>>> f = Function('f', IntSort(), IntSort())
>>> f(f(0))
f(f(0))
"""
sig = _get_args(sig)
if z3_debug():
_z3_assert(len(sig) > 0, "At least two arguments expected")
arity = len(sig) - 1
rng = sig[arity]
if z3_debug():
_z3_assert(is_sort(rng), "Z3 sort expected")
dom = (Sort * arity)()
for i in range(arity):
if z3_debug():
_z3_assert(is_sort(sig[i]), "Z3 sort expected")
dom[i] = sig[i].ast
ctx = rng.ctx
return FuncDeclRef(Z3_mk_func_decl(ctx.ref(), to_symbol(name, ctx), arity, dom, rng.ast), ctx)
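# Illustrative sketch, not upstream: uninterpreted functions are unconstrained
# except for functionality (congruence), i.e., equal arguments must be mapped
# to equal results. The demo refutes a violation of congruence.
def _demo_congruence():
    """Uninterpreted functions respect congruence: x == y forces f(x) == f(y).

    >>> f = Function('f', IntSort(), IntSort())
    >>> x, y = Ints('x y')
    >>> s = Solver()
    >>> s.add(x == y, f(x) != f(y))
    >>> s.check()
    unsat
    """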
def FreshFunction(*sig):
"""Create a new fresh Z3 uninterpreted function with the given sorts.
"""
sig = _get_args(sig)
if z3_debug():
_z3_assert(len(sig) > 0, "At least one sort expected")
arity = len(sig) - 1
rng = sig[arity]
if z3_debug():
_z3_assert(is_sort(rng), "Z3 sort expected")
dom = (Sort * arity)()
for i in range(arity):
if z3_debug():
_z3_assert(is_sort(sig[i]), "Z3 sort expected")
dom[i] = sig[i].ast
ctx = rng.ctx
return FuncDeclRef(Z3_mk_fresh_func_decl(ctx.ref(), "f", arity, dom, rng.ast), ctx)
def _to_func_decl_ref(a, ctx):
return FuncDeclRef(a, ctx)
def RecFunction(name, *sig):
"""Create a new Z3 recursive with the given sorts."""
sig = _get_args(sig)
if z3_debug():
_z3_assert(len(sig) > 0, "At least two arguments expected")
arity = len(sig) - 1
rng = sig[arity]
if z3_debug():
_z3_assert(is_sort(rng), "Z3 sort expected")
dom = (Sort * arity)()
for i in range(arity):
if z3_debug():
_z3_assert(is_sort(sig[i]), "Z3 sort expected")
dom[i] = sig[i].ast
ctx = rng.ctx
return FuncDeclRef(Z3_mk_rec_func_decl(ctx.ref(), to_symbol(name, ctx), arity, dom, rng.ast), ctx)
def RecAddDefinition(f, args, body):
"""Set the body of a recursive function.
Recursive definitions can be simplified if they are applied to ground
arguments.
>>> ctx = Context()
>>> fac = RecFunction('fac', IntSort(ctx), IntSort(ctx))
>>> n = Int('n', ctx)
>>> RecAddDefinition(fac, n, If(n == 0, 1, n*fac(n-1)))
>>> simplify(fac(5))
120
>>> s = Solver(ctx=ctx)
>>> s.add(fac(n) < 3)
>>> s.check()
sat
>>> s.model().eval(fac(5))
120
"""
if is_app(args):
args = [args]
ctx = body.ctx
args = _get_args(args)
n = len(args)
_args = (Ast * n)()
for i in range(n):
_args[i] = args[i].ast
Z3_add_rec_def(ctx.ref(), f.ast, n, _args, body.ast)
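# Illustrative sketch, not upstream (helper name is ours): another recursive
# definition in the style of the `fac` example above, here computing
# triangular numbers; `simplify` unfolds it on a ground argument.
def _demo_recursive_triangular():
    """A recursive definition of triangular numbers, unfolded on ground input.

    >>> tri = RecFunction('tri', IntSort(), IntSort())
    >>> n = Int('n')
    >>> RecAddDefinition(tri, n, If(n <= 0, 0, n + tri(n - 1)))
    >>> simplify(tri(4))
    10
    """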
#########################################
#
# Expressions
#
#########################################
class ExprRef(AstRef):
"""Constraints, formulas and terms are expressions in Z3.
Expressions are ASTs. Every expression has a sort.
There are three main kinds of expressions:
function applications, quantifiers and bounded variables.
A constant is a function application with 0 arguments.
For quantifier-free problems, all expressions are
function applications.
"""
def as_ast(self):
return self.ast
def get_id(self):
return Z3_get_ast_id(self.ctx_ref(), self.as_ast())
def sort(self):
"""Return the sort of expression `self`.
>>> x = Int('x')
>>> (x + 1).sort()
Int
>>> y = Real('y')
>>> (x + y).sort()
Real
"""
return _sort(self.ctx, self.as_ast())
def sort_kind(self):
"""Shorthand for `self.sort().kind()`.
>>> a = Array('a', IntSort(), IntSort())
>>> a.sort_kind() == Z3_ARRAY_SORT
True
>>> a.sort_kind() == Z3_INT_SORT
False
"""
return self.sort().kind()
def __eq__(self, other):
"""Return a Z3 expression that represents the constraint `self == other`.
If `other` is `None`, then this method simply returns `False`.
>>> a = Int('a')
>>> b = Int('b')
>>> a == b
a == b
>>> a is None
False
"""
if other is None:
return False
a, b = _coerce_exprs(self, other)
return BoolRef(Z3_mk_eq(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __hash__(self):
""" Hash code. """
return AstRef.__hash__(self)
def __ne__(self, other):
"""Return a Z3 expression that represents the constraint `self != other`.
If `other` is `None`, then this method simply returns `True`.
>>> a = Int('a')
>>> b = Int('b')
>>> a != b
a != b
>>> a is not None
True
"""
if other is None:
return True
a, b = _coerce_exprs(self, other)
_args, sz = _to_ast_array((a, b))
return BoolRef(Z3_mk_distinct(self.ctx_ref(), 2, _args), self.ctx)
def params(self):
return self.decl().params()
def decl(self):
"""Return the Z3 function declaration associated with a Z3 application.
>>> f = Function('f', IntSort(), IntSort())
>>> a = Int('a')
>>> t = f(a)
>>> eq(t.decl(), f)
True
>>> (a + 1).decl()
+
"""
if z3_debug():
_z3_assert(is_app(self), "Z3 application expected")
return FuncDeclRef(Z3_get_app_decl(self.ctx_ref(), self.as_ast()), self.ctx)
def num_args(self):
"""Return the number of arguments of a Z3 application.
>>> a = Int('a')
>>> b = Int('b')
>>> (a + b).num_args()
2
>>> f = Function('f', IntSort(), IntSort(), IntSort(), IntSort())
>>> t = f(a, b, 0)
>>> t.num_args()
3
"""
if z3_debug():
_z3_assert(is_app(self), "Z3 application expected")
return int(Z3_get_app_num_args(self.ctx_ref(), self.as_ast()))
def arg(self, idx):
"""Return argument `idx` of the application `self`.
This method assumes that `self` is a function application with at least `idx+1` arguments.
>>> a = Int('a')
>>> b = Int('b')
>>> f = Function('f', IntSort(), IntSort(), IntSort(), IntSort())
>>> t = f(a, b, 0)
>>> t.arg(0)
a
>>> t.arg(1)
b
>>> t.arg(2)
0
"""
if z3_debug():
_z3_assert(is_app(self), "Z3 application expected")
_z3_assert(idx < self.num_args(), "Invalid argument index")
return _to_expr_ref(Z3_get_app_arg(self.ctx_ref(), self.as_ast(), idx), self.ctx)
def children(self):
"""Return a list containing the children of the given expression
>>> a = Int('a')
>>> b = Int('b')
>>> f = Function('f', IntSort(), IntSort(), IntSort(), IntSort())
>>> t = f(a, b, 0)
>>> t.children()
[a, b, 0]
"""
if is_app(self):
return [self.arg(i) for i in range(self.num_args())]
else:
return []
def _to_expr_ref(a, ctx):
if isinstance(a, Pattern):
return PatternRef(a, ctx)
ctx_ref = ctx.ref()
k = Z3_get_ast_kind(ctx_ref, a)
if k == Z3_QUANTIFIER_AST:
return QuantifierRef(a, ctx)
sk = Z3_get_sort_kind(ctx_ref, Z3_get_sort(ctx_ref, a))
if sk == Z3_BOOL_SORT:
return BoolRef(a, ctx)
if sk == Z3_INT_SORT:
if k == Z3_NUMERAL_AST:
return IntNumRef(a, ctx)
return ArithRef(a, ctx)
if sk == Z3_REAL_SORT:
if k == Z3_NUMERAL_AST:
return RatNumRef(a, ctx)
if _is_algebraic(ctx, a):
return AlgebraicNumRef(a, ctx)
return ArithRef(a, ctx)
if sk == Z3_BV_SORT:
if k == Z3_NUMERAL_AST:
return BitVecNumRef(a, ctx)
else:
return BitVecRef(a, ctx)
if sk == Z3_ARRAY_SORT:
return ArrayRef(a, ctx)
if sk == Z3_DATATYPE_SORT:
return DatatypeRef(a, ctx)
if sk == Z3_FLOATING_POINT_SORT:
if k == Z3_APP_AST and _is_numeral(ctx, a):
return FPNumRef(a, ctx)
else:
return FPRef(a, ctx)
if sk == Z3_FINITE_DOMAIN_SORT:
if k == Z3_NUMERAL_AST:
return FiniteDomainNumRef(a, ctx)
else:
return FiniteDomainRef(a, ctx)
if sk == Z3_ROUNDING_MODE_SORT:
return FPRMRef(a, ctx)
if sk == Z3_SEQ_SORT:
return SeqRef(a, ctx)
if sk == Z3_CHAR_SORT:
return CharRef(a, ctx)
if sk == Z3_RE_SORT:
return ReRef(a, ctx)
return ExprRef(a, ctx)
def _coerce_expr_merge(s, a):
if is_expr(a):
s1 = a.sort()
if s is None:
return s1
if s1.eq(s):
return s
elif s.subsort(s1):
return s1
elif s1.subsort(s):
return s
else:
if z3_debug():
_z3_assert(s1.ctx == s.ctx, "context mismatch")
_z3_assert(False, "sort mismatch")
else:
return s
def _coerce_exprs(a, b, ctx=None):
if not is_expr(a) and not is_expr(b):
a = _py2expr(a, ctx)
b = _py2expr(b, ctx)
if isinstance(a, str) and isinstance(b, SeqRef):
a = StringVal(a, b.ctx)
if isinstance(b, str) and isinstance(a, SeqRef):
b = StringVal(b, a.ctx)
s = None
s = _coerce_expr_merge(s, a)
s = _coerce_expr_merge(s, b)
a = s.cast(a)
b = s.cast(b)
return (a, b)
def _reduce(func, sequence, initial):
result = initial
for element in sequence:
result = func(result, element)
return result
def _coerce_expr_list(alist, ctx=None):
has_expr = False
for a in alist:
if is_expr(a):
has_expr = True
break
if not has_expr:
alist = [_py2expr(a, ctx) for a in alist]
s = _reduce(_coerce_expr_merge, alist, None)
return [s.cast(a) for a in alist]
def is_expr(a):
"""Return `True` if `a` is a Z3 expression.
>>> a = Int('a')
>>> is_expr(a)
True
>>> is_expr(a + 1)
True
>>> is_expr(IntSort())
False
>>> is_expr(1)
False
>>> is_expr(IntVal(1))
True
>>> x = Int('x')
>>> is_expr(ForAll(x, x >= 0))
True
>>> is_expr(FPVal(1.0))
True
"""
return isinstance(a, ExprRef)
def is_app(a):
"""Return `True` if `a` is a Z3 function application.
Note that constants are function applications with 0 arguments.
>>> a = Int('a')
>>> is_app(a)
True
>>> is_app(a + 1)
True
>>> is_app(IntSort())
False
>>> is_app(1)
False
>>> is_app(IntVal(1))
True
>>> x = Int('x')
>>> is_app(ForAll(x, x >= 0))
False
"""
if not isinstance(a, ExprRef):
return False
k = _ast_kind(a.ctx, a)
return k == Z3_NUMERAL_AST or k == Z3_APP_AST
def is_const(a):
"""Return `True` if `a` is Z3 constant/variable expression.
>>> a = Int('a')
>>> is_const(a)
True
>>> is_const(a + 1)
False
>>> is_const(1)
False
>>> is_const(IntVal(1))
True
>>> x = Int('x')
>>> is_const(ForAll(x, x >= 0))
False
"""
return is_app(a) and a.num_args() == 0
def is_var(a):
"""Return `True` if `a` is variable.
Z3 uses de-Bruijn indices for representing bound variables in
quantifiers.
>>> x = Int('x')
>>> is_var(x)
False
>>> is_const(x)
True
>>> f = Function('f', IntSort(), IntSort())
>>> # Z3 replaces x with bound variables when ForAll is executed.
>>> q = ForAll(x, f(x) == x)
>>> b = q.body()
>>> b
f(Var(0)) == Var(0)
>>> b.arg(1)
Var(0)
>>> is_var(b.arg(1))
True
"""
return is_expr(a) and _ast_kind(a.ctx, a) == Z3_VAR_AST
def get_var_index(a):
"""Return the de-Bruijn index of the Z3 bounded variable `a`.
>>> x = Int('x')
>>> y = Int('y')
>>> is_var(x)
False
>>> is_const(x)
True
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> # Z3 replaces x and y with bound variables when ForAll is executed.
>>> q = ForAll([x, y], f(x, y) == x + y)
>>> q.body()
f(Var(1), Var(0)) == Var(1) + Var(0)
>>> b = q.body()
>>> b.arg(0)
f(Var(1), Var(0))
>>> v1 = b.arg(0).arg(0)
>>> v2 = b.arg(0).arg(1)
>>> v1
Var(1)
>>> v2
Var(0)
>>> get_var_index(v1)
1
>>> get_var_index(v2)
0
"""
if z3_debug():
_z3_assert(is_var(a), "Z3 bound variable expected")
return int(Z3_get_index_value(a.ctx.ref(), a.as_ast()))
def is_app_of(a, k):
"""Return `True` if `a` is an application of the given kind `k`.
>>> x = Int('x')
>>> n = x + 1
>>> is_app_of(n, Z3_OP_ADD)
True
>>> is_app_of(n, Z3_OP_MUL)
False
"""
return is_app(a) and a.decl().kind() == k
def If(a, b, c, ctx=None):
"""Create a Z3 if-then-else expression.
>>> x = Int('x')
>>> y = Int('y')
>>> max = If(x > y, x, y)
>>> max
If(x > y, x, y)
>>> simplify(max)
If(x <= y, y, x)
"""
if isinstance(a, Probe) or isinstance(b, Tactic) or isinstance(c, Tactic):
return Cond(a, b, c, ctx)
else:
ctx = _get_ctx(_ctx_from_ast_arg_list([a, b, c], ctx))
s = BoolSort(ctx)
a = s.cast(a)
b, c = _coerce_exprs(b, c, ctx)
if z3_debug():
_z3_assert(a.ctx == b.ctx, "Context mismatch")
return _to_expr_ref(Z3_mk_ite(ctx.ref(), a.as_ast(), b.as_ast(), c.as_ast()), ctx)
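# Illustrative sketch, not upstream: if-then-else is the standard way to
# encode max/min; the property that the result bounds both arguments is
# verified by refuting its negation.
def _demo_ite_max():
    """An if-then-else max is provably an upper bound on both arguments.

    >>> x, y = Ints('x y')
    >>> m = If(x >= y, x, y)
    >>> s = Solver()
    >>> s.add(Not(And(m >= x, m >= y)))
    >>> s.check()
    unsat
    """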
def Distinct(*args):
"""Create a Z3 distinct expression.
>>> x = Int('x')
>>> y = Int('y')
>>> Distinct(x, y)
x != y
>>> z = Int('z')
>>> Distinct(x, y, z)
Distinct(x, y, z)
>>> simplify(Distinct(x, y, z))
Distinct(x, y, z)
>>> simplify(Distinct(x, y, z), blast_distinct=True)
And(Not(x == y), Not(x == z), Not(y == z))
"""
args = _get_args(args)
ctx = _ctx_from_ast_arg_list(args)
if z3_debug():
_z3_assert(ctx is not None, "At least one of the arguments must be a Z3 expression")
args = _coerce_expr_list(args, ctx)
_args, sz = _to_ast_array(args)
return BoolRef(Z3_mk_distinct(ctx.ref(), sz, _args), ctx)
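# Illustrative sketch, not upstream: Distinct is a compact pairwise-disequality
# constraint, convenient for pigeonhole-style arguments like the one below.
def _demo_distinct_pigeonhole():
    """Three pairwise-distinct integers cannot all lie in {0, 1}.

    >>> x, y, z = Ints('x y z')
    >>> s = Solver()
    >>> s.add(Distinct(x, y, z))
    >>> s.add([And(0 <= v, v <= 1) for v in (x, y, z)])
    >>> s.check()
    unsat
    """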
def _mk_bin(f, a, b):
args = (Ast * 2)()
if z3_debug():
_z3_assert(a.ctx == b.ctx, "Context mismatch")
args[0] = a.as_ast()
args[1] = b.as_ast()
return f(a.ctx.ref(), 2, args)
def Const(name, sort):
"""Create a constant of the given sort.
>>> Const('x', IntSort())
x
"""
if z3_debug():
_z3_assert(isinstance(sort, SortRef), "Z3 sort expected")
ctx = sort.ctx
return _to_expr_ref(Z3_mk_const(ctx.ref(), to_symbol(name, ctx), sort.ast), ctx)
def Consts(names, sort):
"""Create several constants of the given sort.
`names` is a string containing the names of all constants to be created.
Blank spaces separate the names of different constants.
>>> x, y, z = Consts('x y z', IntSort())
>>> x + y + z
x + y + z
"""
if isinstance(names, str):
names = names.split(" ")
return [Const(name, sort) for name in names]
def FreshConst(sort, prefix="c"):
"""Create a fresh constant of a specified sort"""
ctx = _get_ctx(sort.ctx)
return _to_expr_ref(Z3_mk_fresh_const(ctx.ref(), prefix, sort.ast), ctx)
def Var(idx, s):
"""Create a Z3 free variable. Free variables are used to create quantified formulas.
>>> Var(0, IntSort())
Var(0)
>>> eq(Var(0, IntSort()), Var(0, BoolSort()))
False
"""
if z3_debug():
_z3_assert(is_sort(s), "Z3 sort expected")
return _to_expr_ref(Z3_mk_bound(s.ctx_ref(), idx, s.ast), s.ctx)
def RealVar(idx, ctx=None):
"""
Create a real free variable. Free variables are used to create quantified formulas.
They are also used to create polynomials.
>>> RealVar(0)
Var(0)
"""
return Var(idx, RealSort(ctx))
def RealVarVector(n, ctx=None):
"""
Create a list of Real free variables.
The variables have ids: 0, 1, ..., n-1
>>> x0, x1, x2, x3 = RealVarVector(4)
>>> x2
Var(2)
"""
return [RealVar(i, ctx) for i in range(n)]
#########################################
#
# Booleans
#
#########################################
class BoolSortRef(SortRef):
"""Boolean sort."""
def cast(self, val):
"""Try to cast `val` as a Boolean.
>>> x = BoolSort().cast(True)
>>> x
True
>>> is_expr(x)
True
>>> is_expr(True)
False
>>> x.sort()
Bool
"""
if isinstance(val, bool):
return BoolVal(val, self.ctx)
if z3_debug():
if not is_expr(val):
msg = "True, False or Z3 Boolean expression expected. Received %s of type %s"
_z3_assert(is_expr(val), msg % (val, type(val)))
if not self.eq(val.sort()):
_z3_assert(self.eq(val.sort()), "Value cannot be converted into a Z3 Boolean value")
return val
def subsort(self, other):
return isinstance(other, ArithSortRef)
def is_int(self):
return True
def is_bool(self):
return True
class BoolRef(ExprRef):
"""All Boolean expressions are instances of this class."""
def sort(self):
return BoolSortRef(Z3_get_sort(self.ctx_ref(), self.as_ast()), self.ctx)
def __rmul__(self, other):
return self * other
def __mul__(self, other):
"""Create the Z3 expression `self * other`.
"""
if other == 1:
return self
if other == 0:
return 0
return If(self, other, 0)
def is_bool(a):
"""Return `True` if `a` is a Z3 Boolean expression.
>>> p = Bool('p')
>>> is_bool(p)
True
>>> q = Bool('q')
>>> is_bool(And(p, q))
True
>>> x = Real('x')
>>> is_bool(x)
False
>>> is_bool(x == 0)
True
"""
return isinstance(a, BoolRef)
def is_true(a):
"""Return `True` if `a` is the Z3 true expression.
>>> p = Bool('p')
>>> is_true(p)
False
>>> is_true(simplify(p == p))
True
>>> x = Real('x')
>>> is_true(x == 0)
False
>>> # True is a Python Boolean expression
>>> is_true(True)
False
"""
return is_app_of(a, Z3_OP_TRUE)
def is_false(a):
"""Return `True` if `a` is the Z3 false expression.
>>> p = Bool('p')
>>> is_false(p)
False
>>> is_false(False)
False
>>> is_false(BoolVal(False))
True
"""
return is_app_of(a, Z3_OP_FALSE)
def is_and(a):
"""Return `True` if `a` is a Z3 and expression.
>>> p, q = Bools('p q')
>>> is_and(And(p, q))
True
>>> is_and(Or(p, q))
False
"""
return is_app_of(a, Z3_OP_AND)
def is_or(a):
"""Return `True` if `a` is a Z3 or expression.
>>> p, q = Bools('p q')
>>> is_or(Or(p, q))
True
>>> is_or(And(p, q))
False
"""
return is_app_of(a, Z3_OP_OR)
def is_implies(a):
"""Return `True` if `a` is a Z3 implication expression.
>>> p, q = Bools('p q')
>>> is_implies(Implies(p, q))
True
>>> is_implies(And(p, q))
False
"""
return is_app_of(a, Z3_OP_IMPLIES)
def is_not(a):
"""Return `True` if `a` is a Z3 not expression.
>>> p = Bool('p')
>>> is_not(p)
False
>>> is_not(Not(p))
True
"""
return is_app_of(a, Z3_OP_NOT)
def is_eq(a):
"""Return `True` if `a` is a Z3 equality expression.
>>> x, y = Ints('x y')
>>> is_eq(x == y)
True
"""
return is_app_of(a, Z3_OP_EQ)
def is_distinct(a):
"""Return `True` if `a` is a Z3 distinct expression.
>>> x, y, z = Ints('x y z')
>>> is_distinct(x == y)
False
>>> is_distinct(Distinct(x, y, z))
True
"""
return is_app_of(a, Z3_OP_DISTINCT)
def BoolSort(ctx=None):
"""Return the Boolean Z3 sort. If `ctx=None`, then the global context is used.
>>> BoolSort()
Bool
>>> p = Const('p', BoolSort())
>>> is_bool(p)
True
>>> r = Function('r', IntSort(), IntSort(), BoolSort())
>>> r(0, 1)
r(0, 1)
>>> is_bool(r(0, 1))
True
"""
ctx = _get_ctx(ctx)
return BoolSortRef(Z3_mk_bool_sort(ctx.ref()), ctx)
def BoolVal(val, ctx=None):
"""Return the Boolean value `True` or `False`. If `ctx=None`, then the global context is used.
>>> BoolVal(True)
True
>>> is_true(BoolVal(True))
True
>>> is_true(True)
False
>>> is_false(BoolVal(False))
True
"""
ctx = _get_ctx(ctx)
if val:
return BoolRef(Z3_mk_true(ctx.ref()), ctx)
else:
return BoolRef(Z3_mk_false(ctx.ref()), ctx)
def Bool(name, ctx=None):
"""Return a Boolean constant named `name`. If `ctx=None`, then the global context is used.
>>> p = Bool('p')
>>> q = Bool('q')
>>> And(p, q)
And(p, q)
"""
ctx = _get_ctx(ctx)
return BoolRef(Z3_mk_const(ctx.ref(), to_symbol(name, ctx), BoolSort(ctx).ast), ctx)
def Bools(names, ctx=None):
"""Return a tuple of Boolean constants.
`names` is a single string containing all names separated by blank spaces.
If `ctx=None`, then the global context is used.
>>> p, q, r = Bools('p q r')
>>> And(p, Or(q, r))
And(p, Or(q, r))
"""
ctx = _get_ctx(ctx)
if isinstance(names, str):
names = names.split(" ")
return [Bool(name, ctx) for name in names]
def BoolVector(prefix, sz, ctx=None):
"""Return a list of Boolean constants of size `sz`.
The constants are named using the given prefix.
If `ctx=None`, then the global context is used.
>>> P = BoolVector('p', 3)
>>> P
[p__0, p__1, p__2]
>>> And(P)
And(p__0, p__1, p__2)
"""
return [Bool("%s__%s" % (prefix, i), ctx) for i in range(sz)]
def FreshBool(prefix="b", ctx=None):
"""Return a fresh Boolean constant in the given context using the given prefix.
If `ctx=None`, then the global context is used.
>>> b1 = FreshBool()
>>> b2 = FreshBool()
>>> eq(b1, b2)
False
"""
ctx = _get_ctx(ctx)
return BoolRef(Z3_mk_fresh_const(ctx.ref(), prefix, BoolSort(ctx).ast), ctx)
def Implies(a, b, ctx=None):
"""Create a Z3 implies expression.
>>> p, q = Bools('p q')
>>> Implies(p, q)
Implies(p, q)
"""
ctx = _get_ctx(_ctx_from_ast_arg_list([a, b], ctx))
s = BoolSort(ctx)
a = s.cast(a)
b = s.cast(b)
return BoolRef(Z3_mk_implies(ctx.ref(), a.as_ast(), b.as_ast()), ctx)
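# Illustrative sketch, not upstream: implication is definable from Or/Not;
# equivalence is checked by asserting that the two forms can differ.
def _demo_implies_as_disjunction():
    """Implies(p, q) is logically the same as Or(Not(p), q).

    >>> p, q = Bools('p q')
    >>> s = Solver()
    >>> s.add(Implies(p, q) != Or(Not(p), q))
    >>> s.check()
    unsat
    """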
def Xor(a, b, ctx=None):
"""Create a Z3 Xor expression.
>>> p, q = Bools('p q')
>>> Xor(p, q)
Xor(p, q)
>>> simplify(Xor(p, q))
Not(p == q)
"""
ctx = _get_ctx(_ctx_from_ast_arg_list([a, b], ctx))
s = BoolSort(ctx)
a = s.cast(a)
b = s.cast(b)
return BoolRef(Z3_mk_xor(ctx.ref(), a.as_ast(), b.as_ast()), ctx)
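# Illustrative sketch, not upstream: Xor is exclusive disjunction, so a
# formula Xor'ed with itself is always false.
def _demo_xor_cancellation():
    """Xor of a formula with itself is unsatisfiable.

    >>> p = Bool('p')
    >>> s = Solver()
    >>> s.add(Xor(p, p))
    >>> s.check()
    unsat
    """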
def Not(a, ctx=None):
"""Create a Z3 not expression or probe.
>>> p = Bool('p')
>>> Not(Not(p))
Not(Not(p))
>>> simplify(Not(Not(p)))
p
"""
ctx = _get_ctx(_ctx_from_ast_arg_list([a], ctx))
if is_probe(a):
# Not is also used to build probes
return Probe(Z3_probe_not(ctx.ref(), a.probe), ctx)
else:
s = BoolSort(ctx)
a = s.cast(a)
return BoolRef(Z3_mk_not(ctx.ref(), a.as_ast()), ctx)
def mk_not(a):
if is_not(a):
return a.arg(0)
else:
return Not(a)
def _has_probe(args):
"""Return `True` if one of the elements of the given collection is a Z3 probe."""
for arg in args:
if is_probe(arg):
return True
return False
def And(*args):
"""Create a Z3 and-expression or and-probe.
>>> p, q, r = Bools('p q r')
>>> And(p, q, r)
And(p, q, r)
>>> P = BoolVector('p', 5)
>>> And(P)
And(p__0, p__1, p__2, p__3, p__4)
"""
last_arg = None
if len(args) > 0:
last_arg = args[len(args) - 1]
if isinstance(last_arg, Context):
ctx = args[len(args) - 1]
args = args[:len(args) - 1]
elif len(args) == 1 and isinstance(args[0], AstVector):
ctx = args[0].ctx
args = [a for a in args[0]]
else:
ctx = None
args = _get_args(args)
ctx = _get_ctx(_ctx_from_ast_arg_list(args, ctx))
if z3_debug():
_z3_assert(ctx is not None, "At least one of the arguments must be a Z3 expression or probe")
if _has_probe(args):
return _probe_and(args, ctx)
else:
args = _coerce_expr_list(args, ctx)
_args, sz = _to_ast_array(args)
return BoolRef(Z3_mk_and(ctx.ref(), sz, _args), ctx)
def Or(*args):
"""Create a Z3 or-expression or or-probe.
>>> p, q, r = Bools('p q r')
>>> Or(p, q, r)
Or(p, q, r)
>>> P = BoolVector('p', 5)
>>> Or(P)
Or(p__0, p__1, p__2, p__3, p__4)
"""
last_arg = None
if len(args) > 0:
last_arg = args[len(args) - 1]
if isinstance(last_arg, Context):
ctx = args[len(args) - 1]
args = args[:len(args) - 1]
elif len(args) == 1 and isinstance(args[0], AstVector):
ctx = args[0].ctx
args = [a for a in args[0]]
else:
ctx = None
args = _get_args(args)
ctx = _get_ctx(_ctx_from_ast_arg_list(args, ctx))
if z3_debug():
_z3_assert(ctx is not None, "At least one of the arguments must be a Z3 expression or probe")
if _has_probe(args):
return _probe_or(args, ctx)
else:
args = _coerce_expr_list(args, ctx)
_args, sz = _to_ast_array(args)
return BoolRef(Z3_mk_or(ctx.ref(), sz, _args), ctx)
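# Illustrative sketch, not upstream: a classic tautology over And/Or/Not,
# established by refuting its negation.
def _demo_de_morgan():
    """One of De Morgan's laws, checked by refuting its negation.

    >>> p, q = Bools('p q')
    >>> s = Solver()
    >>> s.add(Not(And(p, q)) != Or(Not(p), Not(q)))
    >>> s.check()
    unsat
    """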
#########################################
#
# Patterns
#
#########################################
class PatternRef(ExprRef):
"""Patterns are hints for quantifier instantiation.
"""
def as_ast(self):
return Z3_pattern_to_ast(self.ctx_ref(), self.ast)
def get_id(self):
return Z3_get_ast_id(self.ctx_ref(), self.as_ast())
def is_pattern(a):
"""Return `True` if `a` is a Z3 pattern (hint for quantifier instantiation.
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> q = ForAll(x, f(x) == 0, patterns = [ f(x) ])
>>> q
ForAll(x, f(x) == 0)
>>> q.num_patterns()
1
>>> is_pattern(q.pattern(0))
True
>>> q.pattern(0)
f(Var(0))
"""
return isinstance(a, PatternRef)
def MultiPattern(*args):
"""Create a Z3 multi-pattern using the given expressions `*args`
>>> f = Function('f', IntSort(), IntSort())
>>> g = Function('g', IntSort(), IntSort())
>>> x = Int('x')
>>> q = ForAll(x, f(x) != g(x), patterns = [ MultiPattern(f(x), g(x)) ])
>>> q
ForAll(x, f(x) != g(x))
>>> q.num_patterns()
1
>>> is_pattern(q.pattern(0))
True
>>> q.pattern(0)
MultiPattern(f(Var(0)), g(Var(0)))
"""
if z3_debug():
_z3_assert(len(args) > 0, "At least one argument expected")
_z3_assert(all([is_expr(a) for a in args]), "Z3 expressions expected")
ctx = args[0].ctx
args, sz = _to_ast_array(args)
return PatternRef(Z3_mk_pattern(ctx.ref(), sz, args), ctx)
def _to_pattern(arg):
if is_pattern(arg):
return arg
else:
return MultiPattern(arg)
#########################################
#
# Quantifiers
#
#########################################
class QuantifierRef(BoolRef):
"""Universally and Existentially quantified formulas."""
def as_ast(self):
return self.ast
def get_id(self):
return Z3_get_ast_id(self.ctx_ref(), self.as_ast())
def sort(self):
"""Return the Boolean sort or sort of Lambda."""
if self.is_lambda():
return _sort(self.ctx, self.as_ast())
return BoolSort(self.ctx)
def is_forall(self):
"""Return `True` if `self` is a universal quantifier.
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> q = ForAll(x, f(x) == 0)
>>> q.is_forall()
True
>>> q = Exists(x, f(x) != 0)
>>> q.is_forall()
False
"""
return Z3_is_quantifier_forall(self.ctx_ref(), self.ast)
def is_exists(self):
"""Return `True` if `self` is an existential quantifier.
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> q = ForAll(x, f(x) == 0)
>>> q.is_exists()
False
>>> q = Exists(x, f(x) != 0)
>>> q.is_exists()
True
"""
return Z3_is_quantifier_exists(self.ctx_ref(), self.ast)
def is_lambda(self):
"""Return `True` if `self` is a lambda expression.
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> q = Lambda(x, f(x))
>>> q.is_lambda()
True
>>> q = Exists(x, f(x) != 0)
>>> q.is_lambda()
False
"""
return Z3_is_lambda(self.ctx_ref(), self.ast)
def __getitem__(self, arg):
"""Return the Z3 expression `self[arg]`.
"""
if z3_debug():
_z3_assert(self.is_lambda(), "quantifier should be a lambda expression")
return _array_select(self, arg)
def weight(self):
"""Return the weight annotation of `self`.
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> q = ForAll(x, f(x) == 0)
>>> q.weight()
1
>>> q = ForAll(x, f(x) == 0, weight=10)
>>> q.weight()
10
"""
return int(Z3_get_quantifier_weight(self.ctx_ref(), self.ast))
def num_patterns(self):
"""Return the number of patterns (i.e., quantifier instantiation hints) in `self`.
>>> f = Function('f', IntSort(), IntSort())
>>> g = Function('g', IntSort(), IntSort())
>>> x = Int('x')
>>> q = ForAll(x, f(x) != g(x), patterns = [ f(x), g(x) ])
>>> q.num_patterns()
2
"""
return int(Z3_get_quantifier_num_patterns(self.ctx_ref(), self.ast))
def pattern(self, idx):
"""Return a pattern (i.e., quantifier instantiation hints) in `self`.
>>> f = Function('f', IntSort(), IntSort())
>>> g = Function('g', IntSort(), IntSort())
>>> x = Int('x')
>>> q = ForAll(x, f(x) != g(x), patterns = [ f(x), g(x) ])
>>> q.num_patterns()
2
>>> q.pattern(0)
f(Var(0))
>>> q.pattern(1)
g(Var(0))
"""
if z3_debug():
_z3_assert(idx < self.num_patterns(), "Invalid pattern idx")
return PatternRef(Z3_get_quantifier_pattern_ast(self.ctx_ref(), self.ast, idx), self.ctx)
def num_no_patterns(self):
"""Return the number of no-patterns."""
return Z3_get_quantifier_num_no_patterns(self.ctx_ref(), self.ast)
def no_pattern(self, idx):
"""Return a no-pattern."""
if z3_debug():
_z3_assert(idx < self.num_no_patterns(), "Invalid no-pattern idx")
return _to_expr_ref(Z3_get_quantifier_no_pattern_ast(self.ctx_ref(), self.ast, idx), self.ctx)
def body(self):
"""Return the expression being quantified.
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> q = ForAll(x, f(x) == 0)
>>> q.body()
f(Var(0)) == 0
"""
return _to_expr_ref(Z3_get_quantifier_body(self.ctx_ref(), self.ast), self.ctx)
def num_vars(self):
"""Return the number of variables bounded by this quantifier.
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> x = Int('x')
>>> y = Int('y')
>>> q = ForAll([x, y], f(x, y) >= x)
>>> q.num_vars()
2
"""
return int(Z3_get_quantifier_num_bound(self.ctx_ref(), self.ast))
def var_name(self, idx):
"""Return a string representing a name used when displaying the quantifier.
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> x = Int('x')
>>> y = Int('y')
>>> q = ForAll([x, y], f(x, y) >= x)
>>> q.var_name(0)
'x'
>>> q.var_name(1)
'y'
"""
if z3_debug():
_z3_assert(idx < self.num_vars(), "Invalid variable idx")
return _symbol2py(self.ctx, Z3_get_quantifier_bound_name(self.ctx_ref(), self.ast, idx))
def var_sort(self, idx):
"""Return the sort of a bound variable.
>>> f = Function('f', IntSort(), RealSort(), IntSort())
>>> x = Int('x')
>>> y = Real('y')
>>> q = ForAll([x, y], f(x, y) >= x)
>>> q.var_sort(0)
Int
>>> q.var_sort(1)
Real
"""
if z3_debug():
_z3_assert(idx < self.num_vars(), "Invalid variable idx")
return _to_sort_ref(Z3_get_quantifier_bound_sort(self.ctx_ref(), self.ast, idx), self.ctx)
def children(self):
"""Return a list containing a single element self.body()
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> q = ForAll(x, f(x) == 0)
>>> q.children()
[f(Var(0)) == 0]
"""
return [self.body()]
def is_quantifier(a):
"""Return `True` if `a` is a Z3 quantifier.
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> q = ForAll(x, f(x) == 0)
>>> is_quantifier(q)
True
>>> is_quantifier(f(x))
False
"""
return isinstance(a, QuantifierRef)
def _mk_quantifier(is_forall, vs, body, weight=1, qid="", skid="", patterns=[], no_patterns=[]):
if z3_debug():
_z3_assert(is_bool(body) or is_app(vs) or (len(vs) > 0 and is_app(vs[0])), "Z3 expression expected")
_z3_assert(is_const(vs) or (len(vs) > 0 and all([is_const(v) for v in vs])), "Invalid bounded variable(s)")
_z3_assert(all([is_pattern(a) or is_expr(a) for a in patterns]), "Z3 patterns expected")
_z3_assert(all([is_expr(p) for p in no_patterns]), "no patterns are Z3 expressions")
if is_app(vs):
ctx = vs.ctx
vs = [vs]
else:
ctx = vs[0].ctx
if not is_expr(body):
body = BoolVal(body, ctx)
num_vars = len(vs)
if num_vars == 0:
return body
_vs = (Ast * num_vars)()
for i in range(num_vars):
# TODO: Check whether each bound variable is a constant
_vs[i] = vs[i].as_ast()
patterns = [_to_pattern(p) for p in patterns]
num_pats = len(patterns)
_pats = (Pattern * num_pats)()
for i in range(num_pats):
_pats[i] = patterns[i].ast
_no_pats, num_no_pats = _to_ast_array(no_patterns)
qid = to_symbol(qid, ctx)
skid = to_symbol(skid, ctx)
return QuantifierRef(Z3_mk_quantifier_const_ex(ctx.ref(), is_forall, weight, qid, skid,
num_vars, _vs,
num_pats, _pats,
num_no_pats, _no_pats,
body.as_ast()), ctx)
def ForAll(vs, body, weight=1, qid="", skid="", patterns=[], no_patterns=[]):
"""Create a Z3 forall formula.
The parameters `weight`, `qid`, `skid`, `patterns` and `no_patterns` are optional annotations.
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> x = Int('x')
>>> y = Int('y')
>>> ForAll([x, y], f(x, y) >= x)
ForAll([x, y], f(x, y) >= x)
>>> ForAll([x, y], f(x, y) >= x, patterns=[ f(x, y) ])
ForAll([x, y], f(x, y) >= x)
>>> ForAll([x, y], f(x, y) >= x, weight=10)
ForAll([x, y], f(x, y) >= x)
"""
return _mk_quantifier(True, vs, body, weight, qid, skid, patterns, no_patterns)
def Exists(vs, body, weight=1, qid="", skid="", patterns=[], no_patterns=[]):
"""Create a Z3 exists formula.
The parameters `weight`, `qid`, `skid`, `patterns` and `no_patterns` are optional annotations.
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> x = Int('x')
>>> y = Int('y')
>>> q = Exists([x, y], f(x, y) >= x, skid="foo")
>>> q
Exists([x, y], f(x, y) >= x)
>>> is_quantifier(q)
True
>>> r = Tactic('nnf')(q).as_expr()
>>> is_quantifier(r)
False
"""
return _mk_quantifier(False, vs, body, weight, qid, skid, patterns, no_patterns)
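# Illustrative sketch, not upstream: a universal axiom over an uninterpreted
# function can refute a conflicting ground fact, since instantiating the
# quantifier at 0 already yields a contradiction.
def _demo_quantified_axiom():
    """A universal axiom refutes a conflicting ground assertion.

    >>> f = Function('f', IntSort(), IntSort())
    >>> x = Int('x')
    >>> s = Solver()
    >>> s.add(ForAll(x, f(x) > x), f(0) <= 0)
    >>> s.check()
    unsat
    """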
def Lambda(vs, body):
"""Create a Z3 lambda expression.
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> mem0 = Array('mem0', IntSort(), IntSort())
>>> lo, hi, e, i = Ints('lo hi e i')
>>> mem1 = Lambda([i], If(And(lo <= i, i <= hi), e, mem0[i]))
>>> mem1
Lambda(i, If(And(lo <= i, i <= hi), e, mem0[i]))
"""
ctx = body.ctx
if is_app(vs):
vs = [vs]
num_vars = len(vs)
_vs = (Ast * num_vars)()
for i in range(num_vars):
# TODO: Check whether each bound variable is a constant
_vs[i] = vs[i].as_ast()
return QuantifierRef(Z3_mk_lambda_const(ctx.ref(), num_vars, _vs, body.as_ast()), ctx)
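# Illustrative sketch, not upstream: a lambda denotes an array, so selecting
# from it (via __getitem__) behaves like function application.
def _demo_lambda_apply():
    """Selecting from a lambda behaves like applying the function it denotes.

    >>> i = Int('i')
    >>> inc = Lambda([i], i + 1)
    >>> s = Solver()
    >>> s.add(inc[IntVal(5)] != 6)
    >>> s.check()
    unsat
    """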
#########################################
#
# Arithmetic
#
#########################################
class ArithSortRef(SortRef):
"""Real and Integer sorts."""
def is_real(self):
"""Return `True` if `self` is of the sort Real.
>>> x = Real('x')
>>> x.is_real()
True
>>> (x + 1).is_real()
True
>>> x = Int('x')
>>> x.is_real()
False
"""
return self.kind() == Z3_REAL_SORT
def is_int(self):
"""Return `True` if `self` is of the sort Integer.
>>> x = Int('x')
>>> x.is_int()
True
>>> (x + 1).is_int()
True
>>> x = Real('x')
>>> x.is_int()
False
"""
return self.kind() == Z3_INT_SORT
def subsort(self, other):
"""Return `True` if `self` is a subsort of `other`."""
return self.is_int() and is_arith_sort(other) and other.is_real()
def cast(self, val):
"""Try to cast `val` as an Integer or Real.
>>> IntSort().cast(10)
10
>>> is_int(IntSort().cast(10))
True
>>> is_int(10)
False
>>> RealSort().cast(10)
10
>>> is_real(RealSort().cast(10))
True
"""
if is_expr(val):
if z3_debug():
_z3_assert(self.ctx == val.ctx, "Context mismatch")
val_s = val.sort()
if self.eq(val_s):
return val
if val_s.is_int() and self.is_real():
return ToReal(val)
if val_s.is_bool() and self.is_int():
return If(val, 1, 0)
if val_s.is_bool() and self.is_real():
return ToReal(If(val, 1, 0))
if z3_debug():
_z3_assert(False, "Z3 Integer/Real expression expected")
else:
if self.is_int():
return IntVal(val, self.ctx)
if self.is_real():
return RealVal(val, self.ctx)
if z3_debug():
msg = "int, long, float, string (numeral), or Z3 Integer/Real expression expected. Got %s"
_z3_assert(False, msg % self)
def is_arith_sort(s):
"""Return `True` if s is an arithmetical sort (type).
>>> is_arith_sort(IntSort())
True
>>> is_arith_sort(RealSort())
True
>>> is_arith_sort(BoolSort())
False
>>> n = Int('x') + 1
>>> is_arith_sort(n.sort())
True
"""
return isinstance(s, ArithSortRef)
class ArithRef(ExprRef):
"""Integer and Real expressions."""
def sort(self):
"""Return the sort (type) of the arithmetical expression `self`.
>>> Int('x').sort()
Int
>>> (Real('x') + 1).sort()
Real
"""
return ArithSortRef(Z3_get_sort(self.ctx_ref(), self.as_ast()), self.ctx)
def is_int(self):
"""Return `True` if `self` is an integer expression.
>>> x = Int('x')
>>> x.is_int()
True
>>> (x + 1).is_int()
True
>>> y = Real('y')
>>> (x + y).is_int()
False
"""
return self.sort().is_int()
def is_real(self):
"""Return `True` if `self` is an real expression.
>>> x = Real('x')
>>> x.is_real()
True
>>> (x + 1).is_real()
True
"""
return self.sort().is_real()
def __add__(self, other):
"""Create the Z3 expression `self + other`.
>>> x = Int('x')
>>> y = Int('y')
>>> x + y
x + y
>>> (x + y).sort()
Int
"""
a, b = _coerce_exprs(self, other)
return ArithRef(_mk_bin(Z3_mk_add, a, b), self.ctx)
def __radd__(self, other):
"""Create the Z3 expression `other + self`.
>>> x = Int('x')
>>> 10 + x
10 + x
"""
a, b = _coerce_exprs(self, other)
return ArithRef(_mk_bin(Z3_mk_add, b, a), self.ctx)
def __mul__(self, other):
"""Create the Z3 expression `self * other`.
>>> x = Real('x')
>>> y = Real('y')
>>> x * y
x*y
>>> (x * y).sort()
Real
"""
if isinstance(other, BoolRef):
return If(other, self, 0)
a, b = _coerce_exprs(self, other)
return ArithRef(_mk_bin(Z3_mk_mul, a, b), self.ctx)
def __rmul__(self, other):
"""Create the Z3 expression `other * self`.
>>> x = Real('x')
>>> 10 * x
10*x
"""
a, b = _coerce_exprs(self, other)
return ArithRef(_mk_bin(Z3_mk_mul, b, a), self.ctx)
def __sub__(self, other):
"""Create the Z3 expression `self - other`.
>>> x = Int('x')
>>> y = Int('y')
>>> x - y
x - y
>>> (x - y).sort()
Int
"""
a, b = _coerce_exprs(self, other)
return ArithRef(_mk_bin(Z3_mk_sub, a, b), self.ctx)
def __rsub__(self, other):
"""Create the Z3 expression `other - self`.
>>> x = Int('x')
>>> 10 - x
10 - x
"""
a, b = _coerce_exprs(self, other)
return ArithRef(_mk_bin(Z3_mk_sub, b, a), self.ctx)
def __pow__(self, other):
"""Create the Z3 expression `self**other` (** is the power operator).
>>> x = Real('x')
>>> x**3
x**3
>>> (x**3).sort()
Real
>>> simplify(IntVal(2)**8)
256
"""
a, b = _coerce_exprs(self, other)
return ArithRef(Z3_mk_power(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __rpow__(self, other):
"""Create the Z3 expression `other**self` (** is the power operator).
>>> x = Real('x')
>>> 2**x
2**x
>>> (2**x).sort()
Real
>>> simplify(2**IntVal(8))
256
"""
a, b = _coerce_exprs(self, other)
return ArithRef(Z3_mk_power(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __div__(self, other):
"""Create the Z3 expression `other/self`.
>>> x = Int('x')
>>> y = Int('y')
>>> x/y
x/y
>>> (x/y).sort()
Int
>>> (x/y).sexpr()
'(div x y)'
>>> x = Real('x')
>>> y = Real('y')
>>> x/y
x/y
>>> (x/y).sort()
Real
>>> (x/y).sexpr()
'(/ x y)'
"""
a, b = _coerce_exprs(self, other)
return ArithRef(Z3_mk_div(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __truediv__(self, other):
"""Create the Z3 expression `other/self`."""
return self.__div__(other)
def __rdiv__(self, other):
"""Create the Z3 expression `other/self`.
>>> x = Int('x')
>>> 10/x
10/x
>>> (10/x).sexpr()
'(div 10 x)'
>>> x = Real('x')
>>> 10/x
10/x
>>> (10/x).sexpr()
'(/ 10.0 x)'
"""
a, b = _coerce_exprs(self, other)
return ArithRef(Z3_mk_div(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __rtruediv__(self, other):
"""Create the Z3 expression `other/self`."""
return self.__rdiv__(other)
def __mod__(self, other):
"""Create the Z3 expression `other%self`.
>>> x = Int('x')
>>> y = Int('y')
>>> x % y
x%y
>>> simplify(IntVal(10) % IntVal(3))
1
"""
a, b = _coerce_exprs(self, other)
if z3_debug():
_z3_assert(a.is_int(), "Z3 integer expression expected")
return ArithRef(Z3_mk_mod(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __rmod__(self, other):
"""Create the Z3 expression `other%self`.
>>> x = Int('x')
>>> 10 % x
10%x
"""
a, b = _coerce_exprs(self, other)
if z3_debug():
_z3_assert(a.is_int(), "Z3 integer expression expected")
return ArithRef(Z3_mk_mod(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __neg__(self):
"""Return an expression representing `-self`.
>>> x = Int('x')
>>> -x
-x
>>> simplify(-(-x))
x
"""
return ArithRef(Z3_mk_unary_minus(self.ctx_ref(), self.as_ast()), self.ctx)
def __pos__(self):
"""Return `self`.
>>> x = Int('x')
>>> +x
x
"""
return self
def __le__(self, other):
"""Create the Z3 expression `other <= self`.
>>> x, y = Ints('x y')
>>> x <= y
x <= y
>>> y = Real('y')
>>> x <= y
ToReal(x) <= y
"""
a, b = _coerce_exprs(self, other)
return BoolRef(Z3_mk_le(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __lt__(self, other):
"""Create the Z3 expression `other < self`.
>>> x, y = Ints('x y')
>>> x < y
x < y
>>> y = Real('y')
>>> x < y
ToReal(x) < y
"""
a, b = _coerce_exprs(self, other)
return BoolRef(Z3_mk_lt(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __gt__(self, other):
"""Create the Z3 expression `other > self`.
>>> x, y = Ints('x y')
>>> x > y
x > y
>>> y = Real('y')
>>> x > y
ToReal(x) > y
"""
a, b = _coerce_exprs(self, other)
return BoolRef(Z3_mk_gt(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __ge__(self, other):
"""Create the Z3 expression `other >= self`.
>>> x, y = Ints('x y')
>>> x >= y
x >= y
>>> y = Real('y')
>>> x >= y
ToReal(x) >= y
"""
a, b = _coerce_exprs(self, other)
return BoolRef(Z3_mk_ge(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
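# Illustrative sketch, not upstream: when Int and Real operands are mixed,
# the coercion machinery (_coerce_exprs) lifts the Int side with ToReal,
# which is visible in the resulting AST.
def _demo_int_real_coercion():
    """Mixed Int/Real arithmetic inserts ToReal coercions automatically.

    >>> x = Int('x')
    >>> y = Real('y')
    >>> (x + y).sort()
    Real
    >>> (x + y).arg(0)
    ToReal(x)
    """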
def is_arith(a):
"""Return `True` if `a` is an arithmetical expression.
>>> x = Int('x')
>>> is_arith(x)
True
>>> is_arith(x + 1)
True
>>> is_arith(1)
False
>>> is_arith(IntVal(1))
True
>>> y = Real('y')
>>> is_arith(y)
True
>>> is_arith(y + 1)
True
"""
return isinstance(a, ArithRef)
def is_int(a):
"""Return `True` if `a` is an integer expression.
>>> x = Int('x')
>>> is_int(x + 1)
True
>>> is_int(1)
False
>>> is_int(IntVal(1))
True
>>> y = Real('y')
>>> is_int(y)
False
>>> is_int(y + 1)
False
"""
return is_arith(a) and a.is_int()
def is_real(a):
"""Return `True` if `a` is a real expression.
>>> x = Int('x')
>>> is_real(x + 1)
False
>>> y = Real('y')
>>> is_real(y)
True
>>> is_real(y + 1)
True
>>> is_real(1)
False
>>> is_real(RealVal(1))
True
"""
return is_arith(a) and a.is_real()
def _is_numeral(ctx, a):
return Z3_is_numeral_ast(ctx.ref(), a)
def _is_algebraic(ctx, a):
return Z3_is_algebraic_number(ctx.ref(), a)
def is_int_value(a):
"""Return `True` if `a` is an integer value of sort Int.
>>> is_int_value(IntVal(1))
True
>>> is_int_value(1)
False
>>> is_int_value(Int('x'))
False
>>> n = Int('x') + 1
>>> n
x + 1
>>> n.arg(1)
1
>>> is_int_value(n.arg(1))
True
>>> is_int_value(RealVal("1/3"))
False
>>> is_int_value(RealVal(1))
False
"""
return is_arith(a) and a.is_int() and _is_numeral(a.ctx, a.as_ast())
def is_rational_value(a):
"""Return `True` if `a` is rational value of sort Real.
>>> is_rational_value(RealVal(1))
True
>>> is_rational_value(RealVal("3/5"))
True
>>> is_rational_value(IntVal(1))
False
>>> is_rational_value(1)
False
>>> n = Real('x') + 1
>>> n.arg(1)
1
>>> is_rational_value(n.arg(1))
True
>>> is_rational_value(Real('x'))
False
"""
return is_arith(a) and a.is_real() and _is_numeral(a.ctx, a.as_ast())
def is_algebraic_value(a):
"""Return `True` if `a` is an algebraic value of sort Real.
>>> is_algebraic_value(RealVal("3/5"))
False
>>> n = simplify(Sqrt(2))
>>> n
1.4142135623?
>>> is_algebraic_value(n)
True
"""
return is_arith(a) and a.is_real() and _is_algebraic(a.ctx, a.as_ast())
def is_add(a):
"""Return `True` if `a` is an expression of the form b + c.
>>> x, y = Ints('x y')
>>> is_add(x + y)
True
>>> is_add(x - y)
False
"""
return is_app_of(a, Z3_OP_ADD)
def is_mul(a):
"""Return `True` if `a` is an expression of the form b * c.
>>> x, y = Ints('x y')
>>> is_mul(x * y)
True
>>> is_mul(x - y)
False
"""
return is_app_of(a, Z3_OP_MUL)
def is_sub(a):
"""Return `True` if `a` is an expression of the form b - c.
>>> x, y = Ints('x y')
>>> is_sub(x - y)
True
>>> is_sub(x + y)
False
"""
return is_app_of(a, Z3_OP_SUB)
def is_div(a):
"""Return `True` if `a` is an expression of the form b / c.
>>> x, y = Reals('x y')
>>> is_div(x / y)
True
>>> is_div(x + y)
False
>>> x, y = Ints('x y')
>>> is_div(x / y)
False
>>> is_idiv(x / y)
True
"""
return is_app_of(a, Z3_OP_DIV)
def is_idiv(a):
"""Return `True` if `a` is an expression of the form b div c.
>>> x, y = Ints('x y')
>>> is_idiv(x / y)
True
>>> is_idiv(x + y)
False
"""
return is_app_of(a, Z3_OP_IDIV)
def is_mod(a):
"""Return `True` if `a` is an expression of the form b % c.
>>> x, y = Ints('x y')
>>> is_mod(x % y)
True
>>> is_mod(x + y)
False
"""
return is_app_of(a, Z3_OP_MOD)
def is_le(a):
"""Return `True` if `a` is an expression of the form b <= c.
>>> x, y = Ints('x y')
>>> is_le(x <= y)
True
>>> is_le(x < y)
False
"""
return is_app_of(a, Z3_OP_LE)
def is_lt(a):
"""Return `True` if `a` is an expression of the form b < c.
>>> x, y = Ints('x y')
>>> is_lt(x < y)
True
>>> is_lt(x == y)
False
"""
return is_app_of(a, Z3_OP_LT)
def is_ge(a):
"""Return `True` if `a` is an expression of the form b >= c.
>>> x, y = Ints('x y')
>>> is_ge(x >= y)
True
>>> is_ge(x == y)
False
"""
return is_app_of(a, Z3_OP_GE)
def is_gt(a):
"""Return `True` if `a` is an expression of the form b > c.
>>> x, y = Ints('x y')
>>> is_gt(x > y)
True
>>> is_gt(x == y)
False
"""
return is_app_of(a, Z3_OP_GT)
def is_is_int(a):
"""Return `True` if `a` is an expression of the form IsInt(b).
>>> x = Real('x')
>>> is_is_int(IsInt(x))
True
>>> is_is_int(x)
False
"""
return is_app_of(a, Z3_OP_IS_INT)
def is_to_real(a):
"""Return `True` if `a` is an expression of the form ToReal(b).
>>> x = Int('x')
>>> n = ToReal(x)
>>> n
ToReal(x)
>>> is_to_real(n)
True
>>> is_to_real(x)
False
"""
return is_app_of(a, Z3_OP_TO_REAL)
def is_to_int(a):
"""Return `True` if `a` is an expression of the form ToInt(b).
>>> x = Real('x')
>>> n = ToInt(x)
>>> n
ToInt(x)
>>> is_to_int(n)
True
>>> is_to_int(x)
False
"""
return is_app_of(a, Z3_OP_TO_INT)
class IntNumRef(ArithRef):
"""Integer values."""
def as_long(self):
"""Return a Z3 integer numeral as a Python long (bignum) numeral.
>>> v = IntVal(1)
>>> v + 1
1 + 1
>>> v.as_long() + 1
2
"""
if z3_debug():
_z3_assert(self.is_int(), "Integer value expected")
return int(self.as_string())
def as_string(self):
"""Return a Z3 integer numeral as a Python string.
>>> v = IntVal(100)
>>> v.as_string()
'100'
"""
return Z3_get_numeral_string(self.ctx_ref(), self.as_ast())
def as_binary_string(self):
"""Return a Z3 integer numeral as a Python binary string.
>>> v = IntVal(10)
>>> v.as_binary_string()
'1010'
"""
return Z3_get_numeral_binary_string(self.ctx_ref(), self.as_ast())
class RatNumRef(ArithRef):
"""Rational values."""
def numerator(self):
""" Return the numerator of a Z3 rational numeral.
>>> is_rational_value(RealVal("3/5"))
True
>>> n = RealVal("3/5")
>>> n.numerator()
3
>>> is_rational_value(Q(3,5))
True
>>> Q(3,5).numerator()
3
"""
return IntNumRef(Z3_get_numerator(self.ctx_ref(), self.as_ast()), self.ctx)
def denominator(self):
""" Return the denominator of a Z3 rational numeral.
>>> is_rational_value(Q(3,5))
True
>>> n = Q(3,5)
>>> n.denominator()
5
"""
return IntNumRef(Z3_get_denominator(self.ctx_ref(), self.as_ast()), self.ctx)
def numerator_as_long(self):
""" Return the numerator as a Python long.
>>> v = RealVal(10000000000)
>>> v
10000000000
>>> v + 1
10000000000 + 1
>>> v.numerator_as_long() + 1 == 10000000001
True
"""
return self.numerator().as_long()
def denominator_as_long(self):
""" Return the denominator as a Python long.
>>> v = RealVal("1/3")
>>> v
1/3
>>> v.denominator_as_long()
3
"""
return self.denominator().as_long()
def is_int(self):
return False
def is_real(self):
return True
def is_int_value(self):
return self.denominator().is_int() and self.denominator_as_long() == 1
def as_long(self):
_z3_assert(self.is_int_value(), "Expected integer fraction")
return self.numerator_as_long()
def as_decimal(self, prec):
""" Return a Z3 rational value as a string in decimal notation using at most `prec` decimal places.
>>> v = RealVal("1/5")
>>> v.as_decimal(3)
'0.2'
>>> v = RealVal("1/3")
>>> v.as_decimal(3)
'0.333?'
"""
return Z3_get_numeral_decimal_string(self.ctx_ref(), self.as_ast(), prec)
def as_string(self):
"""Return a Z3 rational numeral as a Python string.
>>> v = Q(3,6)
>>> v.as_string()
'1/2'
"""
return Z3_get_numeral_string(self.ctx_ref(), self.as_ast())
def as_fraction(self):
"""Return a Z3 rational as a Python Fraction object.
>>> v = RealVal("1/5")
>>> v.as_fraction()
Fraction(1, 5)
"""
return Fraction(self.numerator_as_long(), self.denominator_as_long())
class AlgebraicNumRef(ArithRef):
"""Algebraic irrational values."""
def approx(self, precision=10):
"""Return a Z3 rational number that approximates the algebraic number `self`.
The result `r` is such that |r - self| <= 1/10^precision
>>> x = simplify(Sqrt(2))
>>> x.approx(20)
6838717160008073720548335/4835703278458516698824704
>>> x.approx(5)
2965821/2097152
"""
return RatNumRef(Z3_get_algebraic_number_upper(self.ctx_ref(), self.as_ast(), precision), self.ctx)
def as_decimal(self, prec):
"""Return a string representation of the algebraic number `self` in decimal notation
using `prec` decimal places.
>>> x = simplify(Sqrt(2))
>>> x.as_decimal(10)
'1.4142135623?'
>>> x.as_decimal(20)
'1.41421356237309504880?'
"""
return Z3_get_numeral_decimal_string(self.ctx_ref(), self.as_ast(), prec)
def poly(self):
return AstVector(Z3_algebraic_get_poly(self.ctx_ref(), self.as_ast()), self.ctx)
def index(self):
return Z3_algebraic_get_i(self.ctx_ref(), self.as_ast())
def _py2expr(a, ctx=None):
if isinstance(a, bool):
return BoolVal(a, ctx)
if _is_int(a):
return IntVal(a, ctx)
if isinstance(a, float):
return RealVal(a, ctx)
if isinstance(a, str):
return StringVal(a, ctx)
if is_expr(a):
return a
if z3_debug():
_z3_assert(False, "Python bool, int, long or float expected")
def IntSort(ctx=None):
"""Return the integer sort in the given context. If `ctx=None`, then the global context is used.
>>> IntSort()
Int
>>> x = Const('x', IntSort())
>>> is_int(x)
True
>>> x.sort() == IntSort()
True
>>> x.sort() == BoolSort()
False
"""
ctx = _get_ctx(ctx)
return ArithSortRef(Z3_mk_int_sort(ctx.ref()), ctx)
def RealSort(ctx=None):
"""Return the real sort in the given context. If `ctx=None`, then the global context is used.
>>> RealSort()
Real
>>> x = Const('x', RealSort())
>>> is_real(x)
True
>>> is_int(x)
False
>>> x.sort() == RealSort()
True
"""
ctx = _get_ctx(ctx)
return ArithSortRef(Z3_mk_real_sort(ctx.ref()), ctx)
def _to_int_str(val):
if isinstance(val, float):
return str(int(val))
elif isinstance(val, bool):
if val:
return "1"
else:
return "0"
elif _is_int(val):
return str(val)
elif isinstance(val, str):
return val
if z3_debug():
_z3_assert(False, "Python value cannot be used as a Z3 integer")
def IntVal(val, ctx=None):
"""Return a Z3 integer value. If `ctx=None`, then the global context is used.
>>> IntVal(1)
1
>>> IntVal("100")
100
"""
ctx = _get_ctx(ctx)
return IntNumRef(Z3_mk_numeral(ctx.ref(), _to_int_str(val), IntSort(ctx).ast), ctx)
def RealVal(val, ctx=None):
"""Return a Z3 real value.
`val` may be a Python int, long, float or string representing a number in decimal or rational notation.
If `ctx=None`, then the global context is used.
>>> RealVal(1)
1
>>> RealVal(1).sort()
Real
>>> RealVal("3/5")
3/5
>>> RealVal("1.5")
3/2
"""
ctx = _get_ctx(ctx)
return RatNumRef(Z3_mk_numeral(ctx.ref(), str(val), RealSort(ctx).ast), ctx)
def RatVal(a, b, ctx=None):
"""Return a Z3 rational a/b.
If `ctx=None`, then the global context is used.
>>> RatVal(3,5)
3/5
>>> RatVal(3,5).sort()
Real
"""
if z3_debug():
_z3_assert(_is_int(a) or isinstance(a, str), "First argument cannot be converted into an integer")
_z3_assert(_is_int(b) or isinstance(b, str), "Second argument cannot be converted into an integer")
return simplify(RealVal(a, ctx) / RealVal(b, ctx))
def Q(a, b, ctx=None):
"""Return a Z3 rational a/b.
If `ctx=None`, then the global context is used.
>>> Q(3,5)
3/5
>>> Q(3,5).sort()
Real
"""
return simplify(RatVal(a, b, ctx=ctx))
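# Illustrative sketch, not upstream: rational numerals are exact, so sums of
# fractions simplify without floating-point rounding, and they interoperate
# with Python's Fraction type.
def _demo_exact_rationals():
    """Rational numerals are exact; no floating-point rounding occurs.

    >>> simplify(Q(1, 3) + Q(1, 6))
    1/2
    >>> Q(1, 3).as_fraction()
    Fraction(1, 3)
    """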
def Int(name, ctx=None):
"""Return an integer constant named `name`. If `ctx=None`, then the global context is used.
>>> x = Int('x')
>>> is_int(x)
True
>>> is_int(x + 1)
True
"""
ctx = _get_ctx(ctx)
return ArithRef(Z3_mk_const(ctx.ref(), to_symbol(name, ctx), IntSort(ctx).ast), ctx)
def Ints(names, ctx=None):
"""Return a tuple of Integer constants.
>>> x, y, z = Ints('x y z')
>>> Sum(x, y, z)
x + y + z
"""
ctx = _get_ctx(ctx)
if isinstance(names, str):
names = names.split(" ")
return [Int(name, ctx) for name in names]
def IntVector(prefix, sz, ctx=None):
"""Return a list of integer constants of size `sz`.
>>> X = IntVector('x', 3)
>>> X
[x__0, x__1, x__2]
>>> Sum(X)
x__0 + x__1 + x__2
"""
ctx = _get_ctx(ctx)
return [Int("%s__%s" % (prefix, i), ctx) for i in range(sz)]
def FreshInt(prefix="x", ctx=None):
"""Return a fresh integer constant in the given context using the given prefix.
>>> x = FreshInt()
>>> y = FreshInt()
>>> eq(x, y)
False
>>> x.sort()
Int
"""
ctx = _get_ctx(ctx)
return ArithRef(Z3_mk_fresh_const(ctx.ref(), prefix, IntSort(ctx).ast), ctx)
def Real(name, ctx=None):
"""Return a real constant named `name`. If `ctx=None`, then the global context is used.
>>> x = Real('x')
>>> is_real(x)
True
>>> is_real(x + 1)
True
"""
ctx = _get_ctx(ctx)
return ArithRef(Z3_mk_const(ctx.ref(), to_symbol(name, ctx), RealSort(ctx).ast), ctx)
def Reals(names, ctx=None):
"""Return a tuple of real constants.
>>> x, y, z = Reals('x y z')
>>> Sum(x, y, z)
x + y + z
>>> Sum(x, y, z).sort()
Real
"""
ctx = _get_ctx(ctx)
if isinstance(names, str):
names = names.split(" ")
return [Real(name, ctx) for name in names]
def RealVector(prefix, sz, ctx=None):
"""Return a list of real constants of size `sz`.
>>> X = RealVector('x', 3)
>>> X
[x__0, x__1, x__2]
>>> Sum(X)
x__0 + x__1 + x__2
>>> Sum(X).sort()
Real
"""
ctx = _get_ctx(ctx)
return [Real("%s__%s" % (prefix, i), ctx) for i in range(sz)]
def FreshReal(prefix="b", ctx=None):
"""Return a fresh real constant in the given context using the given prefix.
>>> x = FreshReal()
>>> y = FreshReal()
>>> eq(x, y)
False
>>> x.sort()
Real
"""
ctx = _get_ctx(ctx)
return ArithRef(Z3_mk_fresh_const(ctx.ref(), prefix, RealSort(ctx).ast), ctx)
def ToReal(a):
""" Return the Z3 expression ToReal(a).
>>> x = Int('x')
>>> x.sort()
Int
>>> n = ToReal(x)
>>> n
ToReal(x)
>>> n.sort()
Real
"""
if z3_debug():
_z3_assert(a.is_int(), "Z3 integer expression expected.")
ctx = a.ctx
return ArithRef(Z3_mk_int2real(ctx.ref(), a.as_ast()), ctx)
def ToInt(a):
""" Return the Z3 expression ToInt(a).
>>> x = Real('x')
>>> x.sort()
Real
>>> n = ToInt(x)
>>> n
ToInt(x)
>>> n.sort()
Int
"""
if z3_debug():
_z3_assert(a.is_real(), "Z3 real expression expected.")
ctx = a.ctx
return ArithRef(Z3_mk_real2int(ctx.ref(), a.as_ast()), ctx)
def IsInt(a):
""" Return the Z3 predicate IsInt(a).
>>> x = Real('x')
>>> IsInt(x + "1/2")
IsInt(x + 1/2)
>>> solve(IsInt(x + "1/2"), x > 0, x < 1)
[x = 1/2]
>>> solve(IsInt(x + "1/2"), x > 0, x < 1, x != "1/2")
no solution
"""
if z3_debug():
_z3_assert(a.is_real(), "Z3 real expression expected.")
ctx = a.ctx
return BoolRef(Z3_mk_is_int(ctx.ref(), a.as_ast()), ctx)
def Sqrt(a, ctx=None):
""" Return a Z3 expression which represents the square root of a.
>>> x = Real('x')
>>> Sqrt(x)
x**(1/2)
"""
if not is_expr(a):
ctx = _get_ctx(ctx)
a = RealVal(a, ctx)
return a ** "1/2"
def Cbrt(a, ctx=None):
""" Return a Z3 expression which represents the cubic root of a.
>>> x = Real('x')
>>> Cbrt(x)
x**(1/3)
"""
if not is_expr(a):
ctx = _get_ctx(ctx)
a = RealVal(a, ctx)
return a ** "1/3"
#########################################
#
# Bit-Vectors
#
#########################################
class BitVecSortRef(SortRef):
"""Bit-vector sort."""
def size(self):
"""Return the size (number of bits) of the bit-vector sort `self`.
>>> b = BitVecSort(32)
>>> b.size()
32
"""
return int(Z3_get_bv_sort_size(self.ctx_ref(), self.ast))
def subsort(self, other):
return is_bv_sort(other) and self.size() < other.size()
def cast(self, val):
"""Try to cast `val` as a Bit-Vector.
>>> b = BitVecSort(32)
>>> b.cast(10)
10
>>> b.cast(10).sexpr()
'#x0000000a'
"""
if is_expr(val):
if z3_debug():
_z3_assert(self.ctx == val.ctx, "Context mismatch")
# Idea: use sign_extend if sort of val is a bitvector of smaller size
return val
else:
return BitVecVal(val, self)
def is_bv_sort(s):
"""Return True if `s` is a Z3 bit-vector sort.
>>> is_bv_sort(BitVecSort(32))
True
>>> is_bv_sort(IntSort())
False
"""
return isinstance(s, BitVecSortRef)
class BitVecRef(ExprRef):
"""Bit-vector expressions."""
def sort(self):
"""Return the sort of the bit-vector expression `self`.
>>> x = BitVec('x', 32)
>>> x.sort()
BitVec(32)
>>> x.sort() == BitVecSort(32)
True
"""
return BitVecSortRef(Z3_get_sort(self.ctx_ref(), self.as_ast()), self.ctx)
def size(self):
"""Return the number of bits of the bit-vector expression `self`.
>>> x = BitVec('x', 32)
>>> (x + 1).size()
32
>>> Concat(x, x).size()
64
"""
return self.sort().size()
def __add__(self, other):
"""Create the Z3 expression `self + other`.
>>> x = BitVec('x', 32)
>>> y = BitVec('y', 32)
>>> x + y
x + y
>>> (x + y).sort()
BitVec(32)
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvadd(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __radd__(self, other):
"""Create the Z3 expression `other + self`.
>>> x = BitVec('x', 32)
>>> 10 + x
10 + x
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvadd(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __mul__(self, other):
"""Create the Z3 expression `self * other`.
>>> x = BitVec('x', 32)
>>> y = BitVec('y', 32)
>>> x * y
x*y
>>> (x * y).sort()
BitVec(32)
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvmul(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __rmul__(self, other):
"""Create the Z3 expression `other * self`.
>>> x = BitVec('x', 32)
>>> 10 * x
10*x
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvmul(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __sub__(self, other):
"""Create the Z3 expression `self - other`.
>>> x = BitVec('x', 32)
>>> y = BitVec('y', 32)
>>> x - y
x - y
>>> (x - y).sort()
BitVec(32)
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvsub(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __rsub__(self, other):
"""Create the Z3 expression `other - self`.
>>> x = BitVec('x', 32)
>>> 10 - x
10 - x
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvsub(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __or__(self, other):
"""Create the Z3 expression bitwise-or `self | other`.
>>> x = BitVec('x', 32)
>>> y = BitVec('y', 32)
>>> x | y
x | y
>>> (x | y).sort()
BitVec(32)
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvor(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __ror__(self, other):
"""Create the Z3 expression bitwise-or `other | self`.
>>> x = BitVec('x', 32)
>>> 10 | x
10 | x
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvor(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __and__(self, other):
"""Create the Z3 expression bitwise-and `self & other`.
>>> x = BitVec('x', 32)
>>> y = BitVec('y', 32)
>>> x & y
x & y
>>> (x & y).sort()
BitVec(32)
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvand(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __rand__(self, other):
"""Create the Z3 expression bitwise-or `other & self`.
>>> x = BitVec('x', 32)
>>> 10 & x
10 & x
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvand(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __xor__(self, other):
"""Create the Z3 expression bitwise-xor `self ^ other`.
>>> x = BitVec('x', 32)
>>> y = BitVec('y', 32)
>>> x ^ y
x ^ y
>>> (x ^ y).sort()
BitVec(32)
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvxor(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __rxor__(self, other):
"""Create the Z3 expression bitwise-xor `other ^ self`.
>>> x = BitVec('x', 32)
>>> 10 ^ x
10 ^ x
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvxor(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __pos__(self):
"""Return `self`.
>>> x = BitVec('x', 32)
>>> +x
x
"""
return self
def __neg__(self):
"""Return an expression representing `-self`.
>>> x = BitVec('x', 32)
>>> -x
-x
>>> simplify(-(-x))
x
"""
return BitVecRef(Z3_mk_bvneg(self.ctx_ref(), self.as_ast()), self.ctx)
def __invert__(self):
"""Create the Z3 expression bitwise-not `~self`.
>>> x = BitVec('x', 32)
>>> ~x
~x
>>> simplify(~(~x))
x
"""
return BitVecRef(Z3_mk_bvnot(self.ctx_ref(), self.as_ast()), self.ctx)
def __div__(self, other):
"""Create the Z3 expression (signed) division `self / other`.
Use the function UDiv() for unsigned division.
>>> x = BitVec('x', 32)
>>> y = BitVec('y', 32)
>>> x / y
x/y
>>> (x / y).sort()
BitVec(32)
>>> (x / y).sexpr()
'(bvsdiv x y)'
>>> UDiv(x, y).sexpr()
'(bvudiv x y)'
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvsdiv(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __truediv__(self, other):
"""Create the Z3 expression (signed) division `self / other`."""
return self.__div__(other)
def __rdiv__(self, other):
"""Create the Z3 expression (signed) division `other / self`.
Use the function UDiv() for unsigned division.
>>> x = BitVec('x', 32)
>>> 10 / x
10/x
>>> (10 / x).sexpr()
'(bvsdiv #x0000000a x)'
>>> UDiv(10, x).sexpr()
'(bvudiv #x0000000a x)'
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvsdiv(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __rtruediv__(self, other):
"""Create the Z3 expression (signed) division `other / self`."""
return self.__rdiv__(other)
def __mod__(self, other):
"""Create the Z3 expression (signed) mod `self % other`.
Use the function URem() for unsigned remainder, and SRem() for signed remainder.
>>> x = BitVec('x', 32)
>>> y = BitVec('y', 32)
>>> x % y
x%y
>>> (x % y).sort()
BitVec(32)
>>> (x % y).sexpr()
'(bvsmod x y)'
>>> URem(x, y).sexpr()
'(bvurem x y)'
>>> SRem(x, y).sexpr()
'(bvsrem x y)'
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvsmod(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __rmod__(self, other):
"""Create the Z3 expression (signed) mod `other % self`.
Use the function URem() for unsigned remainder, and SRem() for signed remainder.
>>> x = BitVec('x', 32)
>>> 10 % x
10%x
>>> (10 % x).sexpr()
'(bvsmod #x0000000a x)'
>>> URem(10, x).sexpr()
'(bvurem #x0000000a x)'
>>> SRem(10, x).sexpr()
'(bvsrem #x0000000a x)'
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvsmod(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __le__(self, other):
"""Create the Z3 expression (signed) `other <= self`.
Use the function ULE() for unsigned less than or equal to.
>>> x, y = BitVecs('x y', 32)
>>> x <= y
x <= y
>>> (x <= y).sexpr()
'(bvsle x y)'
>>> ULE(x, y).sexpr()
'(bvule x y)'
"""
a, b = _coerce_exprs(self, other)
return BoolRef(Z3_mk_bvsle(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __lt__(self, other):
"""Create the Z3 expression (signed) `other < self`.
Use the function ULT() for unsigned less than.
>>> x, y = BitVecs('x y', 32)
>>> x < y
x < y
>>> (x < y).sexpr()
'(bvslt x y)'
>>> ULT(x, y).sexpr()
'(bvult x y)'
"""
a, b = _coerce_exprs(self, other)
return BoolRef(Z3_mk_bvslt(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __gt__(self, other):
"""Create the Z3 expression (signed) `other > self`.
Use the function UGT() for unsigned greater than.
>>> x, y = BitVecs('x y', 32)
>>> x > y
x > y
>>> (x > y).sexpr()
'(bvsgt x y)'
>>> UGT(x, y).sexpr()
'(bvugt x y)'
"""
a, b = _coerce_exprs(self, other)
return BoolRef(Z3_mk_bvsgt(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __ge__(self, other):
"""Create the Z3 expression (signed) `other >= self`.
Use the function UGE() for unsigned greater than or equal to.
>>> x, y = BitVecs('x y', 32)
>>> x >= y
x >= y
>>> (x >= y).sexpr()
'(bvsge x y)'
>>> UGE(x, y).sexpr()
'(bvuge x y)'
"""
a, b = _coerce_exprs(self, other)
return BoolRef(Z3_mk_bvsge(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __rshift__(self, other):
"""Create the Z3 expression (arithmetical) right shift `self >> other`
Use the function LShR() for the right logical shift
>>> x, y = BitVecs('x y', 32)
>>> x >> y
x >> y
>>> (x >> y).sexpr()
'(bvashr x y)'
>>> LShR(x, y).sexpr()
'(bvlshr x y)'
>>> BitVecVal(4, 3)
4
>>> BitVecVal(4, 3).as_signed_long()
-4
>>> simplify(BitVecVal(4, 3) >> 1).as_signed_long()
-2
>>> simplify(BitVecVal(4, 3) >> 1)
6
>>> simplify(LShR(BitVecVal(4, 3), 1))
2
>>> simplify(BitVecVal(2, 3) >> 1)
1
>>> simplify(LShR(BitVecVal(2, 3), 1))
1
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvashr(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __lshift__(self, other):
"""Create the Z3 expression left shift `self << other`
>>> x, y = BitVecs('x y', 32)
>>> x << y
x << y
>>> (x << y).sexpr()
'(bvshl x y)'
>>> simplify(BitVecVal(2, 3) << 1)
4
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvshl(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
def __rrshift__(self, other):
"""Create the Z3 expression (arithmetical) right shift `other` >> `self`.
Use the function LShR() for the right logical shift
>>> x = BitVec('x', 32)
>>> 10 >> x
10 >> x
>>> (10 >> x).sexpr()
'(bvashr #x0000000a x)'
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvashr(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
def __rlshift__(self, other):
"""Create the Z3 expression left shift `other << self`.
>>> x = BitVec('x', 32)
>>> 10 << x
10 << x
>>> (10 << x).sexpr()
'(bvshl #x0000000a x)'
"""
a, b = _coerce_exprs(self, other)
return BitVecRef(Z3_mk_bvshl(self.ctx_ref(), b.as_ast(), a.as_ast()), self.ctx)
class BitVecNumRef(BitVecRef):
"""Bit-vector values."""
def as_long(self):
"""Return a Z3 bit-vector numeral as a Python long (bignum) numeral.
>>> v = BitVecVal(0xbadc0de, 32)
>>> v
195936478
>>> print("0x%.8x" % v.as_long())
0x0badc0de
"""
return int(self.as_string())
def as_signed_long(self):
"""Return a Z3 bit-vector numeral as a Python long (bignum) numeral.
The most significant bit is assumed to be the sign.
>>> BitVecVal(4, 3).as_signed_long()
-4
>>> BitVecVal(7, 3).as_signed_long()
-1
>>> BitVecVal(3, 3).as_signed_long()
3
>>> BitVecVal(2**32 - 1, 32).as_signed_long()
-1
>>> BitVecVal(2**64 - 1, 64).as_signed_long()
-1
"""
sz = self.size()
val = self.as_long()
if val >= 2**(sz - 1):
val = val - 2**sz
if val < -2**(sz - 1):
val = val + 2**sz
return int(val)
def as_string(self):
return Z3_get_numeral_string(self.ctx_ref(), self.as_ast())
def as_binary_string(self):
return Z3_get_numeral_binary_string(self.ctx_ref(), self.as_ast())
def is_bv(a):
"""Return `True` if `a` is a Z3 bit-vector expression.
>>> b = BitVec('b', 32)
>>> is_bv(b)
True
>>> is_bv(b + 10)
True
>>> is_bv(Int('x'))
False
"""
return isinstance(a, BitVecRef)
def is_bv_value(a):
"""Return `True` if `a` is a Z3 bit-vector numeral value.
>>> b = BitVec('b', 32)
>>> is_bv_value(b)
False
>>> b = BitVecVal(10, 32)
>>> b
10
>>> is_bv_value(b)
True
"""
return is_bv(a) and _is_numeral(a.ctx, a.as_ast())
def BV2Int(a, is_signed=False):
"""Return the Z3 expression BV2Int(a).
>>> b = BitVec('b', 3)
>>> BV2Int(b).sort()
Int
>>> x = Int('x')
>>> x > BV2Int(b)
x > BV2Int(b)
>>> x > BV2Int(b, is_signed=False)
x > BV2Int(b)
>>> x > BV2Int(b, is_signed=True)
x > If(b < 0, BV2Int(b) - 8, BV2Int(b))
>>> solve(x > BV2Int(b), b == 1, x < 3)
[x = 2, b = 1]
"""
if z3_debug():
_z3_assert(is_bv(a), "First argument must be a Z3 bit-vector expression")
ctx = a.ctx
# investigate problem with bv2int
return ArithRef(Z3_mk_bv2int(ctx.ref(), a.as_ast(), is_signed), ctx)
def Int2BV(a, num_bits):
"""Return the z3 expression Int2BV(a, num_bits).
It is a bit-vector of width num_bits and represents the
modulo of a by 2^num_bits
"""
ctx = a.ctx
return BitVecRef(Z3_mk_int2bv(ctx.ref(), num_bits, a.as_ast()), ctx)
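# Illustrative usage sketch (added commentary, not original z3.py code):
# Int2BV and BV2Int convert between integer and bit-vector terms;
# BV2Int(Int2BV(x, 8)) agrees with x on [0, 2**8). Z3's support for mixed
# int/bv reasoning is partial (see the note in BV2Int above), so prove() may
# also answer unknown here.
#
#   x = Int('x')
#   b = Int2BV(x, 8)
#   prove(Implies(And(x >= 0, x < 256), BV2Int(b) == x))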
def BitVecSort(sz, ctx=None):
"""Return a Z3 bit-vector sort of the given size. If `ctx=None`, then the global context is used.
>>> Byte = BitVecSort(8)
>>> Word = BitVecSort(16)
>>> Byte
BitVec(8)
>>> x = Const('x', Byte)
>>> eq(x, BitVec('x', 8))
True
"""
ctx = _get_ctx(ctx)
return BitVecSortRef(Z3_mk_bv_sort(ctx.ref(), sz), ctx)
def BitVecVal(val, bv, ctx=None):
"""Return a bit-vector value with the given number of bits. If `ctx=None`, then the global context is used.
>>> v = BitVecVal(10, 32)
>>> v
10
>>> print("0x%.8x" % v.as_long())
0x0000000a
"""
if is_bv_sort(bv):
ctx = bv.ctx
return BitVecNumRef(Z3_mk_numeral(ctx.ref(), _to_int_str(val), bv.ast), ctx)
else:
ctx = _get_ctx(ctx)
return BitVecNumRef(Z3_mk_numeral(ctx.ref(), _to_int_str(val), BitVecSort(bv, ctx).ast), ctx)
def BitVec(name, bv, ctx=None):
"""Return a bit-vector constant named `name`. `bv` may be the number of bits of a bit-vector sort.
If `ctx=None`, then the global context is used.
>>> x = BitVec('x', 16)
>>> is_bv(x)
True
>>> x.size()
16
>>> x.sort()
BitVec(16)
>>> word = BitVecSort(16)
>>> x2 = BitVec('x', word)
>>> eq(x, x2)
True
"""
if isinstance(bv, BitVecSortRef):
ctx = bv.ctx
else:
ctx = _get_ctx(ctx)
bv = BitVecSort(bv, ctx)
return BitVecRef(Z3_mk_const(ctx.ref(), to_symbol(name, ctx), bv.ast), ctx)
def BitVecs(names, bv, ctx=None):
"""Return a tuple of bit-vector constants of size bv.
>>> x, y, z = BitVecs('x y z', 16)
>>> x.size()
16
>>> x.sort()
BitVec(16)
>>> Sum(x, y, z)
0 + x + y + z
>>> Product(x, y, z)
1*x*y*z
>>> simplify(Product(x, y, z))
x*y*z
"""
ctx = _get_ctx(ctx)
if isinstance(names, str):
names = names.split(" ")
return [BitVec(name, bv, ctx) for name in names]
def Concat(*args):
"""Create a Z3 bit-vector concatenation expression.
>>> v = BitVecVal(1, 4)
>>> Concat(v, v+1, v)
Concat(Concat(1, 1 + 1), 1)
>>> simplify(Concat(v, v+1, v))
289
>>> print("%.3x" % simplify(Concat(v, v+1, v)).as_long())
121
"""
args = _get_args(args)
sz = len(args)
if z3_debug():
_z3_assert(sz >= 2, "At least two arguments expected.")
ctx = None
for a in args:
if is_expr(a):
ctx = a.ctx
break
if is_seq(args[0]) or isinstance(args[0], str):
args = [_coerce_seq(s, ctx) for s in args]
if z3_debug():
_z3_assert(all([is_seq(a) for a in args]), "All arguments must be sequence expressions.")
v = (Ast * sz)()
for i in range(sz):
v[i] = args[i].as_ast()
return SeqRef(Z3_mk_seq_concat(ctx.ref(), sz, v), ctx)
if is_re(args[0]):
if z3_debug():
_z3_assert(all([is_re(a) for a in args]), "All arguments must be regular expressions.")
v = (Ast * sz)()
for i in range(sz):
v[i] = args[i].as_ast()
return ReRef(Z3_mk_re_concat(ctx.ref(), sz, v), ctx)
if z3_debug():
_z3_assert(all([is_bv(a) for a in args]), "All arguments must be Z3 bit-vector expressions.")
r = args[0]
for i in range(sz - 1):
r = BitVecRef(Z3_mk_concat(ctx.ref(), r.as_ast(), args[i + 1].as_ast()), ctx)
return r
def Extract(high, low, a):
"""Create a Z3 bit-vector extraction expression.
Extract is overloaded to also work on sequence extraction.
The functions SubString and SubSeq are redirected to Extract.
For this case, the arguments are reinterpreted as:
    high - a sequence (string)
    low  - an offset
    a    - the length to be extracted
>>> x = BitVec('x', 8)
>>> Extract(6, 2, x)
Extract(6, 2, x)
>>> Extract(6, 2, x).sort()
BitVec(5)
>>> simplify(Extract(StringVal("abcd"),2,1))
"c"
"""
if isinstance(high, str):
high = StringVal(high)
if is_seq(high):
s = high
offset, length = _coerce_exprs(low, a, s.ctx)
return SeqRef(Z3_mk_seq_extract(s.ctx_ref(), s.as_ast(), offset.as_ast(), length.as_ast()), s.ctx)
if z3_debug():
_z3_assert(low <= high, "First argument must be greater than or equal to second argument")
_z3_assert(_is_int(high) and high >= 0 and _is_int(low) and low >= 0,
"First and second arguments must be non negative integers")
_z3_assert(is_bv(a), "Third argument must be a Z3 bit-vector expression")
return BitVecRef(Z3_mk_extract(a.ctx_ref(), high, low, a.as_ast()), a.ctx)
def _check_bv_args(a, b):
if z3_debug():
_z3_assert(is_bv(a) or is_bv(b), "First or second argument must be a Z3 bit-vector expression")
def ULE(a, b):
"""Create the Z3 expression (unsigned) `other <= self`.
Use the operator <= for signed less than or equal to.
>>> x, y = BitVecs('x y', 32)
>>> ULE(x, y)
ULE(x, y)
>>> (x <= y).sexpr()
'(bvsle x y)'
>>> ULE(x, y).sexpr()
'(bvule x y)'
"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BoolRef(Z3_mk_bvule(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def ULT(a, b):
"""Create the Z3 expression (unsigned) `other < self`.
Use the operator < for signed less than.
>>> x, y = BitVecs('x y', 32)
>>> ULT(x, y)
ULT(x, y)
>>> (x < y).sexpr()
'(bvslt x y)'
>>> ULT(x, y).sexpr()
'(bvult x y)'
"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BoolRef(Z3_mk_bvult(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def UGE(a, b):
"""Create the Z3 expression (unsigned) `other >= self`.
Use the operator >= for signed greater than or equal to.
>>> x, y = BitVecs('x y', 32)
>>> UGE(x, y)
UGE(x, y)
>>> (x >= y).sexpr()
'(bvsge x y)'
>>> UGE(x, y).sexpr()
'(bvuge x y)'
"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BoolRef(Z3_mk_bvuge(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def UGT(a, b):
"""Create the Z3 expression (unsigned) `other > self`.
Use the operator > for signed greater than.
>>> x, y = BitVecs('x y', 32)
>>> UGT(x, y)
UGT(x, y)
>>> (x > y).sexpr()
'(bvsgt x y)'
>>> UGT(x, y).sexpr()
'(bvugt x y)'
"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BoolRef(Z3_mk_bvugt(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def UDiv(a, b):
"""Create the Z3 expression (unsigned) division `self / other`.
Use the operator / for signed division.
>>> x = BitVec('x', 32)
>>> y = BitVec('y', 32)
>>> UDiv(x, y)
UDiv(x, y)
>>> UDiv(x, y).sort()
BitVec(32)
>>> (x / y).sexpr()
'(bvsdiv x y)'
>>> UDiv(x, y).sexpr()
'(bvudiv x y)'
"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BitVecRef(Z3_mk_bvudiv(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def URem(a, b):
"""Create the Z3 expression (unsigned) remainder `self % other`.
Use the operator % for signed modulus, and SRem() for signed remainder.
>>> x = BitVec('x', 32)
>>> y = BitVec('y', 32)
>>> URem(x, y)
URem(x, y)
>>> URem(x, y).sort()
BitVec(32)
>>> (x % y).sexpr()
'(bvsmod x y)'
>>> URem(x, y).sexpr()
'(bvurem x y)'
"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BitVecRef(Z3_mk_bvurem(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def SRem(a, b):
"""Create the Z3 expression signed remainder.
Use the operator % for signed modulus, and URem() for unsigned remainder.
>>> x = BitVec('x', 32)
>>> y = BitVec('y', 32)
>>> SRem(x, y)
SRem(x, y)
>>> SRem(x, y).sort()
BitVec(32)
>>> (x % y).sexpr()
'(bvsmod x y)'
>>> SRem(x, y).sexpr()
'(bvsrem x y)'
"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BitVecRef(Z3_mk_bvsrem(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def LShR(a, b):
"""Create the Z3 expression logical right shift.
Use the operator >> for the arithmetical right shift.
>>> x, y = BitVecs('x y', 32)
>>> LShR(x, y)
LShR(x, y)
>>> (x >> y).sexpr()
'(bvashr x y)'
>>> LShR(x, y).sexpr()
'(bvlshr x y)'
>>> BitVecVal(4, 3)
4
>>> BitVecVal(4, 3).as_signed_long()
-4
>>> simplify(BitVecVal(4, 3) >> 1).as_signed_long()
-2
>>> simplify(BitVecVal(4, 3) >> 1)
6
>>> simplify(LShR(BitVecVal(4, 3), 1))
2
>>> simplify(BitVecVal(2, 3) >> 1)
1
>>> simplify(LShR(BitVecVal(2, 3), 1))
1
"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BitVecRef(Z3_mk_bvlshr(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def RotateLeft(a, b):
"""Return an expression representing `a` rotated to the left `b` times.
>>> a, b = BitVecs('a b', 16)
>>> RotateLeft(a, b)
RotateLeft(a, b)
>>> simplify(RotateLeft(a, 0))
a
>>> simplify(RotateLeft(a, 16))
a
"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BitVecRef(Z3_mk_ext_rotate_left(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def RotateRight(a, b):
"""Return an expression representing `a` rotated to the right `b` times.
>>> a, b = BitVecs('a b', 16)
>>> RotateRight(a, b)
RotateRight(a, b)
>>> simplify(RotateRight(a, 0))
a
>>> simplify(RotateRight(a, 16))
a
"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BitVecRef(Z3_mk_ext_rotate_right(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def SignExt(n, a):
"""Return a bit-vector expression with `n` extra sign-bits.
>>> x = BitVec('x', 16)
>>> n = SignExt(8, x)
>>> n.size()
24
>>> n
SignExt(8, x)
>>> n.sort()
BitVec(24)
>>> v0 = BitVecVal(2, 2)
>>> v0
2
>>> v0.size()
2
>>> v = simplify(SignExt(6, v0))
>>> v
254
>>> v.size()
8
>>> print("%.x" % v.as_long())
fe
"""
if z3_debug():
_z3_assert(_is_int(n), "First argument must be an integer")
_z3_assert(is_bv(a), "Second argument must be a Z3 bit-vector expression")
return BitVecRef(Z3_mk_sign_ext(a.ctx_ref(), n, a.as_ast()), a.ctx)
def ZeroExt(n, a):
"""Return a bit-vector expression with `n` extra zero-bits.
>>> x = BitVec('x', 16)
>>> n = ZeroExt(8, x)
>>> n.size()
24
>>> n
ZeroExt(8, x)
>>> n.sort()
BitVec(24)
>>> v0 = BitVecVal(2, 2)
>>> v0
2
>>> v0.size()
2
>>> v = simplify(ZeroExt(6, v0))
>>> v
2
>>> v.size()
8
"""
if z3_debug():
_z3_assert(_is_int(n), "First argument must be an integer")
_z3_assert(is_bv(a), "Second argument must be a Z3 bit-vector expression")
return BitVecRef(Z3_mk_zero_ext(a.ctx_ref(), n, a.as_ast()), a.ctx)
def RepeatBitVec(n, a):
"""Return an expression representing `n` copies of `a`.
>>> x = BitVec('x', 8)
>>> n = RepeatBitVec(4, x)
>>> n
RepeatBitVec(4, x)
>>> n.size()
32
>>> v0 = BitVecVal(10, 4)
>>> print("%.x" % v0.as_long())
a
>>> v = simplify(RepeatBitVec(4, v0))
>>> v.size()
16
>>> print("%.x" % v.as_long())
aaaa
"""
if z3_debug():
_z3_assert(_is_int(n), "First argument must be an integer")
_z3_assert(is_bv(a), "Second argument must be a Z3 bit-vector expression")
return BitVecRef(Z3_mk_repeat(a.ctx_ref(), n, a.as_ast()), a.ctx)
def BVRedAnd(a):
"""Return the reduction-and expression of `a`."""
if z3_debug():
_z3_assert(is_bv(a), "First argument must be a Z3 bit-vector expression")
return BitVecRef(Z3_mk_bvredand(a.ctx_ref(), a.as_ast()), a.ctx)
def BVRedOr(a):
"""Return the reduction-or expression of `a`."""
if z3_debug():
_z3_assert(is_bv(a), "First argument must be a Z3 bit-vector expression")
return BitVecRef(Z3_mk_bvredor(a.ctx_ref(), a.as_ast()), a.ctx)
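# Illustrative usage sketch (added commentary, not original z3.py code): the
# reduction operators return a bit-vector of size 1, not a Bool, so they are
# compared against 0/1. Both identities below should follow by bit-blasting.
#
#   x = BitVec('x', 8)
#   prove((BVRedOr(x) == 1) == (x != 0))
#   prove((BVRedAnd(x) == 1) == (x == 255))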
def BVAddNoOverflow(a, b, signed):
"""A predicate the determines that bit-vector addition does not overflow"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BoolRef(Z3_mk_bvadd_no_overflow(a.ctx_ref(), a.as_ast(), b.as_ast(), signed), a.ctx)
def BVAddNoUnderflow(a, b):
"""A predicate the determines that signed bit-vector addition does not underflow"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BoolRef(Z3_mk_bvadd_no_underflow(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def BVSubNoOverflow(a, b):
"""A predicate the determines that bit-vector subtraction does not overflow"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BoolRef(Z3_mk_bvsub_no_overflow(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def BVSubNoUnderflow(a, b, signed):
"""A predicate the determines that bit-vector subtraction does not underflow"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BoolRef(Z3_mk_bvsub_no_underflow(a.ctx_ref(), a.as_ast(), b.as_ast(), signed), a.ctx)
def BVSDivNoOverflow(a, b):
"""A predicate the determines that bit-vector signed division does not overflow"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BoolRef(Z3_mk_bvsdiv_no_overflow(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def BVSNegNoOverflow(a):
"""A predicate the determines that bit-vector unary negation does not overflow"""
if z3_debug():
_z3_assert(is_bv(a), "First argument must be a Z3 bit-vector expression")
return BoolRef(Z3_mk_bvneg_no_overflow(a.ctx_ref(), a.as_ast()), a.ctx)
def BVMulNoOverflow(a, b, signed):
"""A predicate the determines that bit-vector multiplication does not overflow"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BoolRef(Z3_mk_bvmul_no_overflow(a.ctx_ref(), a.as_ast(), b.as_ast(), signed), a.ctx)
def BVMulNoUnderflow(a, b):
"""A predicate the determines that bit-vector signed multiplication does not underflow"""
_check_bv_args(a, b)
a, b = _coerce_exprs(a, b)
return BoolRef(Z3_mk_bvmul_no_underflow(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
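# Illustrative usage sketch (added commentary, not original z3.py code): the
# *NoOverflow/*NoUnderflow predicates are typically used as side conditions
# when checking machine arithmetic, e.g. searching for an unsigned overflow:
#
#   x, y = BitVecs('x y', 8)
#   s = Solver()
#   s.add(Not(BVAddNoOverflow(x, y, False)))
#   if s.check() == sat:
#       print(s.model())  # some x, y whose 8-bit sum wraps around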
#########################################
#
# Arrays
#
#########################################
class ArraySortRef(SortRef):
"""Array sorts."""
def domain(self):
"""Return the domain of the array sort `self`.
>>> A = ArraySort(IntSort(), BoolSort())
>>> A.domain()
Int
"""
return _to_sort_ref(Z3_get_array_sort_domain(self.ctx_ref(), self.ast), self.ctx)
def domain_n(self, i):
"""Return the domain of the array sort `self`.
"""
return _to_sort_ref(Z3_get_array_sort_domain_n(self.ctx_ref(), self.ast, i), self.ctx)
def range(self):
"""Return the range of the array sort `self`.
>>> A = ArraySort(IntSort(), BoolSort())
>>> A.range()
Bool
"""
return _to_sort_ref(Z3_get_array_sort_range(self.ctx_ref(), self.ast), self.ctx)
class ArrayRef(ExprRef):
"""Array expressions. """
def sort(self):
"""Return the array sort of the array expression `self`.
>>> a = Array('a', IntSort(), BoolSort())
>>> a.sort()
Array(Int, Bool)
"""
return ArraySortRef(Z3_get_sort(self.ctx_ref(), self.as_ast()), self.ctx)
def domain(self):
"""Shorthand for `self.sort().domain()`.
>>> a = Array('a', IntSort(), BoolSort())
>>> a.domain()
Int
"""
return self.sort().domain()
def domain_n(self, i):
"""Shorthand for self.sort().domain_n(i)`."""
return self.sort().domain_n(i)
def range(self):
"""Shorthand for `self.sort().range()`.
>>> a = Array('a', IntSort(), BoolSort())
>>> a.range()
Bool
"""
return self.sort().range()
def __getitem__(self, arg):
"""Return the Z3 expression `self[arg]`.
>>> a = Array('a', IntSort(), BoolSort())
>>> i = Int('i')
>>> a[i]
a[i]
>>> a[i].sexpr()
'(select a i)'
"""
return _array_select(self, arg)
def default(self):
return _to_expr_ref(Z3_mk_array_default(self.ctx_ref(), self.as_ast()), self.ctx)
def _array_select(ar, arg):
if isinstance(arg, tuple):
args = [ar.domain_n(i).cast(arg[i]) for i in range(len(arg))]
_args, sz = _to_ast_array(args)
return _to_expr_ref(Z3_mk_select_n(ar.ctx_ref(), ar.as_ast(), sz, _args), ar.ctx)
arg = ar.domain().cast(arg)
return _to_expr_ref(Z3_mk_select(ar.ctx_ref(), ar.as_ast(), arg.as_ast()), ar.ctx)
def is_array_sort(a):
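    """Return `True` if the sort of `a` is an array sort."""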
return Z3_get_sort_kind(a.ctx.ref(), Z3_get_sort(a.ctx.ref(), a.ast)) == Z3_ARRAY_SORT
def is_array(a):
"""Return `True` if `a` is a Z3 array expression.
>>> a = Array('a', IntSort(), IntSort())
>>> is_array(a)
True
>>> is_array(Store(a, 0, 1))
True
>>> is_array(a[0])
False
"""
return isinstance(a, ArrayRef)
def is_const_array(a):
"""Return `True` if `a` is a Z3 constant array.
>>> a = K(IntSort(), 10)
>>> is_const_array(a)
True
>>> a = Array('a', IntSort(), IntSort())
>>> is_const_array(a)
False
"""
return is_app_of(a, Z3_OP_CONST_ARRAY)
def is_K(a):
"""Return `True` if `a` is a Z3 constant array.
>>> a = K(IntSort(), 10)
>>> is_K(a)
True
>>> a = Array('a', IntSort(), IntSort())
>>> is_K(a)
False
"""
return is_app_of(a, Z3_OP_CONST_ARRAY)
def is_map(a):
"""Return `True` if `a` is a Z3 map array expression.
>>> f = Function('f', IntSort(), IntSort())
>>> b = Array('b', IntSort(), IntSort())
>>> a = Map(f, b)
>>> a
Map(f, b)
>>> is_map(a)
True
>>> is_map(b)
False
"""
return is_app_of(a, Z3_OP_ARRAY_MAP)
def is_default(a):
"""Return `True` if `a` is a Z3 default array expression.
>>> d = Default(K(IntSort(), 10))
>>> is_default(d)
True
"""
return is_app_of(a, Z3_OP_ARRAY_DEFAULT)
def get_map_func(a):
"""Return the function declaration associated with a Z3 map array expression.
>>> f = Function('f', IntSort(), IntSort())
>>> b = Array('b', IntSort(), IntSort())
>>> a = Map(f, b)
>>> eq(f, get_map_func(a))
True
>>> get_map_func(a)
f
>>> get_map_func(a)(0)
f(0)
"""
if z3_debug():
_z3_assert(is_map(a), "Z3 array map expression expected.")
return FuncDeclRef(
Z3_to_func_decl(
a.ctx_ref(),
Z3_get_decl_ast_parameter(a.ctx_ref(), a.decl().ast, 0),
),
ctx=a.ctx,
)
def ArraySort(*sig):
"""Return the Z3 array sort with the given domain and range sorts.
>>> A = ArraySort(IntSort(), BoolSort())
>>> A
Array(Int, Bool)
>>> A.domain()
Int
>>> A.range()
Bool
>>> AA = ArraySort(IntSort(), A)
>>> AA
Array(Int, Array(Int, Bool))
"""
sig = _get_args(sig)
if z3_debug():
_z3_assert(len(sig) > 1, "At least two arguments expected")
arity = len(sig) - 1
r = sig[arity]
d = sig[0]
if z3_debug():
for s in sig:
_z3_assert(is_sort(s), "Z3 sort expected")
_z3_assert(s.ctx == r.ctx, "Context mismatch")
ctx = d.ctx
if len(sig) == 2:
return ArraySortRef(Z3_mk_array_sort(ctx.ref(), d.ast, r.ast), ctx)
dom = (Sort * arity)()
for i in range(arity):
dom[i] = sig[i].ast
return ArraySortRef(Z3_mk_array_sort_n(ctx.ref(), arity, dom, r.ast), ctx)
def Array(name, *sorts):
"""Return an array constant named `name` with the given domain and range sorts.
>>> a = Array('a', IntSort(), IntSort())
>>> a.sort()
Array(Int, Int)
>>> a[0]
a[0]
"""
s = ArraySort(sorts)
ctx = s.ctx
return ArrayRef(Z3_mk_const(ctx.ref(), to_symbol(name, ctx), s.ast), ctx)
def Update(a, *args):
"""Return a Z3 store array expression.
>>> a = Array('a', IntSort(), IntSort())
>>> i, v = Ints('i v')
>>> s = Update(a, i, v)
>>> s.sort()
Array(Int, Int)
>>> prove(s[i] == v)
proved
>>> j = Int('j')
>>> prove(Implies(i != j, s[j] == a[j]))
proved
"""
if z3_debug():
_z3_assert(is_array_sort(a), "First argument must be a Z3 array expression")
args = _get_args(args)
ctx = a.ctx
if len(args) <= 1:
raise Z3Exception("array update requires index and value arguments")
if len(args) == 2:
i = args[0]
v = args[1]
i = a.sort().domain().cast(i)
v = a.sort().range().cast(v)
return _to_expr_ref(Z3_mk_store(ctx.ref(), a.as_ast(), i.as_ast(), v.as_ast()), ctx)
v = a.sort().range().cast(args[-1])
idxs = [a.sort().domain_n(i).cast(args[i]) for i in range(len(args)-1)]
_args, sz = _to_ast_array(idxs)
return _to_expr_ref(Z3_mk_store_n(ctx.ref(), a.as_ast(), sz, _args, v.as_ast()), ctx)
def Default(a):
""" Return a default value for array expression.
>>> b = K(IntSort(), 1)
>>> prove(Default(b) == 1)
proved
"""
if z3_debug():
_z3_assert(is_array_sort(a), "First argument must be a Z3 array expression")
return a.default()
def Store(a, *args):
"""Return a Z3 store array expression.
>>> a = Array('a', IntSort(), IntSort())
>>> i, v = Ints('i v')
>>> s = Store(a, i, v)
>>> s.sort()
Array(Int, Int)
>>> prove(s[i] == v)
proved
>>> j = Int('j')
>>> prove(Implies(i != j, s[j] == a[j]))
proved
"""
return Update(a, args)
def Select(a, *args):
"""Return a Z3 select array expression.
>>> a = Array('a', IntSort(), IntSort())
>>> i = Int('i')
>>> Select(a, i)
a[i]
>>> eq(Select(a, i), a[i])
True
"""
args = _get_args(args)
if z3_debug():
_z3_assert(is_array_sort(a), "First argument must be a Z3 array expression")
return a[args]
def Map(f, *args):
"""Return a Z3 map array expression.
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> a1 = Array('a1', IntSort(), IntSort())
>>> a2 = Array('a2', IntSort(), IntSort())
>>> b = Map(f, a1, a2)
>>> b
Map(f, a1, a2)
>>> prove(b[0] == f(a1[0], a2[0]))
proved
"""
args = _get_args(args)
if z3_debug():
_z3_assert(len(args) > 0, "At least one Z3 array expression expected")
_z3_assert(is_func_decl(f), "First argument must be a Z3 function declaration")
_z3_assert(all([is_array(a) for a in args]), "Z3 array expected expected")
_z3_assert(len(args) == f.arity(), "Number of arguments mismatch")
_args, sz = _to_ast_array(args)
ctx = f.ctx
return ArrayRef(Z3_mk_map(ctx.ref(), f.ast, sz, _args), ctx)
def K(dom, v):
"""Return a Z3 constant array expression.
>>> a = K(IntSort(), 10)
>>> a
K(Int, 10)
>>> a.sort()
Array(Int, Int)
>>> i = Int('i')
>>> a[i]
K(Int, 10)[i]
>>> simplify(a[i])
10
"""
if z3_debug():
_z3_assert(is_sort(dom), "Z3 sort expected")
ctx = dom.ctx
if not is_expr(v):
v = _py2expr(v, ctx)
return ArrayRef(Z3_mk_const_array(ctx.ref(), dom.ast, v.as_ast()), ctx)
def Ext(a, b):
"""Return extensionality index for one-dimensional arrays.
>> a, b = Consts('a b', SetSort(IntSort()))
>> Ext(a, b)
Ext(a, b)
"""
ctx = a.ctx
if z3_debug():
_z3_assert(is_array_sort(a) and (is_array(b) or b.is_lambda()), "arguments must be arrays")
return _to_expr_ref(Z3_mk_array_ext(ctx.ref(), a.as_ast(), b.as_ast()), ctx)
def SetHasSize(a, k):
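    """ Create a predicate stating that set `a` has size (cardinality) `k`."""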
ctx = a.ctx
k = _py2expr(k, ctx)
return _to_expr_ref(Z3_mk_set_has_size(ctx.ref(), a.as_ast(), k.as_ast()), ctx)
def is_select(a):
"""Return `True` if `a` is a Z3 array select application.
>>> a = Array('a', IntSort(), IntSort())
>>> is_select(a)
False
>>> i = Int('i')
>>> is_select(a[i])
True
"""
return is_app_of(a, Z3_OP_SELECT)
def is_store(a):
"""Return `True` if `a` is a Z3 array store application.
>>> a = Array('a', IntSort(), IntSort())
>>> is_store(a)
False
>>> is_store(Store(a, 0, 1))
True
"""
return is_app_of(a, Z3_OP_STORE)
#########################################
#
# Sets
#
#########################################
def SetSort(s):
""" Create a set sort over element sort s"""
return ArraySort(s, BoolSort())
def EmptySet(s):
"""Create the empty set
>>> EmptySet(IntSort())
K(Int, False)
"""
ctx = s.ctx
return ArrayRef(Z3_mk_empty_set(ctx.ref(), s.ast), ctx)
def FullSet(s):
"""Create the full set
>>> FullSet(IntSort())
K(Int, True)
"""
ctx = s.ctx
return ArrayRef(Z3_mk_full_set(ctx.ref(), s.ast), ctx)
def SetUnion(*args):
""" Take the union of sets
>>> a = Const('a', SetSort(IntSort()))
>>> b = Const('b', SetSort(IntSort()))
>>> SetUnion(a, b)
union(a, b)
"""
args = _get_args(args)
ctx = _ctx_from_ast_arg_list(args)
_args, sz = _to_ast_array(args)
return ArrayRef(Z3_mk_set_union(ctx.ref(), sz, _args), ctx)
def SetIntersect(*args):
""" Take the union of sets
>>> a = Const('a', SetSort(IntSort()))
>>> b = Const('b', SetSort(IntSort()))
>>> SetIntersect(a, b)
intersection(a, b)
"""
args = _get_args(args)
ctx = _ctx_from_ast_arg_list(args)
_args, sz = _to_ast_array(args)
return ArrayRef(Z3_mk_set_intersect(ctx.ref(), sz, _args), ctx)
def SetAdd(s, e):
""" Add element e to set s
>>> a = Const('a', SetSort(IntSort()))
>>> SetAdd(a, 1)
Store(a, 1, True)
"""
ctx = _ctx_from_ast_arg_list([s, e])
e = _py2expr(e, ctx)
return ArrayRef(Z3_mk_set_add(ctx.ref(), s.as_ast(), e.as_ast()), ctx)
def SetDel(s, e):
""" Remove element e to set s
>>> a = Const('a', SetSort(IntSort()))
>>> SetDel(a, 1)
Store(a, 1, False)
"""
ctx = _ctx_from_ast_arg_list([s, e])
e = _py2expr(e, ctx)
return ArrayRef(Z3_mk_set_del(ctx.ref(), s.as_ast(), e.as_ast()), ctx)
def SetComplement(s):
""" The complement of set s
>>> a = Const('a', SetSort(IntSort()))
>>> SetComplement(a)
complement(a)
"""
ctx = s.ctx
return ArrayRef(Z3_mk_set_complement(ctx.ref(), s.as_ast()), ctx)
def SetDifference(a, b):
""" The set difference of a and b
>>> a = Const('a', SetSort(IntSort()))
>>> b = Const('b', SetSort(IntSort()))
>>> SetDifference(a, b)
setminus(a, b)
"""
ctx = _ctx_from_ast_arg_list([a, b])
return ArrayRef(Z3_mk_set_difference(ctx.ref(), a.as_ast(), b.as_ast()), ctx)
def IsMember(e, s):
""" Check if e is a member of set s
>>> a = Const('a', SetSort(IntSort()))
>>> IsMember(1, a)
a[1]
"""
ctx = _ctx_from_ast_arg_list([s, e])
e = _py2expr(e, ctx)
return BoolRef(Z3_mk_set_member(ctx.ref(), e.as_ast(), s.as_ast()), ctx)
def IsSubset(a, b):
""" Check if a is a subset of b
>>> a = Const('a', SetSort(IntSort()))
>>> b = Const('b', SetSort(IntSort()))
>>> IsSubset(a, b)
subset(a, b)
"""
ctx = _ctx_from_ast_arg_list([a, b])
return BoolRef(Z3_mk_set_subset(ctx.ref(), a.as_ast(), b.as_ast()), ctx)
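# Illustrative usage sketch (added commentary, not original z3.py code): since
# sets are modeled as arrays into Bool, basic set algebra can be discharged
# with prove() via the array theory.
#
#   a, b = Consts('a b', SetSort(IntSort()))
#   prove(IsSubset(SetIntersect(a, b), a))
#   prove(SetUnion(a, EmptySet(IntSort())) == a)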
#########################################
#
# Datatypes
#
#########################################
def _valid_accessor(acc):
"""Return `True` if acc is pair of the form (String, Datatype or Sort). """
if not isinstance(acc, tuple):
return False
if len(acc) != 2:
return False
return isinstance(acc[0], str) and (isinstance(acc[1], Datatype) or is_sort(acc[1]))
class Datatype:
"""Helper class for declaring Z3 datatypes.
>>> List = Datatype('List')
>>> List.declare('cons', ('car', IntSort()), ('cdr', List))
>>> List.declare('nil')
>>> List = List.create()
>>> # List is now a Z3 declaration
>>> List.nil
nil
>>> List.cons(10, List.nil)
cons(10, nil)
>>> List.cons(10, List.nil).sort()
List
>>> cons = List.cons
>>> nil = List.nil
>>> car = List.car
>>> cdr = List.cdr
>>> n = cons(1, cons(0, nil))
>>> n
cons(1, cons(0, nil))
>>> simplify(cdr(n))
cons(0, nil)
>>> simplify(car(n))
1
"""
def __init__(self, name, ctx=None):
self.ctx = _get_ctx(ctx)
self.name = name
self.constructors = []
def __deepcopy__(self, memo={}):
r = Datatype(self.name, self.ctx)
r.constructors = copy.deepcopy(self.constructors)
return r
def declare_core(self, name, rec_name, *args):
if z3_debug():
_z3_assert(isinstance(name, str), "String expected")
_z3_assert(isinstance(rec_name, str), "String expected")
_z3_assert(
all([_valid_accessor(a) for a in args]),
"Valid list of accessors expected. An accessor is a pair of the form (String, Datatype|Sort)",
)
self.constructors.append((name, rec_name, args))
def declare(self, name, *args):
"""Declare constructor named `name` with the given accessors `args`.
Each accessor is a pair `(name, sort)`, where `name` is a string and `sort` a Z3 sort
or a reference to the datatypes being declared.
In the following example `List.declare('cons', ('car', IntSort()), ('cdr', List))`
declares the constructor named `cons` that builds a new List using an integer and a List.
It also declares the accessors `car` and `cdr`. The accessor `car` extracts the integer
of a `cons` cell, and `cdr` the list of a `cons` cell. After all constructors were declared,
we use the method create() to create the actual datatype in Z3.
>>> List = Datatype('List')
>>> List.declare('cons', ('car', IntSort()), ('cdr', List))
>>> List.declare('nil')
>>> List = List.create()
"""
if z3_debug():
_z3_assert(isinstance(name, str), "String expected")
_z3_assert(name != "", "Constructor name cannot be empty")
return self.declare_core(name, "is-" + name, *args)
def __repr__(self):
return "Datatype(%s, %s)" % (self.name, self.constructors)
def create(self):
"""Create a Z3 datatype based on the constructors declared using the method `declare()`.
The function `CreateDatatypes()` must be used to define mutually recursive datatypes.
>>> List = Datatype('List')
>>> List.declare('cons', ('car', IntSort()), ('cdr', List))
>>> List.declare('nil')
>>> List = List.create()
>>> List.nil
nil
>>> List.cons(10, List.nil)
cons(10, nil)
"""
return CreateDatatypes([self])[0]
class ScopedConstructor:
"""Auxiliary object used to create Z3 datatypes."""
def __init__(self, c, ctx):
self.c = c
self.ctx = ctx
def __del__(self):
if self.ctx.ref() is not None:
Z3_del_constructor(self.ctx.ref(), self.c)
class ScopedConstructorList:
"""Auxiliary object used to create Z3 datatypes."""
def __init__(self, c, ctx):
self.c = c
self.ctx = ctx
def __del__(self):
if self.ctx.ref() is not None:
Z3_del_constructor_list(self.ctx.ref(), self.c)
def CreateDatatypes(*ds):
"""Create mutually recursive Z3 datatypes using 1 or more Datatype helper objects.
In the following example we define a Tree-List using two mutually recursive datatypes.
>>> TreeList = Datatype('TreeList')
>>> Tree = Datatype('Tree')
>>> # Tree has two constructors: leaf and node
>>> Tree.declare('leaf', ('val', IntSort()))
>>> # a node contains a list of trees
>>> Tree.declare('node', ('children', TreeList))
>>> TreeList.declare('nil')
>>> TreeList.declare('cons', ('car', Tree), ('cdr', TreeList))
>>> Tree, TreeList = CreateDatatypes(Tree, TreeList)
>>> Tree.val(Tree.leaf(10))
val(leaf(10))
>>> simplify(Tree.val(Tree.leaf(10)))
10
>>> n1 = Tree.node(TreeList.cons(Tree.leaf(10), TreeList.cons(Tree.leaf(20), TreeList.nil)))
>>> n1
node(cons(leaf(10), cons(leaf(20), nil)))
>>> n2 = Tree.node(TreeList.cons(n1, TreeList.nil))
>>> simplify(n2 == n1)
False
>>> simplify(TreeList.car(Tree.children(n2)) == n1)
True
"""
ds = _get_args(ds)
if z3_debug():
_z3_assert(len(ds) > 0, "At least one Datatype must be specified")
_z3_assert(all([isinstance(d, Datatype) for d in ds]), "Arguments must be Datatypes")
_z3_assert(all([d.ctx == ds[0].ctx for d in ds]), "Context mismatch")
_z3_assert(all([d.constructors != [] for d in ds]), "Non-empty Datatypes expected")
ctx = ds[0].ctx
num = len(ds)
names = (Symbol * num)()
out = (Sort * num)()
clists = (ConstructorList * num)()
to_delete = []
for i in range(num):
d = ds[i]
names[i] = to_symbol(d.name, ctx)
num_cs = len(d.constructors)
cs = (Constructor * num_cs)()
for j in range(num_cs):
c = d.constructors[j]
cname = to_symbol(c[0], ctx)
rname = to_symbol(c[1], ctx)
fs = c[2]
num_fs = len(fs)
fnames = (Symbol * num_fs)()
sorts = (Sort * num_fs)()
refs = (ctypes.c_uint * num_fs)()
for k in range(num_fs):
fname = fs[k][0]
ftype = fs[k][1]
fnames[k] = to_symbol(fname, ctx)
if isinstance(ftype, Datatype):
if z3_debug():
_z3_assert(
ds.count(ftype) == 1,
"One and only one occurrence of each datatype is expected",
)
sorts[k] = None
refs[k] = ds.index(ftype)
else:
if z3_debug():
_z3_assert(is_sort(ftype), "Z3 sort expected")
sorts[k] = ftype.ast
refs[k] = 0
cs[j] = Z3_mk_constructor(ctx.ref(), cname, rname, num_fs, fnames, sorts, refs)
to_delete.append(ScopedConstructor(cs[j], ctx))
clists[i] = Z3_mk_constructor_list(ctx.ref(), num_cs, cs)
to_delete.append(ScopedConstructorList(clists[i], ctx))
Z3_mk_datatypes(ctx.ref(), num, names, out, clists)
result = []
# Create a field for every constructor, recognizer and accessor
for i in range(num):
dref = DatatypeSortRef(out[i], ctx)
num_cs = dref.num_constructors()
for j in range(num_cs):
cref = dref.constructor(j)
cref_name = cref.name()
cref_arity = cref.arity()
if cref.arity() == 0:
cref = cref()
setattr(dref, cref_name, cref)
rref = dref.recognizer(j)
setattr(dref, "is_" + cref_name, rref)
for k in range(cref_arity):
aref = dref.accessor(j, k)
setattr(dref, aref.name(), aref)
result.append(dref)
return tuple(result)
class DatatypeSortRef(SortRef):
"""Datatype sorts."""
def num_constructors(self):
"""Return the number of constructors in the given Z3 datatype.
>>> List = Datatype('List')
>>> List.declare('cons', ('car', IntSort()), ('cdr', List))
>>> List.declare('nil')
>>> List = List.create()
>>> # List is now a Z3 declaration
>>> List.num_constructors()
2
"""
return int(Z3_get_datatype_sort_num_constructors(self.ctx_ref(), self.ast))
def constructor(self, idx):
"""Return a constructor of the datatype `self`.
>>> List = Datatype('List')
>>> List.declare('cons', ('car', IntSort()), ('cdr', List))
>>> List.declare('nil')
>>> List = List.create()
>>> # List is now a Z3 declaration
>>> List.num_constructors()
2
>>> List.constructor(0)
cons
>>> List.constructor(1)
nil
"""
if z3_debug():
_z3_assert(idx < self.num_constructors(), "Invalid constructor index")
return FuncDeclRef(Z3_get_datatype_sort_constructor(self.ctx_ref(), self.ast, idx), self.ctx)
def recognizer(self, idx):
"""In Z3, each constructor has an associated recognizer predicate.
        If the constructor is named `name`, then the recognizer is named `is_name`.
>>> List = Datatype('List')
>>> List.declare('cons', ('car', IntSort()), ('cdr', List))
>>> List.declare('nil')
>>> List = List.create()
>>> # List is now a Z3 declaration
>>> List.num_constructors()
2
>>> List.recognizer(0)
is(cons)
>>> List.recognizer(1)
is(nil)
>>> simplify(List.is_nil(List.cons(10, List.nil)))
False
>>> simplify(List.is_cons(List.cons(10, List.nil)))
True
>>> l = Const('l', List)
>>> simplify(List.is_cons(l))
is(cons, l)
"""
if z3_debug():
_z3_assert(idx < self.num_constructors(), "Invalid recognizer index")
return FuncDeclRef(Z3_get_datatype_sort_recognizer(self.ctx_ref(), self.ast, idx), self.ctx)
def accessor(self, i, j):
"""In Z3, each constructor has 0 or more accessor.
The number of accessors is equal to the arity of the constructor.
>>> List = Datatype('List')
>>> List.declare('cons', ('car', IntSort()), ('cdr', List))
>>> List.declare('nil')
>>> List = List.create()
>>> List.num_constructors()
2
>>> List.constructor(0)
cons
>>> num_accs = List.constructor(0).arity()
>>> num_accs
2
>>> List.accessor(0, 0)
car
>>> List.accessor(0, 1)
cdr
>>> List.constructor(1)
nil
>>> num_accs = List.constructor(1).arity()
>>> num_accs
0
"""
if z3_debug():
_z3_assert(i < self.num_constructors(), "Invalid constructor index")
_z3_assert(j < self.constructor(i).arity(), "Invalid accessor index")
return FuncDeclRef(
Z3_get_datatype_sort_constructor_accessor(self.ctx_ref(), self.ast, i, j),
ctx=self.ctx,
)
class DatatypeRef(ExprRef):
"""Datatype expressions."""
def sort(self):
"""Return the datatype sort of the datatype expression `self`."""
return DatatypeSortRef(Z3_get_sort(self.ctx_ref(), self.as_ast()), self.ctx)
def TupleSort(name, sorts, ctx=None):
"""Create a named tuple sort base on a set of underlying sorts
Example:
>>> pair, mk_pair, (first, second) = TupleSort("pair", [IntSort(), StringSort()])
"""
tuple = Datatype(name, ctx)
projects = [("project%d" % i, sorts[i]) for i in range(len(sorts))]
tuple.declare(name, *projects)
tuple = tuple.create()
return tuple, tuple.constructor(0), [tuple.accessor(0, i) for i in range(len(sorts))]
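# Illustrative usage sketch (added commentary, not original z3.py code):
#
#   pair, mk_pair, (first, second) = TupleSort("pair", [IntSort(), BoolSort()])
#   p = mk_pair(1, True)
#   prove(first(p) == 1)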
def DisjointSum(name, sorts, ctx=None):
"""Create a named tagged union sort base on a set of underlying sorts
Example:
>>> sum, ((inject0, extract0), (inject1, extract1)) = DisjointSum("+", [IntSort(), StringSort()])
"""
sum = Datatype(name, ctx)
for i in range(len(sorts)):
sum.declare("inject%d" % i, ("project%d" % i, sorts[i]))
sum = sum.create()
return sum, [(sum.constructor(i), sum.accessor(i, 0)) for i in range(len(sorts))]
def EnumSort(name, values, ctx=None):
"""Return a new enumeration sort named `name` containing the given values.
The result is a pair (sort, list of constants).
Example:
>>> Color, (red, green, blue) = EnumSort('Color', ['red', 'green', 'blue'])
"""
if z3_debug():
_z3_assert(isinstance(name, str), "Name must be a string")
_z3_assert(all([isinstance(v, str) for v in values]), "Eumeration sort values must be strings")
_z3_assert(len(values) > 0, "At least one value expected")
ctx = _get_ctx(ctx)
num = len(values)
_val_names = (Symbol * num)()
for i in range(num):
        _val_names[i] = to_symbol(values[i], ctx)
_values = (FuncDecl * num)()
_testers = (FuncDecl * num)()
    name = to_symbol(name, ctx)
S = DatatypeSortRef(Z3_mk_enumeration_sort(ctx.ref(), name, num, _val_names, _values, _testers), ctx)
V = []
for i in range(num):
V.append(FuncDeclRef(_values[i], ctx))
V = [a() for a in V]
return S, V
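# Illustrative usage sketch (added commentary, not original z3.py code):
# enumeration values are nullary constructors of a fresh datatype, hence
# pairwise distinct.
#
#   Color, (red, green, blue) = EnumSort('Color', ['red', 'green', 'blue'])
#   prove(Distinct(red, green, blue))
#   c = Const('c', Color)
#   solve(c != red, c != green)  # the only remaining value is blue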
#########################################
#
# Parameter Sets
#
#########################################
class ParamsRef:
"""Set of parameters used to configure Solvers, Tactics and Simplifiers in Z3.
Consider using the function `args2params` to create instances of this object.
"""
def __init__(self, ctx=None, params=None):
self.ctx = _get_ctx(ctx)
if params is None:
self.params = Z3_mk_params(self.ctx.ref())
else:
self.params = params
Z3_params_inc_ref(self.ctx.ref(), self.params)
def __deepcopy__(self, memo={}):
return ParamsRef(self.ctx, self.params)
def __del__(self):
if self.ctx.ref() is not None:
Z3_params_dec_ref(self.ctx.ref(), self.params)
def set(self, name, val):
"""Set parameter name with value val."""
if z3_debug():
_z3_assert(isinstance(name, str), "parameter name must be a string")
name_sym = to_symbol(name, self.ctx)
if isinstance(val, bool):
Z3_params_set_bool(self.ctx.ref(), self.params, name_sym, val)
elif _is_int(val):
Z3_params_set_uint(self.ctx.ref(), self.params, name_sym, val)
elif isinstance(val, float):
Z3_params_set_double(self.ctx.ref(), self.params, name_sym, val)
elif isinstance(val, str):
Z3_params_set_symbol(self.ctx.ref(), self.params, name_sym, to_symbol(val, self.ctx))
else:
if z3_debug():
_z3_assert(False, "invalid parameter value")
def __repr__(self):
return Z3_params_to_string(self.ctx.ref(), self.params)
def validate(self, ds):
_z3_assert(isinstance(ds, ParamDescrsRef), "parameter description set expected")
Z3_params_validate(self.ctx.ref(), self.params, ds.descr)
def args2params(arguments, keywords, ctx=None):
"""Convert python arguments into a Z3_params object.
A ':' is added to the keywords, and '_' is replaced with '-'
>>> args2params(['model', True, 'relevancy', 2], {'elim_and' : True})
(params model true relevancy 2 elim_and true)
"""
if z3_debug():
_z3_assert(len(arguments) % 2 == 0, "Argument list must have an even number of elements.")
prev = None
r = ParamsRef(ctx)
for a in arguments:
if prev is None:
prev = a
else:
r.set(prev, a)
prev = None
for k in keywords:
v = keywords[k]
r.set(k, v)
return r
class ParamDescrsRef:
"""Set of parameter descriptions for Solvers, Tactics and Simplifiers in Z3.
"""
def __init__(self, descr, ctx=None):
_z3_assert(isinstance(descr, ParamDescrs), "parameter description object expected")
self.ctx = _get_ctx(ctx)
self.descr = descr
Z3_param_descrs_inc_ref(self.ctx.ref(), self.descr)
def __deepcopy__(self, memo={}):
        return ParamDescrsRef(self.descr, self.ctx)
def __del__(self):
if self.ctx.ref() is not None:
Z3_param_descrs_dec_ref(self.ctx.ref(), self.descr)
def size(self):
"""Return the size of in the parameter description `self`.
"""
return int(Z3_param_descrs_size(self.ctx.ref(), self.descr))
def __len__(self):
"""Return the size of in the parameter description `self`.
"""
return self.size()
def get_name(self, i):
"""Return the i-th parameter name in the parameter description `self`.
"""
return _symbol2py(self.ctx, Z3_param_descrs_get_name(self.ctx.ref(), self.descr, i))
def get_kind(self, n):
"""Return the kind of the parameter named `n`.
"""
return Z3_param_descrs_get_kind(self.ctx.ref(), self.descr, to_symbol(n, self.ctx))
def get_documentation(self, n):
"""Return the documentation string of the parameter named `n`.
"""
return Z3_param_descrs_get_documentation(self.ctx.ref(), self.descr, to_symbol(n, self.ctx))
def __getitem__(self, arg):
if _is_int(arg):
return self.get_name(arg)
else:
return self.get_kind(arg)
def __repr__(self):
return Z3_param_descrs_to_string(self.ctx.ref(), self.descr)
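# Illustrative usage sketch (added commentary, not original z3.py code;
# assumes Tactic.param_descrs(), defined later in this file): parameter
# descriptions let you discover the options a tactic accepts.
#
#   ds = Tactic('simplify').param_descrs()
#   for i in range(len(ds)):
#       name = ds.get_name(i)
#       print(name, ds.get_documentation(name))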
#########################################
#
# Goals
#
#########################################
class Goal(Z3PPObject):
"""Goal is a collection of constraints we want to find a solution or show to be unsatisfiable (infeasible).
Goals are processed using Tactics. A Tactic transforms a goal into a set of subgoals.
A goal has a solution if one of its subgoals has a solution.
A goal is unsatisfiable if all subgoals are unsatisfiable.
"""
def __init__(self, models=True, unsat_cores=False, proofs=False, ctx=None, goal=None):
if z3_debug():
_z3_assert(goal is None or ctx is not None,
"If goal is different from None, then ctx must be also different from None")
self.ctx = _get_ctx(ctx)
self.goal = goal
if self.goal is None:
self.goal = Z3_mk_goal(self.ctx.ref(), models, unsat_cores, proofs)
Z3_goal_inc_ref(self.ctx.ref(), self.goal)
def __del__(self):
if self.goal is not None and self.ctx.ref() is not None:
Z3_goal_dec_ref(self.ctx.ref(), self.goal)
def depth(self):
"""Return the depth of the goal `self`.
The depth corresponds to the number of tactics applied to `self`.
>>> x, y = Ints('x y')
>>> g = Goal()
>>> g.add(x == 0, y >= x + 1)
>>> g.depth()
0
>>> r = Then('simplify', 'solve-eqs')(g)
>>> # r has 1 subgoal
>>> len(r)
1
>>> r[0].depth()
2
"""
return int(Z3_goal_depth(self.ctx.ref(), self.goal))
def inconsistent(self):
"""Return `True` if `self` contains the `False` constraints.
>>> x, y = Ints('x y')
>>> g = Goal()
>>> g.inconsistent()
False
>>> g.add(x == 0, x == 1)
>>> g
[x == 0, x == 1]
>>> g.inconsistent()
False
>>> g2 = Tactic('propagate-values')(g)[0]
>>> g2.inconsistent()
True
"""
return Z3_goal_inconsistent(self.ctx.ref(), self.goal)
def prec(self):
"""Return the precision (under-approximation, over-approximation, or precise) of the goal `self`.
>>> g = Goal()
>>> g.prec() == Z3_GOAL_PRECISE
True
>>> x, y = Ints('x y')
>>> g.add(x == y + 1)
>>> g.prec() == Z3_GOAL_PRECISE
True
>>> t = With(Tactic('add-bounds'), add_bound_lower=0, add_bound_upper=10)
>>> g2 = t(g)[0]
>>> g2
[x == y + 1, x <= 10, x >= 0, y <= 10, y >= 0]
>>> g2.prec() == Z3_GOAL_PRECISE
False
>>> g2.prec() == Z3_GOAL_UNDER
True
"""
return Z3_goal_precision(self.ctx.ref(), self.goal)
def precision(self):
"""Alias for `prec()`.
>>> g = Goal()
>>> g.precision() == Z3_GOAL_PRECISE
True
"""
return self.prec()
def size(self):
"""Return the number of constraints in the goal `self`.
>>> g = Goal()
>>> g.size()
0
>>> x, y = Ints('x y')
>>> g.add(x == 0, y > x)
>>> g.size()
2
"""
return int(Z3_goal_size(self.ctx.ref(), self.goal))
def __len__(self):
"""Return the number of constraints in the goal `self`.
>>> g = Goal()
>>> len(g)
0
>>> x, y = Ints('x y')
>>> g.add(x == 0, y > x)
>>> len(g)
2
"""
return self.size()
def get(self, i):
"""Return a constraint in the goal `self`.
>>> g = Goal()
>>> x, y = Ints('x y')
>>> g.add(x == 0, y > x)
>>> g.get(0)
x == 0
>>> g.get(1)
y > x
"""
return _to_expr_ref(Z3_goal_formula(self.ctx.ref(), self.goal, i), self.ctx)
def __getitem__(self, arg):
"""Return a constraint in the goal `self`.
>>> g = Goal()
>>> x, y = Ints('x y')
>>> g.add(x == 0, y > x)
>>> g[0]
x == 0
>>> g[1]
y > x
"""
if arg >= len(self):
raise IndexError
return self.get(arg)
def assert_exprs(self, *args):
"""Assert constraints into the goal.
>>> x = Int('x')
>>> g = Goal()
>>> g.assert_exprs(x > 0, x < 2)
>>> g
[x > 0, x < 2]
"""
args = _get_args(args)
s = BoolSort(self.ctx)
for arg in args:
arg = s.cast(arg)
Z3_goal_assert(self.ctx.ref(), self.goal, arg.as_ast())
def append(self, *args):
"""Add constraints.
>>> x = Int('x')
>>> g = Goal()
>>> g.append(x > 0, x < 2)
>>> g
[x > 0, x < 2]
"""
self.assert_exprs(*args)
def insert(self, *args):
"""Add constraints.
>>> x = Int('x')
>>> g = Goal()
>>> g.insert(x > 0, x < 2)
>>> g
[x > 0, x < 2]
"""
self.assert_exprs(*args)
def add(self, *args):
"""Add constraints.
>>> x = Int('x')
>>> g = Goal()
>>> g.add(x > 0, x < 2)
>>> g
[x > 0, x < 2]
"""
self.assert_exprs(*args)
def convert_model(self, model):
"""Retrieve model from a satisfiable goal
>>> a, b = Ints('a b')
>>> g = Goal()
>>> g.add(Or(a == 0, a == 1), Or(b == 0, b == 1), a > b)
>>> t = Then(Tactic('split-clause'), Tactic('solve-eqs'))
>>> r = t(g)
>>> r[0]
[Or(b == 0, b == 1), Not(0 <= b)]
>>> r[1]
[Or(b == 0, b == 1), Not(1 <= b)]
>>> # Remark: the subgoal r[0] is unsatisfiable
>>> # Creating a solver for solving the second subgoal
>>> s = Solver()
>>> s.add(r[1])
>>> s.check()
sat
>>> s.model()
[b = 0]
>>> # Model s.model() does not assign a value to `a`
>>> # It is a model for subgoal `r[1]`, but not for goal `g`
>>> # The method convert_model creates a model for `g` from a model for `r[1]`.
>>> r[1].convert_model(s.model())
[b = 0, a = 1]
"""
if z3_debug():
_z3_assert(isinstance(model, ModelRef), "Z3 Model expected")
return ModelRef(Z3_goal_convert_model(self.ctx.ref(), self.goal, model.model), self.ctx)
def __repr__(self):
return obj_to_string(self)
def sexpr(self):
"""Return a textual representation of the s-expression representing the goal."""
return Z3_goal_to_string(self.ctx.ref(), self.goal)
def dimacs(self, include_names=True):
"""Return a textual representation of the goal in DIMACS format."""
return Z3_goal_to_dimacs_string(self.ctx.ref(), self.goal, include_names)
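# Example (sketch): DIMACS export expects the goal to be in propositional CNF,
# so convert it with the 'tseitin-cnf' tactic first; `p` and `q` are
# hypothetical Boolean constants.
#
#   p, q = Bools('p q')
#   g = Goal()
#   g.add(Or(p, q), Not(p))
#   g2 = Tactic('tseitin-cnf')(g)[0]
#   print(g2.dimacs())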
def translate(self, target):
"""Copy goal `self` to context `target`.
>>> x = Int('x')
>>> g = Goal()
>>> g.add(x > 10)
>>> g
[x > 10]
>>> c2 = Context()
>>> g2 = g.translate(c2)
>>> g2
[x > 10]
>>> g.ctx == main_ctx()
True
>>> g2.ctx == c2
True
>>> g2.ctx == main_ctx()
False
"""
if z3_debug():
_z3_assert(isinstance(target, Context), "target must be a context")
return Goal(goal=Z3_goal_translate(self.ctx.ref(), self.goal, target.ref()), ctx=target)
def __copy__(self):
return self.translate(self.ctx)
def __deepcopy__(self, memo={}):
return self.translate(self.ctx)
def simplify(self, *arguments, **keywords):
"""Return a new simplified goal.
This method is essentially invoking the simplify tactic.
>>> g = Goal()
>>> x = Int('x')
>>> g.add(x + 1 >= 2)
>>> g
[x + 1 >= 2]
>>> g2 = g.simplify()
>>> g2
[x >= 1]
>>> # g was not modified
>>> g
[x + 1 >= 2]
"""
t = Tactic("simplify")
return t.apply(self, *arguments, **keywords)[0]
def as_expr(self):
"""Return goal `self` as a single Z3 expression.
>>> x = Int('x')
>>> g = Goal()
>>> g.as_expr()
True
>>> g.add(x > 1)
>>> g.as_expr()
x > 1
>>> g.add(x < 10)
>>> g.as_expr()
And(x > 1, x < 10)
"""
sz = len(self)
if sz == 0:
return BoolVal(True, self.ctx)
elif sz == 1:
return self.get(0)
else:
return And([self.get(i) for i in range(len(self))], self.ctx)
#########################################
#
# AST Vector
#
#########################################
class AstVector(Z3PPObject):
"""A collection (vector) of ASTs."""
def __init__(self, v=None, ctx=None):
self.vector = None
if v is None:
self.ctx = _get_ctx(ctx)
self.vector = Z3_mk_ast_vector(self.ctx.ref())
else:
self.vector = v
assert ctx is not None
self.ctx = ctx
Z3_ast_vector_inc_ref(self.ctx.ref(), self.vector)
def __del__(self):
if self.vector is not None and self.ctx.ref() is not None:
Z3_ast_vector_dec_ref(self.ctx.ref(), self.vector)
def __len__(self):
"""Return the size of the vector `self`.
>>> A = AstVector()
>>> len(A)
0
>>> A.push(Int('x'))
>>> A.push(Int('x'))
>>> len(A)
2
"""
return int(Z3_ast_vector_size(self.ctx.ref(), self.vector))
def __getitem__(self, i):
"""Return the AST at position `i`.
>>> A = AstVector()
>>> A.push(Int('x') + 1)
>>> A.push(Int('y'))
>>> A[0]
x + 1
>>> A[1]
y
"""
if isinstance(i, int):
if i < 0:
i += self.__len__()
if i >= self.__len__():
raise IndexError
return _to_ast_ref(Z3_ast_vector_get(self.ctx.ref(), self.vector, i), self.ctx)
elif isinstance(i, slice):
result = []
for ii in range(*i.indices(self.__len__())):
result.append(_to_ast_ref(
Z3_ast_vector_get(self.ctx.ref(), self.vector, ii),
self.ctx,
))
return result
def __setitem__(self, i, v):
"""Update AST at position `i`.
>>> A = AstVector()
>>> A.push(Int('x') + 1)
>>> A.push(Int('y'))
>>> A[0]
x + 1
>>> A[0] = Int('x')
>>> A[0]
x
"""
if i >= self.__len__():
raise IndexError
Z3_ast_vector_set(self.ctx.ref(), self.vector, i, v.as_ast())
def push(self, v):
"""Add `v` in the end of the vector.
>>> A = AstVector()
>>> len(A)
0
>>> A.push(Int('x'))
>>> len(A)
1
"""
Z3_ast_vector_push(self.ctx.ref(), self.vector, v.as_ast())
def resize(self, sz):
"""Resize the vector to `sz` elements.
>>> A = AstVector()
>>> A.resize(10)
>>> len(A)
10
>>> for i in range(10): A[i] = Int('x')
>>> A[5]
x
"""
Z3_ast_vector_resize(self.ctx.ref(), self.vector, sz)
def __contains__(self, item):
"""Return `True` if the vector contains `item`.
>>> x = Int('x')
>>> A = AstVector()
>>> x in A
False
>>> A.push(x)
>>> x in A
True
>>> (x+1) in A
False
>>> A.push(x+1)
>>> (x+1) in A
True
>>> A
[x, x + 1]
"""
for elem in self:
if elem.eq(item):
return True
return False
def translate(self, other_ctx):
"""Copy vector `self` to context `other_ctx`.
>>> x = Int('x')
>>> A = AstVector()
>>> A.push(x)
>>> c2 = Context()
>>> B = A.translate(c2)
>>> B
[x]
"""
return AstVector(
Z3_ast_vector_translate(self.ctx.ref(), self.vector, other_ctx.ref()),
ctx=other_ctx,
)
def __copy__(self):
return self.translate(self.ctx)
def __deepcopy__(self, memo={}):
return self.translate(self.ctx)
def __repr__(self):
return obj_to_string(self)
def sexpr(self):
"""Return a textual representation of the s-expression representing the vector."""
return Z3_ast_vector_to_string(self.ctx.ref(), self.vector)
#########################################
#
# AST Map
#
#########################################
class AstMap:
"""A mapping from ASTs to ASTs."""
def __init__(self, m=None, ctx=None):
self.map = None
if m is None:
self.ctx = _get_ctx(ctx)
self.map = Z3_mk_ast_map(self.ctx.ref())
else:
self.map = m
assert ctx is not None
self.ctx = ctx
Z3_ast_map_inc_ref(self.ctx.ref(), self.map)
def __deepcopy__(self, memo={}):
return AstMap(self.map, self.ctx)
def __del__(self):
if self.map is not None and self.ctx.ref() is not None:
Z3_ast_map_dec_ref(self.ctx.ref(), self.map)
def __len__(self):
"""Return the size of the map.
>>> M = AstMap()
>>> len(M)
0
>>> x = Int('x')
>>> M[x] = IntVal(1)
>>> len(M)
1
"""
return int(Z3_ast_map_size(self.ctx.ref(), self.map))
def __contains__(self, key):
"""Return `True` if the map contains key `key`.
>>> M = AstMap()
>>> x = Int('x')
>>> M[x] = x + 1
>>> x in M
True
>>> x+1 in M
False
"""
return Z3_ast_map_contains(self.ctx.ref(), self.map, key.as_ast())
def __getitem__(self, key):
"""Retrieve the value associated with key `key`.
>>> M = AstMap()
>>> x = Int('x')
>>> M[x] = x + 1
>>> M[x]
x + 1
"""
return _to_ast_ref(Z3_ast_map_find(self.ctx.ref(), self.map, key.as_ast()), self.ctx)
def __setitem__(self, k, v):
"""Add/Update key `k` with value `v`.
>>> M = AstMap()
>>> x = Int('x')
>>> M[x] = x + 1
>>> len(M)
1
>>> M[x]
x + 1
>>> M[x] = IntVal(1)
>>> M[x]
1
"""
Z3_ast_map_insert(self.ctx.ref(), self.map, k.as_ast(), v.as_ast())
def __repr__(self):
return Z3_ast_map_to_string(self.ctx.ref(), self.map)
def erase(self, k):
"""Remove the entry associated with key `k`.
>>> M = AstMap()
>>> x = Int('x')
>>> M[x] = x + 1
>>> len(M)
1
>>> M.erase(x)
>>> len(M)
0
"""
Z3_ast_map_erase(self.ctx.ref(), self.map, k.as_ast())
def reset(self):
"""Remove all entries from the map.
>>> M = AstMap()
>>> x = Int('x')
>>> M[x] = x + 1
>>> M[x+x] = IntVal(1)
>>> len(M)
2
>>> M.reset()
>>> len(M)
0
"""
Z3_ast_map_reset(self.ctx.ref(), self.map)
def keys(self):
"""Return an AstVector containing all keys in the map.
>>> M = AstMap()
>>> x = Int('x')
>>> M[x] = x + 1
>>> M[x+x] = IntVal(1)
>>> M.keys()
[x, x + x]
"""
return AstVector(Z3_ast_map_keys(self.ctx.ref(), self.map), self.ctx)
#########################################
#
# Model
#
#########################################
class FuncEntry:
"""Store the value of the interpretation of a function in a particular point."""
def __init__(self, entry, ctx):
self.entry = entry
self.ctx = ctx
Z3_func_entry_inc_ref(self.ctx.ref(), self.entry)
def __deepcopy__(self, memo={}):
return FuncEntry(self.entry, self.ctx)
def __del__(self):
if self.ctx.ref() is not None:
Z3_func_entry_dec_ref(self.ctx.ref(), self.entry)
def num_args(self):
"""Return the number of arguments in the given entry.
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> s = Solver()
>>> s.add(f(0, 1) == 10, f(1, 2) == 20, f(1, 0) == 10)
>>> s.check()
sat
>>> m = s.model()
>>> f_i = m[f]
>>> f_i.num_entries()
1
>>> e = f_i.entry(0)
>>> e.num_args()
2
"""
return int(Z3_func_entry_get_num_args(self.ctx.ref(), self.entry))
def arg_value(self, idx):
"""Return the value of argument `idx`.
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> s = Solver()
>>> s.add(f(0, 1) == 10, f(1, 2) == 20, f(1, 0) == 10)
>>> s.check()
sat
>>> m = s.model()
>>> f_i = m[f]
>>> f_i.num_entries()
1
>>> e = f_i.entry(0)
>>> e
[1, 2, 20]
>>> e.num_args()
2
>>> e.arg_value(0)
1
>>> e.arg_value(1)
2
>>> try:
... e.arg_value(2)
... except IndexError:
... print("index error")
index error
"""
if idx >= self.num_args():
raise IndexError
return _to_expr_ref(Z3_func_entry_get_arg(self.ctx.ref(), self.entry, idx), self.ctx)
def value(self):
"""Return the value of the function at point `self`.
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> s = Solver()
>>> s.add(f(0, 1) == 10, f(1, 2) == 20, f(1, 0) == 10)
>>> s.check()
sat
>>> m = s.model()
>>> f_i = m[f]
>>> f_i.num_entries()
1
>>> e = f_i.entry(0)
>>> e
[1, 2, 20]
>>> e.num_args()
2
>>> e.value()
20
"""
return _to_expr_ref(Z3_func_entry_get_value(self.ctx.ref(), self.entry), self.ctx)
def as_list(self):
"""Return entry `self` as a Python list.
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> s = Solver()
>>> s.add(f(0, 1) == 10, f(1, 2) == 20, f(1, 0) == 10)
>>> s.check()
sat
>>> m = s.model()
>>> f_i = m[f]
>>> f_i.num_entries()
1
>>> e = f_i.entry(0)
>>> e.as_list()
[1, 2, 20]
"""
args = [self.arg_value(i) for i in range(self.num_args())]
args.append(self.value())
return args
def __repr__(self):
return repr(self.as_list())
class FuncInterp(Z3PPObject):
"""Stores the interpretation of a function in a Z3 model."""
def __init__(self, f, ctx):
self.f = f
self.ctx = ctx
if self.f is not None:
Z3_func_interp_inc_ref(self.ctx.ref(), self.f)
def __del__(self):
if self.f is not None and self.ctx.ref() is not None:
Z3_func_interp_dec_ref(self.ctx.ref(), self.f)
def else_value(self):
"""
Return the `else` value for a function interpretation.
Return None if Z3 did not specify the `else` value for
this object.
>>> f = Function('f', IntSort(), IntSort())
>>> s = Solver()
>>> s.add(f(0) == 1, f(1) == 1, f(2) == 0)
>>> s.check()
sat
>>> m = s.model()
>>> m[f]
[2 -> 0, else -> 1]
>>> m[f].else_value()
1
"""
r = Z3_func_interp_get_else(self.ctx.ref(), self.f)
if r:
return _to_expr_ref(r, self.ctx)
else:
return None
def num_entries(self):
"""Return the number of entries/points in the function interpretation `self`.
>>> f = Function('f', IntSort(), IntSort())
>>> s = Solver()
>>> s.add(f(0) == 1, f(1) == 1, f(2) == 0)
>>> s.check()
sat
>>> m = s.model()
>>> m[f]
[2 -> 0, else -> 1]
>>> m[f].num_entries()
1
"""
return int(Z3_func_interp_get_num_entries(self.ctx.ref(), self.f))
def arity(self):
"""Return the number of arguments for each entry in the function interpretation `self`.
>>> f = Function('f', IntSort(), IntSort())
>>> s = Solver()
>>> s.add(f(0) == 1, f(1) == 1, f(2) == 0)
>>> s.check()
sat
>>> m = s.model()
>>> m[f].arity()
1
"""
return int(Z3_func_interp_get_arity(self.ctx.ref(), self.f))
def entry(self, idx):
"""Return an entry at position `idx < self.num_entries()` in the function interpretation `self`.
>>> f = Function('f', IntSort(), IntSort())
>>> s = Solver()
>>> s.add(f(0) == 1, f(1) == 1, f(2) == 0)
>>> s.check()
sat
>>> m = s.model()
>>> m[f]
[2 -> 0, else -> 1]
>>> m[f].num_entries()
1
>>> m[f].entry(0)
[2, 0]
"""
if idx >= self.num_entries():
raise IndexError
return FuncEntry(Z3_func_interp_get_entry(self.ctx.ref(), self.f, idx), self.ctx)
def translate(self, other_ctx):
    """Copy function interpretation `self` to context `other_ctx`.
    There is no C API for translating a function interpretation on its own,
    so translate the enclosing model and look the function up there instead.
    """
    raise Z3Exception("FuncInterp cannot be translated; translate the enclosing model instead")
def __copy__(self):
    return self.translate(self.ctx)
def __deepcopy__(self, memo={}):
    return self.translate(self.ctx)
def as_list(self):
"""Return the function interpretation as a Python list.
>>> f = Function('f', IntSort(), IntSort())
>>> s = Solver()
>>> s.add(f(0) == 1, f(1) == 1, f(2) == 0)
>>> s.check()
sat
>>> m = s.model()
>>> m[f]
[2 -> 0, else -> 1]
>>> m[f].as_list()
[[2, 0], 1]
"""
r = [self.entry(i).as_list() for i in range(self.num_entries())]
r.append(self.else_value())
return r
def __repr__(self):
return obj_to_string(self)
class ModelRef(Z3PPObject):
"""Model/Solution of a satisfiability problem (aka system of constraints)."""
def __init__(self, m, ctx):
assert ctx is not None
self.model = m
self.ctx = ctx
Z3_model_inc_ref(self.ctx.ref(), self.model)
def __del__(self):
if self.ctx.ref() is not None:
Z3_model_dec_ref(self.ctx.ref(), self.model)
def __repr__(self):
return obj_to_string(self)
def sexpr(self):
"""Return a textual representation of the s-expression representing the model."""
return Z3_model_to_string(self.ctx.ref(), self.model)
def eval(self, t, model_completion=False):
"""Evaluate the expression `t` in the model `self`.
If `model_completion` is enabled, then a default interpretation is automatically added
for symbols that do not have an interpretation in the model `self`.
>>> x = Int('x')
>>> s = Solver()
>>> s.add(x > 0, x < 2)
>>> s.check()
sat
>>> m = s.model()
>>> m.eval(x + 1)
2
>>> m.eval(x == 1)
True
>>> y = Int('y')
>>> m.eval(y + x)
1 + y
>>> m.eval(y)
y
>>> m.eval(y, model_completion=True)
0
>>> # Now, m contains an interpretation for y
>>> m.eval(y + x)
1
"""
r = (Ast * 1)()
if Z3_model_eval(self.ctx.ref(), self.model, t.as_ast(), model_completion, r):
return _to_expr_ref(r[0], self.ctx)
raise Z3Exception("failed to evaluate expression in the model")
def evaluate(self, t, model_completion=False):
"""Alias for `eval`.
>>> x = Int('x')
>>> s = Solver()
>>> s.add(x > 0, x < 2)
>>> s.check()
sat
>>> m = s.model()
>>> m.evaluate(x + 1)
2
>>> m.evaluate(x == 1)
True
>>> y = Int('y')
>>> m.evaluate(y + x)
1 + y
>>> m.evaluate(y)
y
>>> m.evaluate(y, model_completion=True)
0
>>> # Now, m contains an interpretation for y
>>> m.evaluate(y + x)
1
"""
return self.eval(t, model_completion)
def __len__(self):
"""Return the number of constant and function declarations in the model `self`.
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> s = Solver()
>>> s.add(x > 0, f(x) != x)
>>> s.check()
sat
>>> m = s.model()
>>> len(m)
2
"""
num_consts = int(Z3_model_get_num_consts(self.ctx.ref(), self.model))
num_funcs = int(Z3_model_get_num_funcs(self.ctx.ref(), self.model))
return num_consts + num_funcs
def get_interp(self, decl):
"""Return the interpretation for a given declaration or constant.
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> s = Solver()
>>> s.add(x > 0, x < 2, f(x) == 0)
>>> s.check()
sat
>>> m = s.model()
>>> m[x]
1
>>> m[f]
[else -> 0]
"""
if z3_debug():
_z3_assert(isinstance(decl, FuncDeclRef) or is_const(decl), "Z3 declaration expected")
if is_const(decl):
decl = decl.decl()
try:
if decl.arity() == 0:
_r = Z3_model_get_const_interp(self.ctx.ref(), self.model, decl.ast)
if _r.value is None:
return None
r = _to_expr_ref(_r, self.ctx)
if is_as_array(r):
return self.get_interp(get_as_array_func(r))
else:
return r
else:
return FuncInterp(Z3_model_get_func_interp(self.ctx.ref(), self.model, decl.ast), self.ctx)
except Z3Exception:
return None
def num_sorts(self):
"""Return the number of uninterpreted sorts that contain an interpretation in the model `self`.
>>> A = DeclareSort('A')
>>> a, b = Consts('a b', A)
>>> s = Solver()
>>> s.add(a != b)
>>> s.check()
sat
>>> m = s.model()
>>> m.num_sorts()
1
"""
return int(Z3_model_get_num_sorts(self.ctx.ref(), self.model))
def get_sort(self, idx):
"""Return the uninterpreted sort at position `idx` < self.num_sorts().
>>> A = DeclareSort('A')
>>> B = DeclareSort('B')
>>> a1, a2 = Consts('a1 a2', A)
>>> b1, b2 = Consts('b1 b2', B)
>>> s = Solver()
>>> s.add(a1 != a2, b1 != b2)
>>> s.check()
sat
>>> m = s.model()
>>> m.num_sorts()
2
>>> m.get_sort(0)
A
>>> m.get_sort(1)
B
"""
if idx >= self.num_sorts():
raise IndexError
return _to_sort_ref(Z3_model_get_sort(self.ctx.ref(), self.model, idx), self.ctx)
def sorts(self):
"""Return all uninterpreted sorts that have an interpretation in the model `self`.
>>> A = DeclareSort('A')
>>> B = DeclareSort('B')
>>> a1, a2 = Consts('a1 a2', A)
>>> b1, b2 = Consts('b1 b2', B)
>>> s = Solver()
>>> s.add(a1 != a2, b1 != b2)
>>> s.check()
sat
>>> m = s.model()
>>> m.sorts()
[A, B]
"""
return [self.get_sort(i) for i in range(self.num_sorts())]
def get_universe(self, s):
"""Return the interpretation for the uninterpreted sort `s` in the model `self`.
>>> A = DeclareSort('A')
>>> a, b = Consts('a b', A)
>>> s = Solver()
>>> s.add(a != b)
>>> s.check()
sat
>>> m = s.model()
>>> m.get_universe(A)
[A!val!1, A!val!0]
"""
if z3_debug():
_z3_assert(isinstance(s, SortRef), "Z3 sort expected")
try:
return AstVector(Z3_model_get_sort_universe(self.ctx.ref(), self.model, s.ast), self.ctx)
except Z3Exception:
return None
def __getitem__(self, idx):
"""If `idx` is an integer, then the declaration at position `idx` in the model `self` is returned.
If `idx` is a declaration, then the actual interpretation is returned.
The elements can be retrieved using position or the actual declaration.
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> s = Solver()
>>> s.add(x > 0, x < 2, f(x) == 0)
>>> s.check()
sat
>>> m = s.model()
>>> len(m)
2
>>> m[0]
x
>>> m[1]
f
>>> m[x]
1
>>> m[f]
[else -> 0]
>>> for d in m: print("%s -> %s" % (d, m[d]))
x -> 1
f -> [else -> 0]
"""
if _is_int(idx):
if idx >= len(self):
raise IndexError
num_consts = Z3_model_get_num_consts(self.ctx.ref(), self.model)
if (idx < num_consts):
return FuncDeclRef(Z3_model_get_const_decl(self.ctx.ref(), self.model, idx), self.ctx)
else:
return FuncDeclRef(Z3_model_get_func_decl(self.ctx.ref(), self.model, idx - num_consts), self.ctx)
if isinstance(idx, FuncDeclRef):
return self.get_interp(idx)
if is_const(idx):
return self.get_interp(idx.decl())
if isinstance(idx, SortRef):
return self.get_universe(idx)
if z3_debug():
_z3_assert(False, "Integer, Z3 declaration, or Z3 constant expected")
return None
def decls(self):
"""Return a list with all symbols that have an interpretation in the model `self`.
>>> f = Function('f', IntSort(), IntSort())
>>> x = Int('x')
>>> s = Solver()
>>> s.add(x > 0, x < 2, f(x) == 0)
>>> s.check()
sat
>>> m = s.model()
>>> m.decls()
[x, f]
"""
r = []
for i in range(Z3_model_get_num_consts(self.ctx.ref(), self.model)):
r.append(FuncDeclRef(Z3_model_get_const_decl(self.ctx.ref(), self.model, i), self.ctx))
for i in range(Z3_model_get_num_funcs(self.ctx.ref(), self.model)):
r.append(FuncDeclRef(Z3_model_get_func_decl(self.ctx.ref(), self.model, i), self.ctx))
return r
def update_value(self, x, value):
"""Update the interpretation of a constant"""
if is_expr(x):
x = x.decl()
if not is_func_decl(x) or x.arity() != 0:
raise Z3Exception("Expecting 0-ary function or constant expression")
value = _py2expr(value)
Z3_add_const_interp(x.ctx_ref(), self.model, x.ast, value.ast)
def translate(self, target):
"""Translate `self` to the context `target`. That is, return a copy of `self` in the context `target`.
"""
if z3_debug():
_z3_assert(isinstance(target, Context), "argument must be a Z3 context")
model = Z3_model_translate(self.ctx.ref(), self.model, target.ref())
return ModelRef(model, target)
def __copy__(self):
return self.translate(self.ctx)
def __deepcopy__(self, memo={}):
return self.translate(self.ctx)
def Model(ctx=None):
ctx = _get_ctx(ctx)
return ModelRef(Z3_mk_model(ctx.ref()), ctx)
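# Example (sketch): building a model by hand with Model() and update_value();
# `x` is a hypothetical Int constant.
#
#   x = Int('x')
#   m = Model()
#   m.update_value(x, IntVal(5))
#   m.eval(x)    # -> 5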
def is_as_array(n):
"""Return true if n is a Z3 expression of the form (_ as-array f)."""
return isinstance(n, ExprRef) and Z3_is_as_array(n.ctx.ref(), n.as_ast())
def get_as_array_func(n):
"""Return the function declaration f associated with a Z3 expression of the form (_ as-array f)."""
if z3_debug():
_z3_assert(is_as_array(n), "as-array Z3 expression expected.")
return FuncDeclRef(Z3_get_as_array_func_decl(n.ctx.ref(), n.as_ast()), n.ctx)
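# Example (sketch): models often represent array interpretations as
# `(_ as-array f)` terms; the helpers above recover the underlying function.
# `a` is a hypothetical array constant.
#
#   a = Array('a', IntSort(), IntSort())
#   s = Solver()
#   s.add(a[0] == 1)
#   s.check()          # -> sat
#   m = s.model()
#   n = m.eval(a)      # may be an `(_ as-array f)` term
#   if is_as_array(n):
#       print(m[get_as_array_func(n)])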
#########################################
#
# Statistics
#
#########################################
class Statistics:
"""Statistics for `Solver.check()`."""
def __init__(self, stats, ctx):
self.stats = stats
self.ctx = ctx
Z3_stats_inc_ref(self.ctx.ref(), self.stats)
def __deepcopy__(self, memo={}):
return Statistics(self.stats, self.ctx)
def __del__(self):
if self.ctx.ref() is not None:
Z3_stats_dec_ref(self.ctx.ref(), self.stats)
def __repr__(self):
if in_html_mode():
out = io.StringIO()
even = True
out.write(u('<table border="1" cellpadding="2" cellspacing="0">'))
for k, v in self:
if even:
out.write(u('<tr style="background-color:#CFCFCF">'))
even = False
else:
out.write(u("<tr>"))
even = True
out.write(u("<td>%s</td><td>%s</td></tr>" % (k, v)))
out.write(u("</table>"))
return out.getvalue()
else:
return Z3_stats_to_string(self.ctx.ref(), self.stats)
def __len__(self):
"""Return the number of statistical counters.
>>> x = Int('x')
>>> s = Then('simplify', 'nlsat').solver()
>>> s.add(x > 0)
>>> s.check()
sat
>>> st = s.statistics()
>>> len(st)
6
"""
return int(Z3_stats_size(self.ctx.ref(), self.stats))
def __getitem__(self, idx):
"""Return the value of statistical counter at position `idx`. The result is a pair (key, value).
>>> x = Int('x')
>>> s = Then('simplify', 'nlsat').solver()
>>> s.add(x > 0)
>>> s.check()
sat
>>> st = s.statistics()
>>> len(st)
6
>>> st[0]
('nlsat propagations', 2)
>>> st[1]
('nlsat stages', 2)
"""
if idx >= len(self):
raise IndexError
if Z3_stats_is_uint(self.ctx.ref(), self.stats, idx):
val = int(Z3_stats_get_uint_value(self.ctx.ref(), self.stats, idx))
else:
val = Z3_stats_get_double_value(self.ctx.ref(), self.stats, idx)
return (Z3_stats_get_key(self.ctx.ref(), self.stats, idx), val)
def keys(self):
"""Return the list of statistical counters.
>>> x = Int('x')
>>> s = Then('simplify', 'nlsat').solver()
>>> s.add(x > 0)
>>> s.check()
sat
>>> st = s.statistics()
"""
return [Z3_stats_get_key(self.ctx.ref(), self.stats, idx) for idx in range(len(self))]
def get_key_value(self, key):
"""Return the value of a particular statistical counter.
>>> x = Int('x')
>>> s = Then('simplify', 'nlsat').solver()
>>> s.add(x > 0)
>>> s.check()
sat
>>> st = s.statistics()
>>> st.get_key_value('nlsat propagations')
2
"""
for idx in range(len(self)):
if key == Z3_stats_get_key(self.ctx.ref(), self.stats, idx):
if Z3_stats_is_uint(self.ctx.ref(), self.stats, idx):
return int(Z3_stats_get_uint_value(self.ctx.ref(), self.stats, idx))
else:
return Z3_stats_get_double_value(self.ctx.ref(), self.stats, idx)
raise Z3Exception("unknown key")
def __getattr__(self, name):
"""Access the value of statistical using attributes.
Remark: to access a counter containing blank spaces (e.g., 'nlsat propagations'),
we should use '_' (e.g., 'nlsat_propagations').
>>> x = Int('x')
>>> s = Then('simplify', 'nlsat').solver()
>>> s.add(x > 0)
>>> s.check()
sat
>>> st = s.statistics()
>>> st.nlsat_propagations
2
>>> st.nlsat_stages
2
"""
key = name.replace("_", " ")
try:
return self.get_key_value(key)
except Z3Exception:
raise AttributeError
#########################################
#
# Solver
#
#########################################
class CheckSatResult:
"""Represents the result of a satisfiability check: sat, unsat, unknown.
>>> s = Solver()
>>> s.check()
sat
>>> r = s.check()
>>> isinstance(r, CheckSatResult)
True
"""
def __init__(self, r):
self.r = r
def __deepcopy__(self, memo={}):
return CheckSatResult(self.r)
def __eq__(self, other):
return isinstance(other, CheckSatResult) and self.r == other.r
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
if in_html_mode():
if self.r == Z3_L_TRUE:
return "<b>sat</b>"
elif self.r == Z3_L_FALSE:
return "<b>unsat</b>"
else:
return "<b>unknown</b>"
else:
if self.r == Z3_L_TRUE:
return "sat"
elif self.r == Z3_L_FALSE:
return "unsat"
else:
return "unknown"
def _repr_html_(self):
in_html = in_html_mode()
set_html_mode(True)
res = repr(self)
set_html_mode(in_html)
return res
sat = CheckSatResult(Z3_L_TRUE)
unsat = CheckSatResult(Z3_L_FALSE)
unknown = CheckSatResult(Z3_L_UNDEF)
class Solver(Z3PPObject):
"""
Solver API provides methods for implementing the main SMT 2.0 commands:
push, pop, check, get-model, etc.
"""
def __init__(self, solver=None, ctx=None, logFile=None):
assert solver is None or ctx is not None
self.ctx = _get_ctx(ctx)
self.backtrack_level = 4000000000
self.solver = None
if solver is None:
self.solver = Z3_mk_solver(self.ctx.ref())
else:
self.solver = solver
Z3_solver_inc_ref(self.ctx.ref(), self.solver)
if logFile is not None:
self.set("smtlib2_log", logFile)
def __del__(self):
if self.solver is not None and self.ctx.ref() is not None:
Z3_solver_dec_ref(self.ctx.ref(), self.solver)
def set(self, *args, **keys):
"""Set a configuration option.
The method `help()` returns a string containing all available options.
>>> s = Solver()
>>> # The option MBQI can be set using three different approaches.
>>> s.set(mbqi=True)
>>> s.set('MBQI', True)
>>> s.set(':mbqi', True)
"""
p = args2params(args, keys, self.ctx)
Z3_solver_set_params(self.ctx.ref(), self.solver, p.params)
def push(self):
"""Create a backtracking point.
>>> x = Int('x')
>>> s = Solver()
>>> s.add(x > 0)
>>> s
[x > 0]
>>> s.push()
>>> s.add(x < 1)
>>> s
[x > 0, x < 1]
>>> s.check()
unsat
>>> s.pop()
>>> s.check()
sat
>>> s
[x > 0]
"""
Z3_solver_push(self.ctx.ref(), self.solver)
def pop(self, num=1):
"""Backtrack \\c num backtracking points.
>>> x = Int('x')
>>> s = Solver()
>>> s.add(x > 0)
>>> s
[x > 0]
>>> s.push()
>>> s.add(x < 1)
>>> s
[x > 0, x < 1]
>>> s.check()
unsat
>>> s.pop()
>>> s.check()
sat
>>> s
[x > 0]
"""
Z3_solver_pop(self.ctx.ref(), self.solver, num)
def num_scopes(self):
"""Return the current number of backtracking points.
>>> s = Solver()
>>> s.num_scopes()
0
>>> s.push()
>>> s.num_scopes()
1
>>> s.push()
>>> s.num_scopes()
2
>>> s.pop()
>>> s.num_scopes()
1
"""
return Z3_solver_get_num_scopes(self.ctx.ref(), self.solver)
def reset(self):
"""Remove all asserted constraints and backtracking points created using `push()`.
>>> x = Int('x')
>>> s = Solver()
>>> s.add(x > 0)
>>> s
[x > 0]
>>> s.reset()
>>> s
[]
"""
Z3_solver_reset(self.ctx.ref(), self.solver)
def assert_exprs(self, *args):
"""Assert constraints into the solver.
>>> x = Int('x')
>>> s = Solver()
>>> s.assert_exprs(x > 0, x < 2)
>>> s
[x > 0, x < 2]
"""
args = _get_args(args)
s = BoolSort(self.ctx)
for arg in args:
if isinstance(arg, Goal) or isinstance(arg, AstVector):
for f in arg:
Z3_solver_assert(self.ctx.ref(), self.solver, f.as_ast())
else:
arg = s.cast(arg)
Z3_solver_assert(self.ctx.ref(), self.solver, arg.as_ast())
def add(self, *args):
"""Assert constraints into the solver.
>>> x = Int('x')
>>> s = Solver()
>>> s.add(x > 0, x < 2)
>>> s
[x > 0, x < 2]
"""
self.assert_exprs(*args)
def __iadd__(self, fml):
self.add(fml)
return self
def append(self, *args):
"""Assert constraints into the solver.
>>> x = Int('x')
>>> s = Solver()
>>> s.append(x > 0, x < 2)
>>> s
[x > 0, x < 2]
"""
self.assert_exprs(*args)
def insert(self, *args):
"""Assert constraints into the solver.
>>> x = Int('x')
>>> s = Solver()
>>> s.insert(x > 0, x < 2)
>>> s
[x > 0, x < 2]
"""
self.assert_exprs(*args)
def assert_and_track(self, a, p):
"""Assert constraint `a` and track it in the unsat core using the Boolean constant `p`.
If `p` is a string, it will be automatically converted into a Boolean constant.
>>> x = Int('x')
>>> p3 = Bool('p3')
>>> s = Solver()
>>> s.set(unsat_core=True)
>>> s.assert_and_track(x > 0, 'p1')
>>> s.assert_and_track(x != 1, 'p2')
>>> s.assert_and_track(x < 0, p3)
>>> print(s.check())
unsat
>>> c = s.unsat_core()
>>> len(c)
2
>>> Bool('p1') in c
True
>>> Bool('p2') in c
False
>>> p3 in c
True
"""
if isinstance(p, str):
p = Bool(p, self.ctx)
_z3_assert(isinstance(a, BoolRef), "Boolean expression expected")
_z3_assert(isinstance(p, BoolRef) and is_const(p), "Boolean expression expected")
Z3_solver_assert_and_track(self.ctx.ref(), self.solver, a.as_ast(), p.as_ast())
def check(self, *assumptions):
"""Check whether the assertions in the given solver plus the optional assumptions are consistent or not.
>>> x = Int('x')
>>> s = Solver()
>>> s.check()
sat
>>> s.add(x > 0, x < 2)
>>> s.check()
sat
>>> s.model().eval(x)
1
>>> s.add(x < 1)
>>> s.check()
unsat
>>> s.reset()
>>> s.add(2**x == 4)
>>> s.check()
unknown
"""
s = BoolSort(self.ctx)
assumptions = _get_args(assumptions)
num = len(assumptions)
_assumptions = (Ast * num)()
for i in range(num):
_assumptions[i] = s.cast(assumptions[i]).as_ast()
r = Z3_solver_check_assumptions(self.ctx.ref(), self.solver, num, _assumptions)
return CheckSatResult(r)
def model(self):
"""Return a model for the last `check()`.
This function raises an exception if
a model is not available (e.g., last `check()` returned unsat).
>>> s = Solver()
>>> a = Int('a')
>>> s.add(a + 2 == 0)
>>> s.check()
sat
>>> s.model()
[a = -2]
"""
try:
return ModelRef(Z3_solver_get_model(self.ctx.ref(), self.solver), self.ctx)
except Z3Exception:
raise Z3Exception("model is not available")
def import_model_converter(self, other):
"""Import model converter from other into the current solver"""
Z3_solver_import_model_converter(self.ctx.ref(), other.solver, self.solver)
def unsat_core(self):
"""Return a subset (as an AST vector) of the assumptions provided to the last check().
These are the assumptions Z3 used in the unsatisfiability proof.
Assumptions are used to extract unsatisfiable cores.
They may also be used to "retract" assumptions. Note that assumptions are not really
"soft constraints", but they can be used to implement them.
>>> p1, p2, p3 = Bools('p1 p2 p3')
>>> x, y = Ints('x y')
>>> s = Solver()
>>> s.add(Implies(p1, x > 0))
>>> s.add(Implies(p2, y > x))
>>> s.add(Implies(p2, y < 1))
>>> s.add(Implies(p3, y > -3))
>>> s.check(p1, p2, p3)
unsat
>>> core = s.unsat_core()
>>> len(core)
2
>>> p1 in core
True
>>> p2 in core
True
>>> p3 in core
False
>>> # "Retracting" p2
>>> s.check(p1, p3)
sat
"""
return AstVector(Z3_solver_get_unsat_core(self.ctx.ref(), self.solver), self.ctx)
def consequences(self, assumptions, variables):
"""Determine fixed values for the variables based on the solver state and assumptions.
>>> s = Solver()
>>> a, b, c, d = Bools('a b c d')
>>> s.add(Implies(a,b), Implies(b, c))
>>> s.consequences([a],[b,c,d])
(sat, [Implies(a, b), Implies(a, c)])
>>> s.consequences([Not(c),d],[a,b,c,d])
(sat, [Implies(d, d), Implies(Not(c), Not(c)), Implies(Not(c), Not(b)), Implies(Not(c), Not(a))])
"""
if isinstance(assumptions, list):
_asms = AstVector(None, self.ctx)
for a in assumptions:
_asms.push(a)
assumptions = _asms
if isinstance(variables, list):
_vars = AstVector(None, self.ctx)
for a in variables:
_vars.push(a)
variables = _vars
_z3_assert(isinstance(assumptions, AstVector), "ast vector expected")
_z3_assert(isinstance(variables, AstVector), "ast vector expected")
consequences = AstVector(None, self.ctx)
r = Z3_solver_get_consequences(self.ctx.ref(), self.solver, assumptions.vector,
variables.vector, consequences.vector)
sz = len(consequences)
consequences = [consequences[i] for i in range(sz)]
return CheckSatResult(r), consequences
def from_file(self, filename):
"""Parse assertions from a file"""
Z3_solver_from_file(self.ctx.ref(), self.solver, filename)
def from_string(self, s):
"""Parse assertions from a string"""
Z3_solver_from_string(self.ctx.ref(), self.solver, s)
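# Example (sketch): loading assertions from an SMT-LIB2 string.
#
#   s = Solver()
#   s.from_string("(declare-const x Int) (assert (> x 0))")
#   s.check()    # -> sat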
def cube(self, vars=None):
"""Get set of cubes
The method takes an optional set of variables that restrict which
variables may be used as a starting point for cubing.
If vars is not None, then the first case split is based on a variable in
this set.
"""
self.cube_vs = AstVector(None, self.ctx)
if vars is not None:
for v in vars:
self.cube_vs.push(v)
while True:
lvl = self.backtrack_level
self.backtrack_level = 4000000000
r = AstVector(Z3_solver_cube(self.ctx.ref(), self.solver, self.cube_vs.vector, lvl), self.ctx)
if (len(r) == 1 and is_false(r[0])):
return
yield r
if (len(r) == 0):
return
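# Example (sketch): enumerating cubes; each yielded AstVector is a candidate
# case split over the asserted formulas. `x` and `y` are hypothetical Bools.
#
#   x, y = Bools('x y')
#   s = Solver()
#   s.add(Or(x, y))
#   for c in s.cube():
#       print(c)    # an empty cube signals that no more splits are available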
def cube_vars(self):
"""Access the set of variables that were touched by the most recently generated cube.
This set of variables can be used as a starting point for additional cubes.
The idea is that variables that appear in clauses that are reduced by the most recent
cube are likely more useful to cube on."""
return self.cube_vs
def proof(self):
"""Return a proof for the last `check()`. Proof construction must be enabled."""
return _to_expr_ref(Z3_solver_get_proof(self.ctx.ref(), self.solver), self.ctx)
def assertions(self):
"""Return an AST vector containing all added constraints.
>>> s = Solver()
>>> s.assertions()
[]
>>> a = Int('a')
>>> s.add(a > 0)
>>> s.add(a < 10)
>>> s.assertions()
[a > 0, a < 10]
"""
return AstVector(Z3_solver_get_assertions(self.ctx.ref(), self.solver), self.ctx)
def units(self):
"""Return an AST vector containing all currently inferred units.
"""
return AstVector(Z3_solver_get_units(self.ctx.ref(), self.solver), self.ctx)
def non_units(self):
"""Return an AST vector containing all atomic formulas in solver state that are not units.
"""
return AstVector(Z3_solver_get_non_units(self.ctx.ref(), self.solver), self.ctx)
def trail_levels(self):
"""Return trail and decision levels of the solver state after a check() call.
"""
trail = self.trail()
levels = (ctypes.c_uint * len(trail))()
Z3_solver_get_levels(self.ctx.ref(), self.solver, trail.vector, len(trail), levels)
return trail, levels
def trail(self):
"""Return trail of the solver state after a check() call.
"""
return AstVector(Z3_solver_get_trail(self.ctx.ref(), self.solver), self.ctx)
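# Example (sketch): inspecting the trail after check(); these are low-level
# diagnostics and their contents depend on the selected solver engine.
#
#   s = SimpleSolver()
#   a, b = Bools('a b')
#   s.add(Or(a, b), Not(a))
#   s.check()
#   tr, lvls = s.trail_levels()    # literals on the trail with decision levels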
def statistics(self):
"""Return statistics for the last `check()`.
>>> s = SimpleSolver()
>>> x = Int('x')
>>> s.add(x > 0)
>>> s.check()
sat
>>> st = s.statistics()
>>> st.get_key_value('final checks')
1
>>> len(st) > 0
True
>>> st[0] != 0
True
"""
return Statistics(Z3_solver_get_statistics(self.ctx.ref(), self.solver), self.ctx)
def reason_unknown(self):
"""Return a string describing why the last `check()` returned `unknown`.
>>> x = Int('x')
>>> s = SimpleSolver()
>>> s.add(2**x == 4)
>>> s.check()
unknown
>>> s.reason_unknown()
'(incomplete (theory arithmetic))'
"""
return Z3_solver_get_reason_unknown(self.ctx.ref(), self.solver)
def help(self):
"""Display a string describing all available options."""
print(Z3_solver_get_help(self.ctx.ref(), self.solver))
def param_descrs(self):
"""Return the parameter description set."""
return ParamDescrsRef(Z3_solver_get_param_descrs(self.ctx.ref(), self.solver), self.ctx)
def __repr__(self):
"""Return a formatted string with all added constraints."""
return obj_to_string(self)
def translate(self, target):
"""Translate `self` to the context `target`. That is, return a copy of `self` in the context `target`.
>>> c1 = Context()
>>> c2 = Context()
>>> s1 = Solver(ctx=c1)
>>> s2 = s1.translate(c2)
"""
if z3_debug():
_z3_assert(isinstance(target, Context), "argument must be a Z3 context")
solver = Z3_solver_translate(self.ctx.ref(), self.solver, target.ref())
return Solver(solver, target)
def __copy__(self):
return self.translate(self.ctx)
def __deepcopy__(self, memo={}):
return self.translate(self.ctx)
def sexpr(self):
"""Return a formatted string (in Lisp-like format) with all added constraints.
We say the string is in s-expression format.
>>> x = Int('x')
>>> s = Solver()
>>> s.add(x > 0)
>>> s.add(x < 2)
>>> r = s.sexpr()
"""
return Z3_solver_to_string(self.ctx.ref(), self.solver)
def dimacs(self, include_names=True):
"""Return a textual representation of the solver in DIMACS format."""
return Z3_solver_to_dimacs_string(self.ctx.ref(), self.solver, include_names)
def to_smt2(self):
"""return SMTLIB2 formatted benchmark for solver's assertions"""
es = self.assertions()
sz = len(es)
sz1 = sz
if sz1 > 0:
sz1 -= 1
v = (Ast * sz1)()
for i in range(sz1):
v[i] = es[i].as_ast()
if sz > 0:
e = es[sz1].as_ast()
else:
e = BoolVal(True, self.ctx).as_ast()
return Z3_benchmark_to_smtlib_string(
self.ctx.ref(), "benchmark generated from python API", "", "unknown", "", sz1, v, e,
)
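# Example (sketch): exporting a solver's assertions as an SMT-LIB2 benchmark.
#
#   x = Int('x')
#   s = Solver()
#   s.add(x > 0, x < 2)
#   print(s.to_smt2())    # SMT-LIB2 text with the two assertions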
def SolverFor(logic, ctx=None, logFile=None):
"""Create a solver customized for the given logic.
The parameter `logic` is a string. It should contain
the name of an SMT-LIB logic.
See http://www.smtlib.org/ for the name of all available logics.
>>> s = SolverFor("QF_LIA")
>>> x = Int('x')
>>> s.add(x > 0)
>>> s.add(x < 2)
>>> s.check()
sat
>>> s.model()
[x = 1]
"""
ctx = _get_ctx(ctx)
logic = to_symbol(logic)
return Solver(Z3_mk_solver_for_logic(ctx.ref(), logic), ctx, logFile)
def SimpleSolver(ctx=None, logFile=None):
"""Return a simple general purpose solver with limited amount of preprocessing.
>>> s = SimpleSolver()
>>> x = Int('x')
>>> s.add(x > 0)
>>> s.check()
sat
"""
ctx = _get_ctx(ctx)
return Solver(Z3_mk_simple_solver(ctx.ref()), ctx, logFile)
#########################################
#
# Fixedpoint
#
#########################################
class Fixedpoint(Z3PPObject):
"""Fixedpoint API provides methods for solving with recursive predicates"""
def __init__(self, fixedpoint=None, ctx=None):
assert fixedpoint is None or ctx is not None
self.ctx = _get_ctx(ctx)
self.fixedpoint = None
if fixedpoint is None:
self.fixedpoint = Z3_mk_fixedpoint(self.ctx.ref())
else:
self.fixedpoint = fixedpoint
Z3_fixedpoint_inc_ref(self.ctx.ref(), self.fixedpoint)
self.vars = []
def __deepcopy__(self, memo={}):
return Fixedpoint(self.fixedpoint, self.ctx)
def __del__(self):
if self.fixedpoint is not None and self.ctx.ref() is not None:
Z3_fixedpoint_dec_ref(self.ctx.ref(), self.fixedpoint)
def set(self, *args, **keys):
"""Set a configuration option. The method `help()` return a string containing all available options.
"""
p = args2params(args, keys, self.ctx)
Z3_fixedpoint_set_params(self.ctx.ref(), self.fixedpoint, p.params)
def help(self):
"""Display a string describing all available options."""
print(Z3_fixedpoint_get_help(self.ctx.ref(), self.fixedpoint))
def param_descrs(self):
"""Return the parameter description set."""
return ParamDescrsRef(Z3_fixedpoint_get_param_descrs(self.ctx.ref(), self.fixedpoint), self.ctx)
def assert_exprs(self, *args):
"""Assert constraints as background axioms for the fixedpoint solver."""
args = _get_args(args)
s = BoolSort(self.ctx)
for arg in args:
if isinstance(arg, Goal) or isinstance(arg, AstVector):
for f in arg:
f = self.abstract(f)
Z3_fixedpoint_assert(self.ctx.ref(), self.fixedpoint, f.as_ast())
else:
arg = s.cast(arg)
arg = self.abstract(arg)
Z3_fixedpoint_assert(self.ctx.ref(), self.fixedpoint, arg.as_ast())
def add(self, *args):
"""Assert constraints as background axioms for the fixedpoint solver. Alias for assert_expr."""
self.assert_exprs(*args)
def __iadd__(self, fml):
self.add(fml)
return self
def append(self, *args):
"""Assert constraints as background axioms for the fixedpoint solver. Alias for assert_expr."""
self.assert_exprs(*args)
def insert(self, *args):
"""Assert constraints as background axioms for the fixedpoint solver. Alias for assert_expr."""
self.assert_exprs(*args)
def add_rule(self, head, body=None, name=None):
"""Assert rules defining recursive predicates to the fixedpoint solver.
>>> a = Bool('a')
>>> b = Bool('b')
>>> s = Fixedpoint()
>>> s.register_relation(a.decl())
>>> s.register_relation(b.decl())
>>> s.fact(a)
>>> s.rule(b, a)
>>> s.query(b)
sat
"""
if name is None:
name = ""
name = to_symbol(name, self.ctx)
if body is None:
head = self.abstract(head)
Z3_fixedpoint_add_rule(self.ctx.ref(), self.fixedpoint, head.as_ast(), name)
else:
body = _get_args(body)
f = self.abstract(Implies(And(body, self.ctx), head))
Z3_fixedpoint_add_rule(self.ctx.ref(), self.fixedpoint, f.as_ast(), name)
def rule(self, head, body=None, name=None):
"""Assert rules defining recursive predicates to the fixedpoint solver. Alias for add_rule."""
self.add_rule(head, body, name)
def fact(self, head, name=None):
"""Assert facts defining recursive predicates to the fixedpoint solver. Alias for add_rule."""
self.add_rule(head, None, name)
def query(self, *query):
"""Query the fixedpoint engine whether formula is derivable.
You can also pass an tuple or list of recursive predicates.
"""
query = _get_args(query)
sz = len(query)
if sz >= 1 and isinstance(query[0], FuncDeclRef):
_decls = (FuncDecl * sz)()
i = 0
for q in query:
_decls[i] = q.ast
i = i + 1
r = Z3_fixedpoint_query_relations(self.ctx.ref(), self.fixedpoint, sz, _decls)
else:
if sz == 1:
query = query[0]
else:
query = And(query, self.ctx)
query = self.abstract(query, False)
r = Z3_fixedpoint_query(self.ctx.ref(), self.fixedpoint, query.as_ast())
return CheckSatResult(r)
def query_from_lvl(self, lvl, *query):
"""Query the fixedpoint engine whether formula is derivable starting at the given query level.
"""
query = _get_args(query)
sz = len(query)
if sz >= 1 and isinstance(query[0], FuncDecl):
_z3_assert(False, "unsupported")
else:
if sz == 1:
query = query[0]
else:
query = And(query)
query = self.abstract(query, False)
r = Z3_fixedpoint_query_from_lvl(self.ctx.ref(), self.fixedpoint, query.as_ast(), lvl)
return CheckSatResult(r)
def update_rule(self, head, body, name):
"""update rule"""
if name is None:
name = ""
name = to_symbol(name, self.ctx)
body = _get_args(body)
f = self.abstract(Implies(And(body, self.ctx), head))
Z3_fixedpoint_update_rule(self.ctx.ref(), self.fixedpoint, f.as_ast(), name)
def get_answer(self):
"""Retrieve answer from last query call."""
r = Z3_fixedpoint_get_answer(self.ctx.ref(), self.fixedpoint)
return _to_expr_ref(r, self.ctx)
def get_ground_sat_answer(self):
"""Retrieve a ground cex from last query call."""
r = Z3_fixedpoint_get_ground_sat_answer(self.ctx.ref(), self.fixedpoint)
return _to_expr_ref(r, self.ctx)
def get_rules_along_trace(self):
"""retrieve rules along the counterexample trace"""
return AstVector(Z3_fixedpoint_get_rules_along_trace(self.ctx.ref(), self.fixedpoint), self.ctx)
def get_rule_names_along_trace(self):
"""retrieve rule names along the counterexample trace"""
# this is a hack as I don't know how to return a list of symbols from C++;
# obtain names as a single string separated by semicolons
names = _symbol2py(self.ctx, Z3_fixedpoint_get_rule_names_along_trace(self.ctx.ref(), self.fixedpoint))
# split into individual names
return names.split(";")
def get_num_levels(self, predicate):
"""Retrieve number of levels used for predicate in PDR engine"""
return Z3_fixedpoint_get_num_levels(self.ctx.ref(), self.fixedpoint, predicate.ast)
def get_cover_delta(self, level, predicate):
"""Retrieve properties known about predicate for the level'th unfolding.
-1 is treated as the limit (infinity)
"""
r = Z3_fixedpoint_get_cover_delta(self.ctx.ref(), self.fixedpoint, level, predicate.ast)
return _to_expr_ref(r, self.ctx)
def add_cover(self, level, predicate, property):
"""Add property to predicate for the level'th unfolding.
-1 is treated as the limit (infinity)
"""
Z3_fixedpoint_add_cover(self.ctx.ref(), self.fixedpoint, level, predicate.ast, property.ast)
def register_relation(self, *relations):
"""Register relation as recursive"""
relations = _get_args(relations)
for f in relations:
Z3_fixedpoint_register_relation(self.ctx.ref(), self.fixedpoint, f.ast)
def set_predicate_representation(self, f, *representations):
"""Control how relation is represented"""
representations = _get_args(representations)
representations = [to_symbol(s) for s in representations]
sz = len(representations)
args = (Symbol * sz)()
for i in range(sz):
args[i] = representations[i]
Z3_fixedpoint_set_predicate_representation(self.ctx.ref(), self.fixedpoint, f.ast, sz, args)
def parse_string(self, s):
"""Parse rules and queries from a string"""
return AstVector(Z3_fixedpoint_from_string(self.ctx.ref(), self.fixedpoint, s), self.ctx)
def parse_file(self, f):
"""Parse rules and queries from a file"""
return AstVector(Z3_fixedpoint_from_file(self.ctx.ref(), self.fixedpoint, f), self.ctx)
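# Example (sketch): parsing rules and a query from a string; the format is
# SMT-LIB2 extended with (declare-rel ...), (rule ...) and (query ...).
# The returned AstVector holds the parsed queries.
#
#   fp = Fixedpoint()
#   qs = fp.parse_string("(declare-rel p ()) (rule p) (query p)")
#   fp.query(qs[0])    # -> sat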
def get_rules(self):
"""retrieve rules that have been added to fixedpoint context"""
return AstVector(Z3_fixedpoint_get_rules(self.ctx.ref(), self.fixedpoint), self.ctx)
def get_assertions(self):
"""retrieve assertions that have been added to fixedpoint context"""
return AstVector(Z3_fixedpoint_get_assertions(self.ctx.ref(), self.fixedpoint), self.ctx)
def __repr__(self):
"""Return a formatted string with all added rules and constraints."""
return self.sexpr()
def sexpr(self):
"""Return a formatted string (in Lisp-like format) with all added constraints.
We say the string is in s-expression format.
"""
return Z3_fixedpoint_to_string(self.ctx.ref(), self.fixedpoint, 0, (Ast * 0)())
def to_string(self, queries):
"""Return a formatted string (in Lisp-like format) with all added constraints.
We say the string is in s-expression format.
Include also queries.
"""
args, sz = _to_ast_array(queries)
return Z3_fixedpoint_to_string(self.ctx.ref(), self.fixedpoint, sz, args)
def statistics(self):
"""Return statistics for the last `query()`.
"""
return Statistics(Z3_fixedpoint_get_statistics(self.ctx.ref(), self.fixedpoint), self.ctx)
def reason_unknown(self):
"""Return a string describing why the last `query()` returned `unknown`.
"""
return Z3_fixedpoint_get_reason_unknown(self.ctx.ref(), self.fixedpoint)
def declare_var(self, *vars):
"""Add variable or several variables.
The added variable or variables will be bound in the rules
and queries
"""
vars = _get_args(vars)
for v in vars:
self.vars += [v]
def abstract(self, fml, is_forall=True):
if self.vars == []:
return fml
if is_forall:
return ForAll(self.vars, fml)
else:
return Exists(self.vars, fml)
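# Example (sketch): a small reachability problem; `edge` and `path` are
# hypothetical relations over a bit-vector domain.
#
#   fp = Fixedpoint()
#   bv = BitVecSort(3)
#   edge = Function('edge', bv, bv, BoolSort())
#   path = Function('path', bv, bv, BoolSort())
#   a, b, c = Consts('a b c', bv)
#   fp.register_relation(edge, path)
#   fp.declare_var(a, b, c)
#   fp.rule(path(a, b), edge(a, b))
#   fp.rule(path(a, c), [edge(a, b), path(b, c)])
#   fp.fact(edge(BitVecVal(0, 3), BitVecVal(1, 3)))
#   fp.query(path(BitVecVal(0, 3), BitVecVal(1, 3)))    # -> sat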
#########################################
#
# Finite domains
#
#########################################
class FiniteDomainSortRef(SortRef):
"""Finite domain sort."""
def size(self):
"""Return the size of the finite domain sort"""
r = (ctypes.c_ulonglong * 1)()
if Z3_get_finite_domain_sort_size(self.ctx_ref(), self.ast, r):
return r[0]
else:
raise Z3Exception("Failed to retrieve finite domain sort size")
def FiniteDomainSort(name, sz, ctx=None):
"""Create a named finite domain sort of a given size sz"""
if not isinstance(name, Symbol):
name = to_symbol(name)
ctx = _get_ctx(ctx)
return FiniteDomainSortRef(Z3_mk_finite_domain_sort(ctx.ref(), name, sz), ctx)
def is_finite_domain_sort(s):
"""Return True if `s` is a Z3 finite-domain sort.
>>> is_finite_domain_sort(FiniteDomainSort('S', 100))
True
>>> is_finite_domain_sort(IntSort())
False
"""
return isinstance(s, FiniteDomainSortRef)
class FiniteDomainRef(ExprRef):
"""Finite-domain expressions."""
def sort(self):
"""Return the sort of the finite-domain expression `self`."""
return FiniteDomainSortRef(Z3_get_sort(self.ctx_ref(), self.as_ast()), self.ctx)
def as_string(self):
"""Return a Z3 floating point expression as a Python string."""
return Z3_ast_to_string(self.ctx_ref(), self.as_ast())
def is_finite_domain(a):
"""Return `True` if `a` is a Z3 finite-domain expression.
>>> s = FiniteDomainSort('S', 100)
>>> b = Const('b', s)
>>> is_finite_domain(b)
True
>>> is_finite_domain(Int('x'))
False
"""
return isinstance(a, FiniteDomainRef)
class FiniteDomainNumRef(FiniteDomainRef):
"""Integer values."""
def as_long(self):
"""Return a Z3 finite-domain numeral as a Python long (bignum) numeral.
>>> s = FiniteDomainSort('S', 100)
>>> v = FiniteDomainVal(3, s)
>>> v
3
>>> v.as_long() + 1
4
"""
return int(self.as_string())
def as_string(self):
"""Return a Z3 finite-domain numeral as a Python string.
>>> s = FiniteDomainSort('S', 100)
>>> v = FiniteDomainVal(42, s)
>>> v.as_string()
'42'
"""
return Z3_get_numeral_string(self.ctx_ref(), self.as_ast())
def FiniteDomainVal(val, sort, ctx=None):
"""Return a Z3 finite-domain value. If `ctx=None`, then the global context is used.
>>> s = FiniteDomainSort('S', 256)
>>> FiniteDomainVal(255, s)
255
>>> FiniteDomainVal('100', s)
100
"""
if z3_debug():
_z3_assert(is_finite_domain_sort(sort), "Expected finite-domain sort")
ctx = sort.ctx
return FiniteDomainNumRef(Z3_mk_numeral(ctx.ref(), _to_int_str(val), sort.ast), ctx)
def is_finite_domain_value(a):
"""Return `True` if `a` is a Z3 finite-domain value.
>>> s = FiniteDomainSort('S', 100)
>>> b = Const('b', s)
>>> is_finite_domain_value(b)
False
>>> b = FiniteDomainVal(10, s)
>>> b
10
>>> is_finite_domain_value(b)
True
"""
return is_finite_domain(a) and _is_numeral(a.ctx, a.as_ast())
#########################################
#
# Optimize
#
#########################################
class OptimizeObjective:
def __init__(self, opt, value, is_max):
self._opt = opt
self._value = value
self._is_max = is_max
def lower(self):
opt = self._opt
return _to_expr_ref(Z3_optimize_get_lower(opt.ctx.ref(), opt.optimize, self._value), opt.ctx)
def upper(self):
opt = self._opt
return _to_expr_ref(Z3_optimize_get_upper(opt.ctx.ref(), opt.optimize, self._value), opt.ctx)
def lower_values(self):
opt = self._opt
return AstVector(Z3_optimize_get_lower_as_vector(opt.ctx.ref(), opt.optimize, self._value), opt.ctx)
def upper_values(self):
opt = self._opt
return AstVector(Z3_optimize_get_upper_as_vector(opt.ctx.ref(), opt.optimize, self._value), opt.ctx)
def value(self):
if self._is_max:
return self.upper()
else:
return self.lower()
def __str__(self):
return "%s:%s" % (self._value, self._is_max)
_on_models = {}
def _global_on_model(ctx):
(fn, mdl) = _on_models[ctx]
fn(mdl)
_on_model_eh = on_model_eh_type(_global_on_model)
class Optimize(Z3PPObject):
"""Optimize API provides methods for solving using objective functions and weighted soft constraints"""
def __init__(self, optimize=None, ctx=None):
    assert optimize is None or ctx is not None
    self.ctx = _get_ctx(ctx)
    self._on_models_id = None
    if optimize is None:
        self.optimize = Z3_mk_optimize(self.ctx.ref())
    else:
        self.optimize = optimize
    Z3_optimize_inc_ref(self.ctx.ref(), self.optimize)
def __deepcopy__(self, memo={}):
    return Optimize(self.optimize, self.ctx)
def __del__(self):
if self.optimize is not None and self.ctx.ref() is not None:
Z3_optimize_dec_ref(self.ctx.ref(), self.optimize)
if self._on_models_id is not None:
del _on_models[self._on_models_id]
def set(self, *args, **keys):
"""Set a configuration option.
The method `help()` returns a string containing all available options.
"""
p = args2params(args, keys, self.ctx)
Z3_optimize_set_params(self.ctx.ref(), self.optimize, p.params)
def help(self):
"""Display a string describing all available options."""
print(Z3_optimize_get_help(self.ctx.ref(), self.optimize))
def param_descrs(self):
"""Return the parameter description set."""
return ParamDescrsRef(Z3_optimize_get_param_descrs(self.ctx.ref(), self.optimize), self.ctx)
def assert_exprs(self, *args):
"""Assert constraints as background axioms for the optimize solver."""
args = _get_args(args)
s = BoolSort(self.ctx)
for arg in args:
if isinstance(arg, Goal) or isinstance(arg, AstVector):
for f in arg:
Z3_optimize_assert(self.ctx.ref(), self.optimize, f.as_ast())
else:
arg = s.cast(arg)
Z3_optimize_assert(self.ctx.ref(), self.optimize, arg.as_ast())
def add(self, *args):
"""Assert constraints as background axioms for the optimize solver. Alias for assert_expr."""
self.assert_exprs(*args)
def __iadd__(self, fml):
self.add(fml)
return self
def assert_and_track(self, a, p):
"""Assert constraint `a` and track it in the unsat core using the Boolean constant `p`.
If `p` is a string, it will be automatically converted into a Boolean constant.
>>> x = Int('x')
>>> p3 = Bool('p3')
>>> s = Optimize()
>>> s.assert_and_track(x > 0, 'p1')
>>> s.assert_and_track(x != 1, 'p2')
>>> s.assert_and_track(x < 0, p3)
>>> print(s.check())
unsat
>>> c = s.unsat_core()
>>> len(c)
2
>>> Bool('p1') in c
True
>>> Bool('p2') in c
False
>>> p3 in c
True
"""
if isinstance(p, str):
p = Bool(p, self.ctx)
_z3_assert(isinstance(a, BoolRef), "Boolean expression expected")
_z3_assert(isinstance(p, BoolRef) and is_const(p), "Boolean expression expected")
Z3_optimize_assert_and_track(self.ctx.ref(), self.optimize, a.as_ast(), p.as_ast())
def add_soft(self, arg, weight="1", id=None):
"""Add soft constraint with optional weight and optional identifier.
If no weight is supplied, then the penalty for violating the soft constraint
is 1.
Soft constraints are grouped by identifiers. Soft constraints that are
added without identifiers are grouped by default.
"""
if _is_int(weight):
weight = "%d" % weight
elif isinstance(weight, float):
weight = "%f" % weight
if not isinstance(weight, str):
raise Z3Exception("weight should be a string or an integer")
if id is None:
id = ""
id = to_symbol(id, self.ctx)
def asoft(a):
v = Z3_optimize_assert_soft(self.ctx.ref(), self.optimize, a.as_ast(), weight, id)
return OptimizeObjective(self, v, False)
if sys.version_info.major >= 3 and isinstance(arg, Iterable):
return [asoft(a) for a in arg]
return asoft(arg)
def maximize(self, arg):
"""Add objective function to maximize."""
return OptimizeObjective(
self,
Z3_optimize_maximize(self.ctx.ref(), self.optimize, arg.as_ast()),
is_max=True,
)
def minimize(self, arg):
"""Add objective function to minimize."""
return OptimizeObjective(
self,
Z3_optimize_minimize(self.ctx.ref(), self.optimize, arg.as_ast()),
is_max=False,
)
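# Example (sketch): combining hard constraints, an objective, and a soft
# constraint. `h` is the objective handle returned by maximize().
#
#   x, y = Ints('x y')
#   opt = Optimize()
#   opt.add(x >= 0, y >= 0, x + y <= 10)
#   h = opt.maximize(x + y)
#   opt.add_soft(x <= 3, weight="2")    # penalty 2 if violated
#   opt.check()                         # -> sat
#   opt.upper(h)                        # best known bound for the objective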
def push(self):
"""create a backtracking point for added rules, facts and assertions"""
Z3_optimize_push(self.ctx.ref(), self.optimize)
def pop(self):
"""restore to previously created backtracking point"""
Z3_optimize_pop(self.ctx.ref(), self.optimize)
def check(self, *assumptions):
"""Check satisfiability while optimizing objective functions."""
assumptions = _get_args(assumptions)
num = len(assumptions)
_assumptions = (Ast * num)()
for i in range(num):
_assumptions[i] = assumptions[i].as_ast()
return CheckSatResult(Z3_optimize_check(self.ctx.ref(), self.optimize, num, _assumptions))
def reason_unknown(self):
"""Return a string that describes why the last `check()` returned `unknown`."""
return Z3_optimize_get_reason_unknown(self.ctx.ref(), self.optimize)
def model(self):
"""Return a model for the last check()."""
try:
return ModelRef(Z3_optimize_get_model(self.ctx.ref(), self.optimize), self.ctx)
except Z3Exception:
raise Z3Exception("model is not available")
def unsat_core(self):
return AstVector(Z3_optimize_get_unsat_core(self.ctx.ref(), self.optimize), self.ctx)
def lower(self, obj):
if not isinstance(obj, OptimizeObjective):
raise Z3Exception("Expecting objective handle returned by maximize/minimize")
return obj.lower()
def upper(self, obj):
if not isinstance(obj, OptimizeObjective):
raise Z3Exception("Expecting objective handle returned by maximize/minimize")
return obj.upper()
def lower_values(self, obj):
if not isinstance(obj, OptimizeObjective):
raise Z3Exception("Expecting objective handle returned by maximize/minimize")
return obj.lower_values()
def upper_values(self, obj):
if not isinstance(obj, OptimizeObjective):
raise Z3Exception("Expecting objective handle returned by maximize/minimize")
return obj.upper_values()
def from_file(self, filename):
"""Parse assertions and objectives from a file"""
Z3_optimize_from_file(self.ctx.ref(), self.optimize, filename)
def from_string(self, s):
"""Parse assertions and objectives from a string"""
Z3_optimize_from_string(self.ctx.ref(), self.optimize, s)
def assertions(self):
"""Return an AST vector containing all added constraints."""
return AstVector(Z3_optimize_get_assertions(self.ctx.ref(), self.optimize), self.ctx)
def objectives(self):
"""returns set of objective functions"""
return AstVector(Z3_optimize_get_objectives(self.ctx.ref(), self.optimize), self.ctx)
def __repr__(self):
"""Return a formatted string with all added rules and constraints."""
return self.sexpr()
def sexpr(self):
"""Return a formatted string (in Lisp-like format) with all added constraints.
We say the string is in s-expression format.
"""
return Z3_optimize_to_string(self.ctx.ref(), self.optimize)
def statistics(self):
"""Return statistics for the last check`.
"""
return Statistics(Z3_optimize_get_statistics(self.ctx.ref(), self.optimize), self.ctx)
def set_on_model(self, on_model):
"""Register a callback that is invoked with every incremental improvement to
objective values. The callback takes a model as argument.
The life-time of the model is limited to the callback, so the
model has to be (deep) copied if it is to be used after the callback.
"""
id = len(_on_models) + 41
mdl = Model(self.ctx)
_on_models[id] = (on_model, mdl)
self._on_models_id = id
Z3_optimize_register_model_eh(
self.ctx.ref(), self.optimize, mdl.model, ctypes.c_void_p(id), _on_model_eh,
)
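# Example (sketch): printing every improving model found during optimization;
# the model passed to the callback is only valid inside the callback.
#
#   x = Int('x')
#   opt = Optimize()
#   opt.add(x < 10)
#   opt.maximize(x)
#   opt.set_on_model(lambda m: print("improved:", m))
#   opt.check()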
#########################################
#
# ApplyResult
#
#########################################
class ApplyResult(Z3PPObject):
"""An ApplyResult object contains the subgoals produced by a tactic when applied to a goal.
It also contains model and proof converters.
"""
def __init__(self, result, ctx):
self.result = result
self.ctx = ctx
Z3_apply_result_inc_ref(self.ctx.ref(), self.result)
def __deepcopy__(self, memo={}):
return ApplyResult(self.result, self.ctx)
def __del__(self):
if self.ctx.ref() is not None:
Z3_apply_result_dec_ref(self.ctx.ref(), self.result)
def __len__(self):
"""Return the number of subgoals in `self`.
>>> a, b = Ints('a b')
>>> g = Goal()
>>> g.add(Or(a == 0, a == 1), Or(b == 0, b == 1), a > b)
>>> t = Tactic('split-clause')
>>> r = t(g)
>>> len(r)
2
>>> t = Then(Tactic('split-clause'), Tactic('split-clause'))
>>> len(t(g))
4
>>> t = Then(Tactic('split-clause'), Tactic('split-clause'), Tactic('propagate-values'))
>>> len(t(g))
1
"""
return int(Z3_apply_result_get_num_subgoals(self.ctx.ref(), self.result))
def __getitem__(self, idx):
"""Return one of the subgoals stored in ApplyResult object `self`.
>>> a, b = Ints('a b')
>>> g = Goal()
>>> g.add(Or(a == 0, a == 1), Or(b == 0, b == 1), a > b)
>>> t = Tactic('split-clause')
>>> r = t(g)
>>> r[0]
[a == 0, Or(b == 0, b == 1), a > b]
>>> r[1]
[a == 1, Or(b == 0, b == 1), a > b]
"""
if idx >= len(self):
raise IndexError
return Goal(goal=Z3_apply_result_get_subgoal(self.ctx.ref(), self.result, idx), ctx=self.ctx)
def __repr__(self):
return obj_to_string(self)
def sexpr(self):
"""Return a textual representation of the s-expression representing the set of subgoals in `self`."""
return Z3_apply_result_to_string(self.ctx.ref(), self.result)
def as_expr(self):
"""Return a Z3 expression consisting of all subgoals.
>>> x = Int('x')
>>> g = Goal()
>>> g.add(x > 1)
>>> g.add(Or(x == 2, x == 3))
>>> r = Tactic('simplify')(g)
>>> r
[[Not(x <= 1), Or(x == 2, x == 3)]]
>>> r.as_expr()
And(Not(x <= 1), Or(x == 2, x == 3))
>>> r = Tactic('split-clause')(g)
>>> r
[[x > 1, x == 2], [x > 1, x == 3]]
>>> r.as_expr()
Or(And(x > 1, x == 2), And(x > 1, x == 3))
"""
sz = len(self)
if sz == 0:
return BoolVal(False, self.ctx)
elif sz == 1:
return self[0].as_expr()
else:
return Or([self[i].as_expr() for i in range(len(self))])
#########################################
#
# Tactics
#
#########################################
class Tactic:
"""Tactics transform, solver and/or simplify sets of constraints (Goal).
A Tactic can be converted into a Solver using the method solver().
Several combinators are available for creating new tactics using the built-in ones:
Then(), OrElse(), FailIf(), Repeat(), When(), Cond().
"""
def __init__(self, tactic, ctx=None):
self.ctx = _get_ctx(ctx)
self.tactic = None
if isinstance(tactic, TacticObj):
self.tactic = tactic
else:
if z3_debug():
_z3_assert(isinstance(tactic, str), "tactic name expected")
try:
self.tactic = Z3_mk_tactic(self.ctx.ref(), str(tactic))
except Z3Exception:
raise Z3Exception("unknown tactic '%s'" % tactic)
Z3_tactic_inc_ref(self.ctx.ref(), self.tactic)
def __deepcopy__(self, memo={}):
return Tactic(self.tactic, self.ctx)
def __del__(self):
if self.tactic is not None and self.ctx.ref() is not None:
Z3_tactic_dec_ref(self.ctx.ref(), self.tactic)
def solver(self, logFile=None):
"""Create a solver using the tactic `self`.
The solver supports the methods `push()` and `pop()`, but it
will always solve each `check()` from scratch.
>>> t = Then('simplify', 'nlsat')
>>> s = t.solver()
>>> x = Real('x')
>>> s.add(x**2 == 2, x > 0)
>>> s.check()
sat
>>> s.model()
[x = 1.4142135623?]
"""
return Solver(Z3_mk_solver_from_tactic(self.ctx.ref(), self.tactic), self.ctx, logFile)
def apply(self, goal, *arguments, **keywords):
"""Apply tactic `self` to the given goal or Z3 Boolean expression using the given options.
>>> x, y = Ints('x y')
>>> t = Tactic('solve-eqs')
>>> t.apply(And(x == 0, y >= x + 1))
[[y >= 1]]
"""
if z3_debug():
_z3_assert(isinstance(goal, (Goal, BoolRef)), "Z3 Goal or Boolean expressions expected")
goal = _to_goal(goal)
if len(arguments) > 0 or len(keywords) > 0:
p = args2params(arguments, keywords, self.ctx)
return ApplyResult(Z3_tactic_apply_ex(self.ctx.ref(), self.tactic, goal.goal, p.params), self.ctx)
else:
return ApplyResult(Z3_tactic_apply(self.ctx.ref(), self.tactic, goal.goal), self.ctx)
def __call__(self, goal, *arguments, **keywords):
"""Apply tactic `self` to the given goal or Z3 Boolean expression using the given options.
>>> x, y = Ints('x y')
>>> t = Tactic('solve-eqs')
>>> t(And(x == 0, y >= x + 1))
[[y >= 1]]
"""
return self.apply(goal, *arguments, **keywords)
def help(self):
"""Display a string containing a description of the available options for the `self` tactic."""
print(Z3_tactic_get_help(self.ctx.ref(), self.tactic))
def param_descrs(self):
"""Return the parameter description set."""
return ParamDescrsRef(Z3_tactic_get_param_descrs(self.ctx.ref(), self.tactic), self.ctx)
def _to_goal(a):
if isinstance(a, BoolRef):
goal = Goal(ctx=a.ctx)
goal.add(a)
return goal
else:
return a
def _to_tactic(t, ctx=None):
if isinstance(t, Tactic):
return t
else:
return Tactic(t, ctx)
def _and_then(t1, t2, ctx=None):
t1 = _to_tactic(t1, ctx)
t2 = _to_tactic(t2, ctx)
if z3_debug():
_z3_assert(t1.ctx == t2.ctx, "Context mismatch")
return Tactic(Z3_tactic_and_then(t1.ctx.ref(), t1.tactic, t2.tactic), t1.ctx)
def _or_else(t1, t2, ctx=None):
t1 = _to_tactic(t1, ctx)
t2 = _to_tactic(t2, ctx)
if z3_debug():
_z3_assert(t1.ctx == t2.ctx, "Context mismatch")
return Tactic(Z3_tactic_or_else(t1.ctx.ref(), t1.tactic, t2.tactic), t1.ctx)
def AndThen(*ts, **ks):
"""Return a tactic that applies the tactics in `*ts` in sequence.
>>> x, y = Ints('x y')
>>> t = AndThen(Tactic('simplify'), Tactic('solve-eqs'))
>>> t(And(x == 0, y > x + 1))
[[Not(y <= 1)]]
>>> t(And(x == 0, y > x + 1)).as_expr()
Not(y <= 1)
"""
if z3_debug():
_z3_assert(len(ts) >= 2, "At least two arguments expected")
ctx = ks.get("ctx", None)
num = len(ts)
r = ts[0]
for i in range(num - 1):
r = _and_then(r, ts[i + 1], ctx)
return r
def Then(*ts, **ks):
"""Return a tactic that applies the tactics in `*ts` in sequence. Shorthand for AndThen(*ts, **ks).
>>> x, y = Ints('x y')
>>> t = Then(Tactic('simplify'), Tactic('solve-eqs'))
>>> t(And(x == 0, y > x + 1))
[[Not(y <= 1)]]
>>> t(And(x == 0, y > x + 1)).as_expr()
Not(y <= 1)
"""
return AndThen(*ts, **ks)
def OrElse(*ts, **ks):
"""Return a tactic that applies the tactics in `*ts` until one of them succeeds (it doesn't fail).
>>> x = Int('x')
>>> t = OrElse(Tactic('split-clause'), Tactic('skip'))
>>> # Tactic split-clause fails if there is no clause in the given goal.
>>> t(x == 0)
[[x == 0]]
>>> t(Or(x == 0, x == 1))
[[x == 0], [x == 1]]
"""
if z3_debug():
_z3_assert(len(ts) >= 2, "At least two arguments expected")
ctx = ks.get("ctx", None)
num = len(ts)
r = ts[0]
for i in range(num - 1):
r = _or_else(r, ts[i + 1], ctx)
return r
def ParOr(*ts, **ks):
"""Return a tactic that applies the tactics in `*ts` in parallel until one of them succeeds (it doesn't fail).
>>> x = Int('x')
>>> t = ParOr(Tactic('simplify'), Tactic('fail'))
>>> t(x + 1 == 2)
[[x == 1]]
"""
if z3_debug():
_z3_assert(len(ts) >= 2, "At least two arguments expected")
ctx = _get_ctx(ks.get("ctx", None))
ts = [_to_tactic(t, ctx) for t in ts]
sz = len(ts)
_args = (TacticObj * sz)()
for i in range(sz):
_args[i] = ts[i].tactic
return Tactic(Z3_tactic_par_or(ctx.ref(), sz, _args), ctx)
def ParThen(t1, t2, ctx=None):
"""Return a tactic that applies t1 and then t2 to every subgoal produced by t1.
The subgoals are processed in parallel.
>>> x, y = Ints('x y')
>>> t = ParThen(Tactic('split-clause'), Tactic('propagate-values'))
>>> t(And(Or(x == 1, x == 2), y == x + 1))
[[x == 1, y == 2], [x == 2, y == 3]]
"""
t1 = _to_tactic(t1, ctx)
t2 = _to_tactic(t2, ctx)
if z3_debug():
_z3_assert(t1.ctx == t2.ctx, "Context mismatch")
return Tactic(Z3_tactic_par_and_then(t1.ctx.ref(), t1.tactic, t2.tactic), t1.ctx)
def ParAndThen(t1, t2, ctx=None):
"""Alias for ParThen(t1, t2, ctx)."""
return ParThen(t1, t2, ctx)
def With(t, *args, **keys):
"""Return a tactic that applies tactic `t` using the given configuration options.
>>> x, y = Ints('x y')
>>> t = With(Tactic('simplify'), som=True)
>>> t((x + 1)*(y + 2) == 0)
[[2*x + y + x*y == -2]]
"""
ctx = keys.pop("ctx", None)
t = _to_tactic(t, ctx)
p = args2params(args, keys, t.ctx)
return Tactic(Z3_tactic_using_params(t.ctx.ref(), t.tactic, p.params), t.ctx)
def WithParams(t, p):
"""Return a tactic that applies tactic `t` using the given configuration options.
>>> x, y = Ints('x y')
>>> p = ParamsRef()
>>> p.set("som", True)
>>> t = WithParams(Tactic('simplify'), p)
>>> t((x + 1)*(y + 2) == 0)
[[2*x + y + x*y == -2]]
"""
t = _to_tactic(t, None)
return Tactic(Z3_tactic_using_params(t.ctx.ref(), t.tactic, p.params), t.ctx)
def Repeat(t, max=4294967295, ctx=None):
"""Return a tactic that keeps applying `t` until the goal is not modified anymore
or the maximum number of iterations `max` is reached.
>>> x, y = Ints('x y')
>>> c = And(Or(x == 0, x == 1), Or(y == 0, y == 1), x > y)
>>> t = Repeat(OrElse(Tactic('split-clause'), Tactic('skip')))
>>> r = t(c)
>>> for subgoal in r: print(subgoal)
[x == 0, y == 0, x > y]
[x == 0, y == 1, x > y]
[x == 1, y == 0, x > y]
[x == 1, y == 1, x > y]
>>> t = Then(t, Tactic('propagate-values'))
>>> t(c)
[[x == 1, y == 0]]
"""
t = _to_tactic(t, ctx)
return Tactic(Z3_tactic_repeat(t.ctx.ref(), t.tactic, max), t.ctx)
def TryFor(t, ms, ctx=None):
"""Return a tactic that applies `t` to a given goal for `ms` milliseconds.
If `t` does not terminate in `ms` milliseconds, then it fails.
"""
t = _to_tactic(t, ctx)
return Tactic(Z3_tactic_try_for(t.ctx.ref(), t.tactic, ms), t.ctx)
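# Illustrative sketch (wrapper name made up): bounding a tactic's runtime
# with TryFor and falling back to the identity tactic 'skip' when the time
# limit is exceeded, so the combined tactic never fails on a timeout.
def _example_try_for(goal, ms=100):
    t = OrElse(TryFor(Tactic('smt'), ms), Tactic('skip'))
    return t(goal)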
def tactics(ctx=None):
"""Return a list of all available tactics in Z3.
>>> l = tactics()
>>> l.count('simplify') == 1
True
"""
ctx = _get_ctx(ctx)
return [Z3_get_tactic_name(ctx.ref(), i) for i in range(Z3_get_num_tactics(ctx.ref()))]
def tactic_description(name, ctx=None):
"""Return a short description for the tactic named `name`.
>>> d = tactic_description('simplify')
"""
ctx = _get_ctx(ctx)
return Z3_tactic_get_descr(ctx.ref(), name)
def describe_tactics():
"""Display a (tabular) description of all available tactics in Z3."""
if in_html_mode():
even = True
print('<table border="1" cellpadding="2" cellspacing="0">')
for t in tactics():
if even:
print('<tr style="background-color:#CFCFCF">')
even = False
else:
print("<tr>")
even = True
print("<td>%s</td><td>%s</td></tr>" % (t, insert_line_breaks(tactic_description(t), 40)))
print("</table>")
else:
for t in tactics():
print("%s : %s" % (t, tactic_description(t)))
class Probe:
"""Probes are used to inspect a goal (aka problem) and collect information that may be used
to decide which solver and/or preprocessing step will be used.
"""
def __init__(self, probe, ctx=None):
self.ctx = _get_ctx(ctx)
self.probe = None
if isinstance(probe, ProbeObj):
self.probe = probe
elif isinstance(probe, float):
self.probe = Z3_probe_const(self.ctx.ref(), probe)
elif _is_int(probe):
self.probe = Z3_probe_const(self.ctx.ref(), float(probe))
elif isinstance(probe, bool):
if probe:
self.probe = Z3_probe_const(self.ctx.ref(), 1.0)
else:
self.probe = Z3_probe_const(self.ctx.ref(), 0.0)
else:
if z3_debug():
_z3_assert(isinstance(probe, str), "probe name expected")
try:
self.probe = Z3_mk_probe(self.ctx.ref(), probe)
except Z3Exception:
raise Z3Exception("unknown probe '%s'" % probe)
Z3_probe_inc_ref(self.ctx.ref(), self.probe)
def __deepcopy__(self, memo={}):
return Probe(self.probe, self.ctx)
def __del__(self):
if self.probe is not None and self.ctx.ref() is not None:
Z3_probe_dec_ref(self.ctx.ref(), self.probe)
def __lt__(self, other):
"""Return a probe that evaluates to "true" when the value returned by `self`
is less than the value returned by `other`.
>>> p = Probe('size') < 10
>>> x = Int('x')
>>> g = Goal()
>>> g.add(x > 0)
>>> g.add(x < 10)
>>> p(g)
1.0
"""
return Probe(Z3_probe_lt(self.ctx.ref(), self.probe, _to_probe(other, self.ctx).probe), self.ctx)
def __gt__(self, other):
"""Return a probe that evaluates to "true" when the value returned by `self`
is greater than the value returned by `other`.
>>> p = Probe('size') > 10
>>> x = Int('x')
>>> g = Goal()
>>> g.add(x > 0)
>>> g.add(x < 10)
>>> p(g)
0.0
"""
return Probe(Z3_probe_gt(self.ctx.ref(), self.probe, _to_probe(other, self.ctx).probe), self.ctx)
def __le__(self, other):
"""Return a probe that evaluates to "true" when the value returned by `self`
is less than or equal to the value returned by `other`.
>>> p = Probe('size') <= 2
>>> x = Int('x')
>>> g = Goal()
>>> g.add(x > 0)
>>> g.add(x < 10)
>>> p(g)
1.0
"""
return Probe(Z3_probe_le(self.ctx.ref(), self.probe, _to_probe(other, self.ctx).probe), self.ctx)
def __ge__(self, other):
"""Return a probe that evaluates to "true" when the value returned by `self`
is greater than or equal to the value returned by `other`.
>>> p = Probe('size') >= 2
>>> x = Int('x')
>>> g = Goal()
>>> g.add(x > 0)
>>> g.add(x < 10)
>>> p(g)
1.0
"""
return Probe(Z3_probe_ge(self.ctx.ref(), self.probe, _to_probe(other, self.ctx).probe), self.ctx)
def __eq__(self, other):
"""Return a probe that evaluates to "true" when the value returned by `self`
is equal to the value returned by `other`.
>>> p = Probe('size') == 2
>>> x = Int('x')
>>> g = Goal()
>>> g.add(x > 0)
>>> g.add(x < 10)
>>> p(g)
1.0
"""
return Probe(Z3_probe_eq(self.ctx.ref(), self.probe, _to_probe(other, self.ctx).probe), self.ctx)
def __ne__(self, other):
"""Return a probe that evaluates to "true" when the value returned by `self`
is not equal to the value returned by `other`.
>>> p = Probe('size') != 2
>>> x = Int('x')
>>> g = Goal()
>>> g.add(x > 0)
>>> g.add(x < 10)
>>> p(g)
0.0
"""
p = self.__eq__(other)
return Probe(Z3_probe_not(self.ctx.ref(), p.probe), self.ctx)
def __call__(self, goal):
"""Evaluate the probe `self` in the given goal.
>>> p = Probe('size')
>>> x = Int('x')
>>> g = Goal()
>>> g.add(x > 0)
>>> g.add(x < 10)
>>> p(g)
2.0
>>> g.add(x < 20)
>>> p(g)
3.0
>>> p = Probe('num-consts')
>>> p(g)
1.0
>>> p = Probe('is-propositional')
>>> p(g)
0.0
>>> p = Probe('is-qflia')
>>> p(g)
1.0
"""
if z3_debug():
_z3_assert(isinstance(goal, (Goal, BoolRef)), "Z3 Goal or Boolean expression expected")
goal = _to_goal(goal)
return Z3_probe_apply(self.ctx.ref(), self.probe, goal.goal)
def is_probe(p):
"""Return `True` if `p` is a Z3 probe.
>>> is_probe(Int('x'))
False
>>> is_probe(Probe('memory'))
True
"""
return isinstance(p, Probe)
def _to_probe(p, ctx=None):
if is_probe(p):
return p
else:
return Probe(p, ctx)
def probes(ctx=None):
"""Return a list of all available probes in Z3.
>>> l = probes()
>>> l.count('memory') == 1
True
"""
ctx = _get_ctx(ctx)
return [Z3_get_probe_name(ctx.ref(), i) for i in range(Z3_get_num_probes(ctx.ref()))]
def probe_description(name, ctx=None):
"""Return a short description for the probe named `name`.
>>> d = probe_description('memory')
"""
ctx = _get_ctx(ctx)
return Z3_probe_get_descr(ctx.ref(), name)
def describe_probes():
"""Display a (tabular) description of all available probes in Z3."""
if in_html_mode():
even = True
print('<table border="1" cellpadding="2" cellspacing="0">')
for p in probes():
if even:
print('<tr style="background-color:#CFCFCF">')
even = False
else:
print("<tr>")
even = True
print("<td>%s</td><td>%s</td></tr>" % (p, insert_line_breaks(probe_description(p), 40)))
print("</table>")
else:
for p in probes():
print("%s : %s" % (p, probe_description(p)))
def _probe_nary(f, args, ctx):
if z3_debug():
_z3_assert(len(args) > 0, "At least one argument expected")
num = len(args)
r = _to_probe(args[0], ctx)
for i in range(num - 1):
r = Probe(f(ctx.ref(), r.probe, _to_probe(args[i + 1], ctx).probe), ctx)
return r
def _probe_and(args, ctx):
return _probe_nary(Z3_probe_and, args, ctx)
def _probe_or(args, ctx):
return _probe_nary(Z3_probe_or, args, ctx)
def FailIf(p, ctx=None):
"""Return a tactic that fails if the probe `p` evaluates to true.
Otherwise, it returns the input goal unmodified.
In the following example, the tactic applies 'simplify' if and only if there are
more than 2 constraints in the goal.
>>> t = OrElse(FailIf(Probe('size') > 2), Tactic('simplify'))
>>> x, y = Ints('x y')
>>> g = Goal()
>>> g.add(x > 0)
>>> g.add(y > 0)
>>> t(g)
[[x > 0, y > 0]]
>>> g.add(x == y + 1)
>>> t(g)
[[Not(x <= 0), Not(y <= 0), x == 1 + y]]
"""
p = _to_probe(p, ctx)
return Tactic(Z3_tactic_fail_if(p.ctx.ref(), p.probe), p.ctx)
def When(p, t, ctx=None):
"""Return a tactic that applies tactic `t` only if probe `p` evaluates to true.
Otherwise, it returns the input goal unmodified.
>>> t = When(Probe('size') > 2, Tactic('simplify'))
>>> x, y = Ints('x y')
>>> g = Goal()
>>> g.add(x > 0)
>>> g.add(y > 0)
>>> t(g)
[[x > 0, y > 0]]
>>> g.add(x == y + 1)
>>> t(g)
[[Not(x <= 0), Not(y <= 0), x == 1 + y]]
"""
p = _to_probe(p, ctx)
t = _to_tactic(t, ctx)
return Tactic(Z3_tactic_when(t.ctx.ref(), p.probe, t.tactic), t.ctx)
def Cond(p, t1, t2, ctx=None):
"""Return a tactic that applies tactic `t1` to a goal if probe `p` evaluates to true, and `t2` otherwise.
>>> t = Cond(Probe('is-qfnra'), Tactic('qfnra'), Tactic('smt'))
"""
p = _to_probe(p, ctx)
t1 = _to_tactic(t1, ctx)
t2 = _to_tactic(t2, ctx)
return Tactic(Z3_tactic_cond(t1.ctx.ref(), p.probe, t1.tactic, t2.tactic), t1.ctx)
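# Illustrative sketch (wrapper name made up): dispatching on goal structure
# with Cond, using the built-in 'is-propositional' probe to choose between a
# SAT-style and an SMT-style tactic.
def _example_cond(goal):
    t = Cond(Probe('is-propositional'), Tactic('sat'), Tactic('smt'))
    return t(goal)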
#########################################
#
# Utils
#
#########################################
def simplify(a, *arguments, **keywords):
"""Simplify the expression `a` using the given options.
This function has many options. Use `help_simplify` to obtain the complete list.
>>> x = Int('x')
>>> y = Int('y')
>>> simplify(x + 1 + y + x + 1)
2 + 2*x + y
>>> simplify((x + 1)*(y + 1), som=True)
1 + x + y + x*y
>>> simplify(Distinct(x, y, 1), blast_distinct=True)
And(Not(x == y), Not(x == 1), Not(y == 1))
>>> simplify(And(x == 0, y == 1), elim_and=True)
Not(Or(Not(x == 0), Not(y == 1)))
"""
if z3_debug():
_z3_assert(is_expr(a), "Z3 expression expected")
if len(arguments) > 0 or len(keywords) > 0:
p = args2params(arguments, keywords, a.ctx)
return _to_expr_ref(Z3_simplify_ex(a.ctx_ref(), a.as_ast(), p.params), a.ctx)
else:
return _to_expr_ref(Z3_simplify(a.ctx_ref(), a.as_ast()), a.ctx)
def help_simplify():
"""Return a string describing all options available for Z3 `simplify` procedure."""
print(Z3_simplify_get_help(main_ctx().ref()))
def simplify_param_descrs():
"""Return the set of parameter descriptions for Z3 `simplify` procedure."""
return ParamDescrsRef(Z3_simplify_get_param_descrs(main_ctx().ref()), main_ctx())
def substitute(t, *m):
"""Apply substitution m on t, m is a list of pairs of the form (from, to).
Every occurrence in t of from is replaced with to.
>>> x = Int('x')
>>> y = Int('y')
>>> substitute(x + 1, (x, y + 1))
y + 1 + 1
>>> f = Function('f', IntSort(), IntSort())
>>> substitute(f(x) + f(y), (f(x), IntVal(1)), (f(y), IntVal(1)))
1 + 1
"""
if isinstance(m, tuple):
m1 = _get_args(m)
if isinstance(m1, list) and all(isinstance(p, tuple) for p in m1):
m = m1
if z3_debug():
_z3_assert(is_expr(t), "Z3 expression expected")
_z3_assert(all([isinstance(p, tuple) and is_expr(p[0]) and is_expr(p[1]) and p[0].sort().eq(
p[1].sort()) for p in m]), "Z3 invalid substitution, expression pairs expected.")
num = len(m)
_from = (Ast * num)()
_to = (Ast * num)()
for i in range(num):
_from[i] = m[i][0].as_ast()
_to[i] = m[i][1].as_ast()
return _to_expr_ref(Z3_substitute(t.ctx.ref(), t.as_ast(), num, _from, _to), t.ctx)
def substitute_vars(t, *m):
"""Substitute the free variables in t with the expression in m.
>>> v0 = Var(0, IntSort())
>>> v1 = Var(1, IntSort())
>>> x = Int('x')
>>> f = Function('f', IntSort(), IntSort(), IntSort())
>>> # replace v0 with x+1 and v1 with x
>>> substitute_vars(f(v0, v1), x + 1, x)
f(x + 1, x)
"""
if z3_debug():
_z3_assert(is_expr(t), "Z3 expression expected")
_z3_assert(all([is_expr(n) for n in m]), "Z3 invalid substitution, list of expressions expected.")
num = len(m)
_to = (Ast * num)()
for i in range(num):
_to[i] = m[i].as_ast()
return _to_expr_ref(Z3_substitute_vars(t.ctx.ref(), t.as_ast(), num, _to), t.ctx)
def Sum(*args):
"""Create the sum of the Z3 expressions.
>>> a, b, c = Ints('a b c')
>>> Sum(a, b, c)
a + b + c
>>> Sum([a, b, c])
a + b + c
>>> A = IntVector('a', 5)
>>> Sum(A)
a__0 + a__1 + a__2 + a__3 + a__4
"""
args = _get_args(args)
if len(args) == 0:
return 0
ctx = _ctx_from_ast_arg_list(args)
if ctx is None:
return _reduce(lambda a, b: a + b, args, 0)
args = _coerce_expr_list(args, ctx)
if is_bv(args[0]):
return _reduce(lambda a, b: a + b, args, 0)
else:
_args, sz = _to_ast_array(args)
return ArithRef(Z3_mk_add(ctx.ref(), sz, _args), ctx)
def Product(*args):
"""Create the product of the Z3 expressions.
>>> a, b, c = Ints('a b c')
>>> Product(a, b, c)
a*b*c
>>> Product([a, b, c])
a*b*c
>>> A = IntVector('a', 5)
>>> Product(A)
a__0*a__1*a__2*a__3*a__4
"""
args = _get_args(args)
if len(args) == 0:
return 1
ctx = _ctx_from_ast_arg_list(args)
if ctx is None:
return _reduce(lambda a, b: a * b, args, 1)
args = _coerce_expr_list(args, ctx)
if is_bv(args[0]):
return _reduce(lambda a, b: a * b, args, 1)
else:
_args, sz = _to_ast_array(args)
return ArithRef(Z3_mk_mul(ctx.ref(), sz, _args), ctx)
def Abs(arg):
"""Create the absolute value of an arithmetic expression"""
return If(arg > 0, arg, -arg)
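# Illustrative sketch (function name made up): Abs expands to an If-term,
# so it can be used directly inside constraints over Int and Real terms.
def _example_abs():
    x = Int('x')
    s = Solver()
    s.add(Abs(x) == 3, x < 0)
    s.check()          # sat
    return s.model()   # e.g. [x = -3]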
def AtMost(*args):
"""Create an at-most Pseudo-Boolean k constraint.
>>> a, b, c = Bools('a b c')
>>> f = AtMost(a, b, c, 2)
"""
args = _get_args(args)
if z3_debug():
_z3_assert(len(args) > 1, "Non empty list of arguments expected")
ctx = _ctx_from_ast_arg_list(args)
if z3_debug():
_z3_assert(ctx is not None, "At least one of the arguments must be a Z3 expression")
args1 = _coerce_expr_list(args[:-1], ctx)
k = args[-1]
_args, sz = _to_ast_array(args1)
return BoolRef(Z3_mk_atmost(ctx.ref(), sz, _args, k), ctx)
def AtLeast(*args):
"""Create an at-most Pseudo-Boolean k constraint.
>>> a, b, c = Bools('a b c')
>>> f = AtLeast(a, b, c, 2)
"""
args = _get_args(args)
if z3_debug():
_z3_assert(len(args) > 1, "Non empty list of arguments expected")
ctx = _ctx_from_ast_arg_list(args)
if z3_debug():
_z3_assert(ctx is not None, "At least one of the arguments must be a Z3 expression")
args1 = _coerce_expr_list(args[:-1], ctx)
k = args[-1]
_args, sz = _to_ast_array(args1)
return BoolRef(Z3_mk_atleast(ctx.ref(), sz, _args, k), ctx)
def _reorder_pb_arg(arg):
a, b = arg
if not _is_int(b) and _is_int(a):
return b, a
return arg
def _pb_args_coeffs(args, default_ctx=None):
args = _get_args_ast_list(args)
if len(args) == 0:
return _get_ctx(default_ctx), 0, (Ast * 0)(), (ctypes.c_int * 0)()
args = [_reorder_pb_arg(arg) for arg in args]
args, coeffs = zip(*args)
if z3_debug():
_z3_assert(len(args) > 0, "Non empty list of arguments expected")
ctx = _ctx_from_ast_arg_list(args)
if z3_debug():
_z3_assert(ctx is not None, "At least one of the arguments must be a Z3 expression")
args = _coerce_expr_list(args, ctx)
_args, sz = _to_ast_array(args)
_coeffs = (ctypes.c_int * len(coeffs))()
for i in range(len(coeffs)):
_z3_check_cint_overflow(coeffs[i], "coefficient")
_coeffs[i] = coeffs[i]
return ctx, sz, _args, _coeffs, args
def PbLe(args, k):
"""Create a Pseudo-Boolean inequality k constraint.
>>> a, b, c = Bools('a b c')
>>> f = PbLe(((a,1),(b,3),(c,2)), 3)
"""
_z3_check_cint_overflow(k, "k")
ctx, sz, _args, _coeffs, args = _pb_args_coeffs(args)
return BoolRef(Z3_mk_pble(ctx.ref(), sz, _args, _coeffs, k), ctx)
def PbGe(args, k):
"""Create a Pseudo-Boolean inequality k constraint.
>>> a, b, c = Bools('a b c')
>>> f = PbGe(((a,1),(b,3),(c,2)), 3)
"""
_z3_check_cint_overflow(k, "k")
ctx, sz, _args, _coeffs, args = _pb_args_coeffs(args)
return BoolRef(Z3_mk_pbge(ctx.ref(), sz, _args, _coeffs, k), ctx)
def PbEq(args, k, ctx=None):
"""Create a Pseudo-Boolean inequality k constraint.
>>> a, b, c = Bools('a b c')
>>> f = PbEq(((a,1),(b,3),(c,2)), 3)
"""
_z3_check_cint_overflow(k, "k")
ctx, sz, _args, _coeffs, args = _pb_args_coeffs(args)
return BoolRef(Z3_mk_pbeq(ctx.ref(), sz, _args, _coeffs, k), ctx)
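# Illustrative sketch (function name made up): solving with a weighted
# Pseudo-Boolean equality; PbLe and PbGe are used the same way.
def _example_pb():
    a, b, c = Bools('a b c')
    s = Solver()
    # the weighted sum 1*a + 3*b + 2*c must equal exactly 3
    s.add(PbEq(((a, 1), (b, 3), (c, 2)), 3))
    s.check()  # sat, e.g. with b true and a, c false
    return s.model()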
def solve(*args, **keywords):
"""Solve the constraints `*args`.
    This is a simple function for creating demonstrations. It creates a solver,
    configures it using the options in `keywords`, adds the constraints
    in `args`, and invokes `check()`.
>>> a = Int('a')
>>> solve(a > 0, a < 2)
[a = 1]
"""
show = keywords.pop("show", False)
s = Solver()
s.set(**keywords)
s.add(*args)
if show:
print(s)
r = s.check()
if r == unsat:
print("no solution")
elif r == unknown:
print("failed to solve")
try:
print(s.model())
except Z3Exception:
return
else:
print(s.model())
def solve_using(s, *args, **keywords):
"""Solve the constraints `*args` using solver `s`.
This is a simple function for creating demonstrations. It is similar to `solve`,
but it uses the given solver `s`.
    It configures solver `s` using the options in `keywords`, adds the constraints
    in `args`, and invokes `check()`.
"""
show = keywords.pop("show", False)
if z3_debug():
_z3_assert(isinstance(s, Solver), "Solver object expected")
s.set(**keywords)
s.add(*args)
if show:
print("Problem:")
print(s)
r = s.check()
if r == unsat:
print("no solution")
elif r == unknown:
print("failed to solve")
try:
print(s.model())
except Z3Exception:
return
else:
if show:
print("Solution:")
print(s.model())
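# Illustrative sketch (function name made up): reusing a pre-configured
# solver with solve_using, so repeated demonstration queries share settings.
def _example_solve_using():
    s = Solver()
    s.set(timeout=1000)  # per-query timeout in milliseconds
    a = Int('a')
    solve_using(s, a > 0, a < 2, show=True)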
def prove(claim, show=False, **keywords):
"""Try to prove the given claim.
This is a simple function for creating demonstrations. It tries to prove
`claim` by showing the negation is unsatisfiable.
>>> p, q = Bools('p q')
>>> prove(Not(And(p, q)) == Or(Not(p), Not(q)))
proved
"""
if z3_debug():
_z3_assert(is_bool(claim), "Z3 Boolean expression expected")
s = Solver()
s.set(**keywords)
s.add(Not(claim))
if show:
print(s)
r = s.check()
if r == unsat:
print("proved")
elif r == unknown:
print("failed to prove")
print(s.model())
else:
print("counterexample")
print(s.model())
def _solve_html(*args, **keywords):
"""Version of function `solve` used in RiSE4Fun."""
show = keywords.pop("show", False)
s = Solver()
s.set(**keywords)
s.add(*args)
if show:
print("<b>Problem:</b>")
print(s)
r = s.check()
if r == unsat:
print("<b>no solution</b>")
elif r == unknown:
print("<b>failed to solve</b>")
try:
print(s.model())
except Z3Exception:
return
else:
if show:
print("<b>Solution:</b>")
print(s.model())
def _solve_using_html(s, *args, **keywords):
"""Version of function `solve_using` used in RiSE4Fun."""
show = keywords.pop("show", False)
if z3_debug():
_z3_assert(isinstance(s, Solver), "Solver object expected")
s.set(**keywords)
s.add(*args)
if show:
print("<b>Problem:</b>")
print(s)
r = s.check()
if r == unsat:
print("<b>no solution</b>")
elif r == unknown:
print("<b>failed to solve</b>")
try:
print(s.model())
except Z3Exception:
return
else:
if show:
print("<b>Solution:</b>")
print(s.model())
def _prove_html(claim, show=False, **keywords):
"""Version of function `prove` used in RiSE4Fun."""
if z3_debug():
_z3_assert(is_bool(claim), "Z3 Boolean expression expected")
s = Solver()
s.set(**keywords)
s.add(Not(claim))
if show:
print(s)
r = s.check()
if r == unsat:
print("<b>proved</b>")
elif r == unknown:
print("<b>failed to prove</b>")
print(s.model())
else:
print("<b>counterexample</b>")
print(s.model())
def _dict2sarray(sorts, ctx):
sz = len(sorts)
_names = (Symbol * sz)()
_sorts = (Sort * sz)()
i = 0
for k in sorts:
v = sorts[k]
if z3_debug():
_z3_assert(isinstance(k, str), "String expected")
_z3_assert(is_sort(v), "Z3 sort expected")
_names[i] = to_symbol(k, ctx)
_sorts[i] = v.ast
i = i + 1
return sz, _names, _sorts
def _dict2darray(decls, ctx):
sz = len(decls)
_names = (Symbol * sz)()
_decls = (FuncDecl * sz)()
i = 0
for k in decls:
v = decls[k]
if z3_debug():
_z3_assert(isinstance(k, str), "String expected")
_z3_assert(is_func_decl(v) or is_const(v), "Z3 declaration or constant expected")
_names[i] = to_symbol(k, ctx)
if is_const(v):
_decls[i] = v.decl().ast
else:
_decls[i] = v.ast
i = i + 1
return sz, _names, _decls
def parse_smt2_string(s, sorts={}, decls={}, ctx=None):
"""Parse a string in SMT 2.0 format using the given sorts and decls.
The arguments sorts and decls are Python dictionaries used to initialize
the symbol table used for the SMT 2.0 parser.
>>> parse_smt2_string('(declare-const x Int) (assert (> x 0)) (assert (< x 10))')
[x > 0, x < 10]
>>> x, y = Ints('x y')
>>> f = Function('f', IntSort(), IntSort())
>>> parse_smt2_string('(assert (> (+ foo (g bar)) 0))', decls={ 'foo' : x, 'bar' : y, 'g' : f})
[x + f(y) > 0]
>>> parse_smt2_string('(declare-const a U) (assert (> a 0))', sorts={ 'U' : IntSort() })
[a > 0]
"""
ctx = _get_ctx(ctx)
ssz, snames, ssorts = _dict2sarray(sorts, ctx)
dsz, dnames, ddecls = _dict2darray(decls, ctx)
return AstVector(Z3_parse_smtlib2_string(ctx.ref(), s, ssz, snames, ssorts, dsz, dnames, ddecls), ctx)
def parse_smt2_file(f, sorts={}, decls={}, ctx=None):
"""Parse a file in SMT 2.0 format using the given sorts and decls.
This function is similar to parse_smt2_string().
"""
ctx = _get_ctx(ctx)
ssz, snames, ssorts = _dict2sarray(sorts, ctx)
dsz, dnames, ddecls = _dict2darray(decls, ctx)
return AstVector(Z3_parse_smtlib2_file(ctx.ref(), f, ssz, snames, ssorts, dsz, dnames, ddecls), ctx)
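# Illustrative sketch (function name made up): feeding parsed SMT-LIB2
# assertions to a solver; the returned AstVector can be added directly.
def _example_parse_and_check():
    fmls = parse_smt2_string('(declare-const x Int) (assert (> x 0))')
    s = Solver()
    s.add(fmls)
    return s.check()  # sat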
#########################################
#
# Floating-Point Arithmetic
#
#########################################
# Global default rounding mode
_dflt_rounding_mode = Z3_OP_FPA_RM_TOWARD_ZERO
_dflt_fpsort_ebits = 11
_dflt_fpsort_sbits = 53
def get_default_rounding_mode(ctx=None):
"""Retrieves the global default rounding mode."""
global _dflt_rounding_mode
if _dflt_rounding_mode == Z3_OP_FPA_RM_TOWARD_ZERO:
return RTZ(ctx)
elif _dflt_rounding_mode == Z3_OP_FPA_RM_TOWARD_NEGATIVE:
return RTN(ctx)
elif _dflt_rounding_mode == Z3_OP_FPA_RM_TOWARD_POSITIVE:
return RTP(ctx)
elif _dflt_rounding_mode == Z3_OP_FPA_RM_NEAREST_TIES_TO_EVEN:
return RNE(ctx)
elif _dflt_rounding_mode == Z3_OP_FPA_RM_NEAREST_TIES_TO_AWAY:
return RNA(ctx)
_ROUNDING_MODES = frozenset({
Z3_OP_FPA_RM_TOWARD_ZERO,
Z3_OP_FPA_RM_TOWARD_NEGATIVE,
Z3_OP_FPA_RM_TOWARD_POSITIVE,
Z3_OP_FPA_RM_NEAREST_TIES_TO_EVEN,
Z3_OP_FPA_RM_NEAREST_TIES_TO_AWAY
})
def set_default_rounding_mode(rm, ctx=None):
global _dflt_rounding_mode
if is_fprm_value(rm):
_dflt_rounding_mode = rm.decl().kind()
    else:
        _z3_assert(rm in _ROUNDING_MODES, "illegal rounding mode")
        _dflt_rounding_mode = rm
def get_default_fp_sort(ctx=None):
return FPSort(_dflt_fpsort_ebits, _dflt_fpsort_sbits, ctx)
def set_default_fp_sort(ebits, sbits, ctx=None):
global _dflt_fpsort_ebits
global _dflt_fpsort_sbits
_dflt_fpsort_ebits = ebits
_dflt_fpsort_sbits = sbits
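# Illustrative sketch (function name made up): changing the global FP
# defaults so that infix operators on FP terms round with RNE and use
# half precision (5 exponent bits, 11 significand bits).
def _example_fp_defaults():
    set_default_rounding_mode(RNE())
    set_default_fp_sort(5, 11)
    x = FP('x', get_default_fp_sort())
    return x + 1.0  # built with the RNE default rounding mode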
def _dflt_rm(ctx=None):
return get_default_rounding_mode(ctx)
def _dflt_fps(ctx=None):
return get_default_fp_sort(ctx)
def _coerce_fp_expr_list(alist, ctx):
first_fp_sort = None
for a in alist:
if is_fp(a):
if first_fp_sort is None:
first_fp_sort = a.sort()
elif first_fp_sort == a.sort():
pass # OK, same as before
else:
# we saw at least 2 different float sorts; something will
# throw a sort mismatch later, for now assume None.
first_fp_sort = None
break
r = []
for i in range(len(alist)):
a = alist[i]
        is_repr = isinstance(a, str) and "2**(" in a and a.endswith(")")
if is_repr or _is_int(a) or isinstance(a, (float, bool)):
r.append(FPVal(a, None, first_fp_sort, ctx))
else:
r.append(a)
return _coerce_expr_list(r, ctx)
# FP Sorts
class FPSortRef(SortRef):
"""Floating-point sort."""
def ebits(self):
"""Retrieves the number of bits reserved for the exponent in the FloatingPoint sort `self`.
>>> b = FPSort(8, 24)
>>> b.ebits()
8
"""
return int(Z3_fpa_get_ebits(self.ctx_ref(), self.ast))
def sbits(self):
"""Retrieves the number of bits reserved for the significand in the FloatingPoint sort `self`.
>>> b = FPSort(8, 24)
>>> b.sbits()
24
"""
return int(Z3_fpa_get_sbits(self.ctx_ref(), self.ast))
def cast(self, val):
"""Try to cast `val` as a floating-point expression.
>>> b = FPSort(8, 24)
>>> b.cast(1.0)
1
>>> b.cast(1.0).sexpr()
'(fp #b0 #x7f #b00000000000000000000000)'
"""
if is_expr(val):
if z3_debug():
_z3_assert(self.ctx == val.ctx, "Context mismatch")
return val
else:
return FPVal(val, None, self, self.ctx)
def Float16(ctx=None):
"""Floating-point 16-bit (half) sort."""
ctx = _get_ctx(ctx)
return FPSortRef(Z3_mk_fpa_sort_16(ctx.ref()), ctx)
def FloatHalf(ctx=None):
"""Floating-point 16-bit (half) sort."""
ctx = _get_ctx(ctx)
return FPSortRef(Z3_mk_fpa_sort_half(ctx.ref()), ctx)
def Float32(ctx=None):
"""Floating-point 32-bit (single) sort."""
ctx = _get_ctx(ctx)
return FPSortRef(Z3_mk_fpa_sort_32(ctx.ref()), ctx)
def FloatSingle(ctx=None):
"""Floating-point 32-bit (single) sort."""
ctx = _get_ctx(ctx)
return FPSortRef(Z3_mk_fpa_sort_single(ctx.ref()), ctx)
def Float64(ctx=None):
"""Floating-point 64-bit (double) sort."""
ctx = _get_ctx(ctx)
return FPSortRef(Z3_mk_fpa_sort_64(ctx.ref()), ctx)
def FloatDouble(ctx=None):
"""Floating-point 64-bit (double) sort."""
ctx = _get_ctx(ctx)
return FPSortRef(Z3_mk_fpa_sort_double(ctx.ref()), ctx)
def Float128(ctx=None):
"""Floating-point 128-bit (quadruple) sort."""
ctx = _get_ctx(ctx)
return FPSortRef(Z3_mk_fpa_sort_128(ctx.ref()), ctx)
def FloatQuadruple(ctx=None):
"""Floating-point 128-bit (quadruple) sort."""
ctx = _get_ctx(ctx)
return FPSortRef(Z3_mk_fpa_sort_quadruple(ctx.ref()), ctx)
class FPRMSortRef(SortRef):
""""Floating-point rounding mode sort."""
def is_fp_sort(s):
"""Return True if `s` is a Z3 floating-point sort.
>>> is_fp_sort(FPSort(8, 24))
True
>>> is_fp_sort(IntSort())
False
"""
return isinstance(s, FPSortRef)
def is_fprm_sort(s):
"""Return True if `s` is a Z3 floating-point rounding mode sort.
>>> is_fprm_sort(FPSort(8, 24))
False
>>> is_fprm_sort(RNE().sort())
True
"""
return isinstance(s, FPRMSortRef)
# FP Expressions
class FPRef(ExprRef):
"""Floating-point expressions."""
def sort(self):
"""Return the sort of the floating-point expression `self`.
>>> x = FP('1.0', FPSort(8, 24))
>>> x.sort()
FPSort(8, 24)
>>> x.sort() == FPSort(8, 24)
True
"""
return FPSortRef(Z3_get_sort(self.ctx_ref(), self.as_ast()), self.ctx)
def ebits(self):
"""Retrieves the number of bits reserved for the exponent in the FloatingPoint expression `self`.
>>> b = FPSort(8, 24)
>>> b.ebits()
8
"""
return self.sort().ebits()
def sbits(self):
"""Retrieves the number of bits reserved for the exponent in the FloatingPoint expression `self`.
>>> b = FPSort(8, 24)
>>> b.sbits()
24
"""
return self.sort().sbits()
    def as_string(self):
        """Return a Z3 floating-point expression as a Python string."""
        return Z3_ast_to_string(self.ctx_ref(), self.as_ast())
def __le__(self, other):
return fpLEQ(self, other, self.ctx)
def __lt__(self, other):
return fpLT(self, other, self.ctx)
def __ge__(self, other):
return fpGEQ(self, other, self.ctx)
def __gt__(self, other):
return fpGT(self, other, self.ctx)
def __add__(self, other):
"""Create the Z3 expression `self + other`.
>>> x = FP('x', FPSort(8, 24))
>>> y = FP('y', FPSort(8, 24))
>>> x + y
x + y
>>> (x + y).sort()
FPSort(8, 24)
"""
[a, b] = _coerce_fp_expr_list([self, other], self.ctx)
return fpAdd(_dflt_rm(), a, b, self.ctx)
def __radd__(self, other):
"""Create the Z3 expression `other + self`.
>>> x = FP('x', FPSort(8, 24))
>>> 10 + x
1.25*(2**3) + x
"""
[a, b] = _coerce_fp_expr_list([other, self], self.ctx)
return fpAdd(_dflt_rm(), a, b, self.ctx)
def __sub__(self, other):
"""Create the Z3 expression `self - other`.
>>> x = FP('x', FPSort(8, 24))
>>> y = FP('y', FPSort(8, 24))
>>> x - y
x - y
>>> (x - y).sort()
FPSort(8, 24)
"""
[a, b] = _coerce_fp_expr_list([self, other], self.ctx)
return fpSub(_dflt_rm(), a, b, self.ctx)
def __rsub__(self, other):
"""Create the Z3 expression `other - self`.
>>> x = FP('x', FPSort(8, 24))
>>> 10 - x
1.25*(2**3) - x
"""
[a, b] = _coerce_fp_expr_list([other, self], self.ctx)
return fpSub(_dflt_rm(), a, b, self.ctx)
def __mul__(self, other):
"""Create the Z3 expression `self * other`.
>>> x = FP('x', FPSort(8, 24))
>>> y = FP('y', FPSort(8, 24))
>>> x * y
x * y
>>> (x * y).sort()
FPSort(8, 24)
>>> 10 * y
1.25*(2**3) * y
"""
[a, b] = _coerce_fp_expr_list([self, other], self.ctx)
return fpMul(_dflt_rm(), a, b, self.ctx)
def __rmul__(self, other):
"""Create the Z3 expression `other * self`.
>>> x = FP('x', FPSort(8, 24))
>>> y = FP('y', FPSort(8, 24))
>>> x * y
x * y
>>> x * 10
x * 1.25*(2**3)
"""
[a, b] = _coerce_fp_expr_list([other, self], self.ctx)
return fpMul(_dflt_rm(), a, b, self.ctx)
def __pos__(self):
"""Create the Z3 expression `+self`."""
return self
def __neg__(self):
"""Create the Z3 expression `-self`.
>>> x = FP('x', Float32())
>>> -x
-x
"""
return fpNeg(self)
def __div__(self, other):
"""Create the Z3 expression `self / other`.
>>> x = FP('x', FPSort(8, 24))
>>> y = FP('y', FPSort(8, 24))
>>> x / y
x / y
>>> (x / y).sort()
FPSort(8, 24)
>>> 10 / y
1.25*(2**3) / y
"""
[a, b] = _coerce_fp_expr_list([self, other], self.ctx)
return fpDiv(_dflt_rm(), a, b, self.ctx)
def __rdiv__(self, other):
"""Create the Z3 expression `other / self`.
>>> x = FP('x', FPSort(8, 24))
>>> y = FP('y', FPSort(8, 24))
>>> x / y
x / y
>>> x / 10
x / 1.25*(2**3)
"""
[a, b] = _coerce_fp_expr_list([other, self], self.ctx)
return fpDiv(_dflt_rm(), a, b, self.ctx)
def __truediv__(self, other):
"""Create the Z3 expression division `self / other`."""
return self.__div__(other)
def __rtruediv__(self, other):
"""Create the Z3 expression division `other / self`."""
return self.__rdiv__(other)
    def __mod__(self, other):
        """Create the Z3 floating-point remainder expression `self % other`."""
        return fpRem(self, other)
    def __rmod__(self, other):
        """Create the Z3 floating-point remainder expression `other % self`."""
        return fpRem(other, self)
class FPRMRef(ExprRef):
    """Floating-point rounding mode expressions."""
    def as_string(self):
        """Return a Z3 floating-point rounding mode expression as a Python string."""
        return Z3_ast_to_string(self.ctx_ref(), self.as_ast())
def RoundNearestTiesToEven(ctx=None):
    """Create the RoundNearestTiesToEven rounding mode expression."""
    ctx = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_nearest_ties_to_even(ctx.ref()), ctx)
def RNE(ctx=None):
    """Shorthand for RoundNearestTiesToEven()."""
    ctx = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_nearest_ties_to_even(ctx.ref()), ctx)
def RoundNearestTiesToAway(ctx=None):
    """Create the RoundNearestTiesToAway rounding mode expression."""
    ctx = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_nearest_ties_to_away(ctx.ref()), ctx)
def RNA(ctx=None):
    """Shorthand for RoundNearestTiesToAway()."""
    ctx = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_nearest_ties_to_away(ctx.ref()), ctx)
def RoundTowardPositive(ctx=None):
    """Create the RoundTowardPositive rounding mode expression."""
    ctx = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_toward_positive(ctx.ref()), ctx)
def RTP(ctx=None):
    """Shorthand for RoundTowardPositive()."""
    ctx = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_toward_positive(ctx.ref()), ctx)
def RoundTowardNegative(ctx=None):
    """Create the RoundTowardNegative rounding mode expression."""
    ctx = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_toward_negative(ctx.ref()), ctx)
def RTN(ctx=None):
    """Shorthand for RoundTowardNegative()."""
    ctx = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_toward_negative(ctx.ref()), ctx)
def RoundTowardZero(ctx=None):
    """Create the RoundTowardZero rounding mode expression."""
    ctx = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_toward_zero(ctx.ref()), ctx)
def RTZ(ctx=None):
    """Shorthand for RoundTowardZero()."""
    ctx = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_toward_zero(ctx.ref()), ctx)
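# Illustrative sketch (function name made up): the long-form constructors
# and their shorthands build structurally equal rounding-mode values.
def _example_rounding_modes():
    assert eq(RNE(), RoundNearestTiesToEven())
    assert eq(RTZ(), RoundTowardZero())
    return [RNE(), RNA(), RTP(), RTN(), RTZ()]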
def is_fprm(a):
"""Return `True` if `a` is a Z3 floating-point rounding mode expression.
>>> rm = RNE()
>>> is_fprm(rm)
True
>>> rm = 1.0
>>> is_fprm(rm)
False
"""
return isinstance(a, FPRMRef)
def is_fprm_value(a):
"""Return `True` if `a` is a Z3 floating-point rounding mode numeral value."""
return is_fprm(a) and _is_numeral(a.ctx, a.ast)
# FP Numerals
class FPNumRef(FPRef):
    """Floating-point numeral values."""
    def sign(self):
        """The sign of the numeral.
        >>> x = FPVal(+1.0, FPSort(8, 24))
        >>> x.sign()
        False
        >>> x = FPVal(-1.0, FPSort(8, 24))
        >>> x.sign()
        True
        """
        num = (ctypes.c_int)()
        nsign = Z3_fpa_get_numeral_sign(self.ctx.ref(), self.as_ast(), byref(num))
        if nsign is False:
            raise Z3Exception("error retrieving the sign of a numeral.")
        return num.value != 0
    def sign_as_bv(self):
        """The sign of the numeral as a bit-vector expression.
        Remark: NaNs are invalid arguments.
        """
        return BitVecNumRef(Z3_fpa_get_numeral_sign_bv(self.ctx.ref(), self.as_ast()), self.ctx)
    def significand(self):
        """The significand of the numeral.
        >>> x = FPVal(2.5, FPSort(8, 24))
        >>> x.significand()
        1.25
        """
        return Z3_fpa_get_numeral_significand_string(self.ctx.ref(), self.as_ast())
    def significand_as_long(self):
        """The significand of the numeral as a long.
        >>> x = FPVal(2.5, FPSort(8, 24))
        >>> x.significand_as_long()
        1.25
        """
        ptr = (ctypes.c_ulonglong * 1)()
        if not Z3_fpa_get_numeral_significand_uint64(self.ctx.ref(), self.as_ast(), ptr):
            raise Z3Exception("error retrieving the significand of a numeral.")
        return ptr[0]
    def significand_as_bv(self):
        """The significand of the numeral as a bit-vector expression.
        Remark: NaNs are invalid arguments.
        """
        return BitVecNumRef(Z3_fpa_get_numeral_significand_bv(self.ctx.ref(), self.as_ast()), self.ctx)
    def exponent(self, biased=True):
        """The exponent of the numeral.
        >>> x = FPVal(2.5, FPSort(8, 24))
        >>> x.exponent()
        1
        """
        return Z3_fpa_get_numeral_exponent_string(self.ctx.ref(), self.as_ast(), biased)
    def exponent_as_long(self, biased=True):
        """The exponent of the numeral as a long.
        >>> x = FPVal(2.5, FPSort(8, 24))
        >>> x.exponent_as_long()
        1
        """
        ptr = (ctypes.c_longlong * 1)()
        if not Z3_fpa_get_numeral_exponent_int64(self.ctx.ref(), self.as_ast(), ptr, biased):
            raise Z3Exception("error retrieving the exponent of a numeral.")
        return ptr[0]
    def exponent_as_bv(self, biased=True):
        """The exponent of the numeral as a bit-vector expression.
        Remark: NaNs are invalid arguments.
        """
        return BitVecNumRef(Z3_fpa_get_numeral_exponent_bv(self.ctx.ref(), self.as_ast(), biased), self.ctx)
    def isNaN(self):
        """Indicates whether the numeral is a NaN."""
        return Z3_fpa_is_numeral_nan(self.ctx.ref(), self.as_ast())
    def isInf(self):
        """Indicates whether the numeral is +oo or -oo."""
        return Z3_fpa_is_numeral_inf(self.ctx.ref(), self.as_ast())
    def isZero(self):
        """Indicates whether the numeral is +zero or -zero."""
        return Z3_fpa_is_numeral_zero(self.ctx.ref(), self.as_ast())
    def isNormal(self):
        """Indicates whether the numeral is normal."""
        return Z3_fpa_is_numeral_normal(self.ctx.ref(), self.as_ast())
    def isSubnormal(self):
        """Indicates whether the numeral is subnormal."""
        return Z3_fpa_is_numeral_subnormal(self.ctx.ref(), self.as_ast())
    def isPositive(self):
        """Indicates whether the numeral is positive."""
        return Z3_fpa_is_numeral_positive(self.ctx.ref(), self.as_ast())
    def isNegative(self):
        """Indicates whether the numeral is negative."""
        return Z3_fpa_is_numeral_negative(self.ctx.ref(), self.as_ast())
    def as_string(self):
        """The string representation of the numeral.
        >>> x = FPVal(20, FPSort(8, 24))
        >>> s = x.as_string()  # e.g. 'FPVal(1.25*(2**4), FPSort(8, 24))'
        """
        s = Z3_get_numeral_string(self.ctx.ref(), self.as_ast())
        return ("FPVal(%s, %s)" % (s, self.sort()))
def is_fp(a):
"""Return `True` if `a` is a Z3 floating-point expression.
>>> b = FP('b', FPSort(8, 24))
>>> is_fp(b)
True
>>> is_fp(b + 1.0)
True
>>> is_fp(Int('x'))
False
"""
return isinstance(a, FPRef)
def is_fp_value(a):
"""Return `True` if `a` is a Z3 floating-point numeral value.
>>> b = FP('b', FPSort(8, 24))
>>> is_fp_value(b)
False
>>> b = FPVal(1.0, FPSort(8, 24))
>>> b
1
>>> is_fp_value(b)
True
"""
return is_fp(a) and _is_numeral(a.ctx, a.ast)
def FPSort(ebits, sbits, ctx=None):
"""Return a Z3 floating-point sort of the given sizes. If `ctx=None`, then the global context is used.
>>> Single = FPSort(8, 24)
>>> Double = FPSort(11, 53)
>>> Single
FPSort(8, 24)
>>> x = Const('x', Single)
>>> eq(x, FP('x', FPSort(8, 24)))
True
"""
ctx = _get_ctx(ctx)
return FPSortRef(Z3_mk_fpa_sort(ctx.ref(), ebits, sbits), ctx)
def _to_float_str(val, exp=0):
if isinstance(val, float):
if math.isnan(val):
res = "NaN"
elif val == 0.0:
sone = math.copysign(1.0, val)
if sone < 0.0:
return "-0.0"
else:
return "+0.0"
elif val == float("+inf"):
res = "+oo"
elif val == float("-inf"):
res = "-oo"
else:
v = val.as_integer_ratio()
num = v[0]
den = v[1]
rvs = str(num) + "/" + str(den)
res = rvs + "p" + _to_int_str(exp)
elif isinstance(val, bool):
if val:
res = "1.0"
else:
res = "0.0"
elif _is_int(val):
res = str(val)
elif isinstance(val, str):
inx = val.find("*(2**")
if inx == -1:
res = val
elif val[-1] == ")":
res = val[0:inx]
exp = str(int(val[inx + 5:-1]) + int(exp))
else:
_z3_assert(False, "String does not have floating-point numeral form.")
    else:
        _z3_assert(False, "Python value cannot be used to create floating-point numerals.")
    if exp == 0:
        return res
    else:
        return res + "p" + _to_int_str(exp)
def fpNaN(s):
"""Create a Z3 floating-point NaN term.
>>> s = FPSort(8, 24)
>>> set_fpa_pretty(True)
>>> fpNaN(s)
NaN
>>> pb = get_fpa_pretty()
>>> set_fpa_pretty(False)
>>> fpNaN(s)
fpNaN(FPSort(8, 24))
>>> set_fpa_pretty(pb)
"""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
return FPNumRef(Z3_mk_fpa_nan(s.ctx_ref(), s.ast), s.ctx)
def fpPlusInfinity(s):
"""Create a Z3 floating-point +oo term.
>>> s = FPSort(8, 24)
>>> pb = get_fpa_pretty()
>>> set_fpa_pretty(True)
>>> fpPlusInfinity(s)
+oo
>>> set_fpa_pretty(False)
>>> fpPlusInfinity(s)
fpPlusInfinity(FPSort(8, 24))
>>> set_fpa_pretty(pb)
"""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
return FPNumRef(Z3_mk_fpa_inf(s.ctx_ref(), s.ast, False), s.ctx)
def fpMinusInfinity(s):
"""Create a Z3 floating-point -oo term."""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
return FPNumRef(Z3_mk_fpa_inf(s.ctx_ref(), s.ast, True), s.ctx)
def fpInfinity(s, negative):
"""Create a Z3 floating-point +oo or -oo term."""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
_z3_assert(isinstance(negative, bool), "expected Boolean flag")
return FPNumRef(Z3_mk_fpa_inf(s.ctx_ref(), s.ast, negative), s.ctx)
def fpPlusZero(s):
"""Create a Z3 floating-point +0.0 term."""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
return FPNumRef(Z3_mk_fpa_zero(s.ctx_ref(), s.ast, False), s.ctx)
def fpMinusZero(s):
"""Create a Z3 floating-point -0.0 term."""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
return FPNumRef(Z3_mk_fpa_zero(s.ctx_ref(), s.ast, True), s.ctx)
def fpZero(s, negative):
"""Create a Z3 floating-point +0.0 or -0.0 term."""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
_z3_assert(isinstance(negative, bool), "expected Boolean flag")
return FPNumRef(Z3_mk_fpa_zero(s.ctx_ref(), s.ast, negative), s.ctx)
def FPVal(sig, exp=None, fps=None, ctx=None):
"""Return a floating-point value of value `val` and sort `fps`.
If `ctx=None`, then the global context is used.
>>> v = FPVal(20.0, FPSort(8, 24))
>>> v
1.25*(2**4)
>>> print("0x%.8x" % v.exponent_as_long(False))
0x00000004
>>> v = FPVal(2.25, FPSort(8, 24))
>>> v
1.125*(2**1)
>>> v = FPVal(-2.25, FPSort(8, 24))
>>> v
-1.125*(2**1)
>>> FPVal(-0.0, FPSort(8, 24))
-0.0
>>> FPVal(0.0, FPSort(8, 24))
+0.0
>>> FPVal(+0.0, FPSort(8, 24))
+0.0
"""
ctx = _get_ctx(ctx)
if is_fp_sort(exp):
fps = exp
exp = None
elif fps is None:
fps = _dflt_fps(ctx)
_z3_assert(is_fp_sort(fps), "sort mismatch")
if exp is None:
exp = 0
val = _to_float_str(sig)
if val == "NaN" or val == "nan":
return fpNaN(fps)
elif val == "-0.0":
return fpMinusZero(fps)
elif val == "0.0" or val == "+0.0":
return fpPlusZero(fps)
elif val == "+oo" or val == "+inf" or val == "+Inf":
return fpPlusInfinity(fps)
elif val == "-oo" or val == "-inf" or val == "-Inf":
return fpMinusInfinity(fps)
else:
return FPNumRef(Z3_mk_numeral(ctx.ref(), val, fps.ast), ctx)
def FP(name, fpsort, ctx=None):
"""Return a floating-point constant named `name`.
`fpsort` is the floating-point sort.
If `ctx=None`, then the global context is used.
>>> x = FP('x', FPSort(8, 24))
>>> is_fp(x)
True
>>> x.ebits()
8
>>> x.sort()
FPSort(8, 24)
>>> word = FPSort(8, 24)
>>> x2 = FP('x', word)
>>> eq(x, x2)
True
"""
if isinstance(fpsort, FPSortRef) and ctx is None:
ctx = fpsort.ctx
else:
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_const(ctx.ref(), to_symbol(name, ctx), fpsort.ast), ctx)
def FPs(names, fpsort, ctx=None):
"""Return an array of floating-point constants.
>>> x, y, z = FPs('x y z', FPSort(8, 24))
>>> x.sort()
FPSort(8, 24)
>>> x.sbits()
24
>>> x.ebits()
8
>>> fpMul(RNE(), fpAdd(RNE(), x, y), z)
fpMul(RNE(), fpAdd(RNE(), x, y), z)
"""
ctx = _get_ctx(ctx)
if isinstance(names, str):
names = names.split(" ")
return [FP(name, fpsort, ctx) for name in names]
def fpAbs(a, ctx=None):
"""Create a Z3 floating-point absolute value expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FPVal(1.0, s)
>>> fpAbs(x)
fpAbs(1)
>>> y = FPVal(-20.0, s)
>>> y
-1.25*(2**4)
>>> fpAbs(y)
fpAbs(-1.25*(2**4))
>>> fpAbs(-1.25*(2**4))
fpAbs(-1.25*(2**4))
>>> fpAbs(x).sort()
FPSort(8, 24)
"""
ctx = _get_ctx(ctx)
[a] = _coerce_fp_expr_list([a], ctx)
return FPRef(Z3_mk_fpa_abs(ctx.ref(), a.as_ast()), ctx)
def fpNeg(a, ctx=None):
"""Create a Z3 floating-point addition expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> fpNeg(x)
-x
>>> fpNeg(x).sort()
FPSort(8, 24)
"""
ctx = _get_ctx(ctx)
[a] = _coerce_fp_expr_list([a], ctx)
return FPRef(Z3_mk_fpa_neg(ctx.ref(), a.as_ast()), ctx)
def _mk_fp_unary(f, rm, a, ctx):
ctx = _get_ctx(ctx)
[a] = _coerce_fp_expr_list([a], ctx)
if z3_debug():
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression")
_z3_assert(is_fp(a), "Second argument must be a Z3 floating-point expression")
return FPRef(f(ctx.ref(), rm.as_ast(), a.as_ast()), ctx)
def _mk_fp_unary_pred(f, a, ctx):
ctx = _get_ctx(ctx)
[a] = _coerce_fp_expr_list([a], ctx)
if z3_debug():
_z3_assert(is_fp(a), "First argument must be a Z3 floating-point expression")
return BoolRef(f(ctx.ref(), a.as_ast()), ctx)
def _mk_fp_bin(f, rm, a, b, ctx):
ctx = _get_ctx(ctx)
[a, b] = _coerce_fp_expr_list([a, b], ctx)
if z3_debug():
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression")
_z3_assert(is_fp(a) or is_fp(b), "Second or third argument must be a Z3 floating-point expression")
return FPRef(f(ctx.ref(), rm.as_ast(), a.as_ast(), b.as_ast()), ctx)
def _mk_fp_bin_norm(f, a, b, ctx):
ctx = _get_ctx(ctx)
[a, b] = _coerce_fp_expr_list([a, b], ctx)
if z3_debug():
_z3_assert(is_fp(a) or is_fp(b), "First or second argument must be a Z3 floating-point expression")
return FPRef(f(ctx.ref(), a.as_ast(), b.as_ast()), ctx)
def _mk_fp_bin_pred(f, a, b, ctx):
ctx = _get_ctx(ctx)
[a, b] = _coerce_fp_expr_list([a, b], ctx)
if z3_debug():
_z3_assert(is_fp(a) or is_fp(b), "First or second argument must be a Z3 floating-point expression")
return BoolRef(f(ctx.ref(), a.as_ast(), b.as_ast()), ctx)
def _mk_fp_tern(f, rm, a, b, c, ctx):
ctx = _get_ctx(ctx)
[a, b, c] = _coerce_fp_expr_list([a, b, c], ctx)
if z3_debug():
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression")
_z3_assert(is_fp(a) or is_fp(b) or is_fp(
c), "Second, third or fourth argument must be a Z3 floating-point expression")
return FPRef(f(ctx.ref(), rm.as_ast(), a.as_ast(), b.as_ast(), c.as_ast()), ctx)
def fpAdd(rm, a, b, ctx=None):
"""Create a Z3 floating-point addition expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpAdd(rm, x, y)
fpAdd(RNE(), x, y)
>>> fpAdd(RTZ(), x, y) # default rounding mode is RTZ
x + y
>>> fpAdd(rm, x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin(Z3_mk_fpa_add, rm, a, b, ctx)
def fpSub(rm, a, b, ctx=None):
"""Create a Z3 floating-point subtraction expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpSub(rm, x, y)
fpSub(RNE(), x, y)
>>> fpSub(rm, x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin(Z3_mk_fpa_sub, rm, a, b, ctx)
def fpMul(rm, a, b, ctx=None):
"""Create a Z3 floating-point multiplication expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpMul(rm, x, y)
fpMul(RNE(), x, y)
>>> fpMul(rm, x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin(Z3_mk_fpa_mul, rm, a, b, ctx)
def fpDiv(rm, a, b, ctx=None):
"""Create a Z3 floating-point division expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpDiv(rm, x, y)
fpDiv(RNE(), x, y)
>>> fpDiv(rm, x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin(Z3_mk_fpa_div, rm, a, b, ctx)
def fpRem(a, b, ctx=None):
"""Create a Z3 floating-point remainder expression.
>>> s = FPSort(8, 24)
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpRem(x, y)
fpRem(x, y)
>>> fpRem(x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin_norm(Z3_mk_fpa_rem, a, b, ctx)
def fpMin(a, b, ctx=None):
"""Create a Z3 floating-point minimum expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpMin(x, y)
fpMin(x, y)
>>> fpMin(x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin_norm(Z3_mk_fpa_min, a, b, ctx)
def fpMax(a, b, ctx=None):
"""Create a Z3 floating-point maximum expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpMax(x, y)
fpMax(x, y)
>>> fpMax(x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin_norm(Z3_mk_fpa_max, a, b, ctx)
def fpFMA(rm, a, b, c, ctx=None):
"""Create a Z3 floating-point fused multiply-add expression.
"""
return _mk_fp_tern(Z3_mk_fpa_fma, rm, a, b, c, ctx)
def fpSqrt(rm, a, ctx=None):
"""Create a Z3 floating-point square root expression.
"""
return _mk_fp_unary(Z3_mk_fpa_sqrt, rm, a, ctx)
def fpRoundToIntegral(rm, a, ctx=None):
"""Create a Z3 floating-point roundToIntegral expression.
"""
return _mk_fp_unary(Z3_mk_fpa_round_to_integral, rm, a, ctx)
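# Illustrative sketch (wrapper name made up): using the rounding-mode-first
# operators above; fpFMA rounds x*y + z only once.
def _example_fp_rm_ops():
    s = FPSort(8, 24)
    rm = RNE()
    x, y, z = FPs('x y z', s)
    fma = fpFMA(rm, x, y, z)
    root = fpSqrt(rm, x)
    rounded = fpRoundToIntegral(RTZ(), x)
    return fma, root, rounded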
def fpIsNaN(a, ctx=None):
"""Create a Z3 floating-point isNaN expression.
>>> s = FPSort(8, 24)
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpIsNaN(x)
fpIsNaN(x)
"""
return _mk_fp_unary_pred(Z3_mk_fpa_is_nan, a, ctx)
def fpIsInf(a, ctx=None):
"""Create a Z3 floating-point isInfinite expression.
>>> s = FPSort(8, 24)
>>> x = FP('x', s)
>>> fpIsInf(x)
fpIsInf(x)
"""
return _mk_fp_unary_pred(Z3_mk_fpa_is_infinite, a, ctx)
def fpIsZero(a, ctx=None):
"""Create a Z3 floating-point isZero expression.
"""
return _mk_fp_unary_pred(Z3_mk_fpa_is_zero, a, ctx)
def fpIsNormal(a, ctx=None):
"""Create a Z3 floating-point isNormal expression.
"""
return _mk_fp_unary_pred(Z3_mk_fpa_is_normal, a, ctx)
def fpIsSubnormal(a, ctx=None):
"""Create a Z3 floating-point isSubnormal expression.
"""
return _mk_fp_unary_pred(Z3_mk_fpa_is_subnormal, a, ctx)
def fpIsNegative(a, ctx=None):
"""Create a Z3 floating-point isNegative expression.
"""
return _mk_fp_unary_pred(Z3_mk_fpa_is_negative, a, ctx)
def fpIsPositive(a, ctx=None):
"""Create a Z3 floating-point isPositive expression.
"""
return _mk_fp_unary_pred(Z3_mk_fpa_is_positive, a, ctx)
def _check_fp_args(a, b):
if z3_debug():
_z3_assert(is_fp(a) or is_fp(b), "First or second argument must be a Z3 floating-point expression")
def fpLT(a, b, ctx=None):
"""Create the Z3 floating-point expression `other < self`.
>>> x, y = FPs('x y', FPSort(8, 24))
>>> fpLT(x, y)
x < y
>>> (x < y).sexpr()
'(fp.lt x y)'
"""
return _mk_fp_bin_pred(Z3_mk_fpa_lt, a, b, ctx)
def fpLEQ(a, b, ctx=None):
"""Create the Z3 floating-point expression `other <= self`.
>>> x, y = FPs('x y', FPSort(8, 24))
>>> fpLEQ(x, y)
x <= y
>>> (x <= y).sexpr()
'(fp.leq x y)'
"""
return _mk_fp_bin_pred(Z3_mk_fpa_leq, a, b, ctx)
def fpGT(a, b, ctx=None):
"""Create the Z3 floating-point expression `other > self`.
>>> x, y = FPs('x y', FPSort(8, 24))
>>> fpGT(x, y)
x > y
>>> (x > y).sexpr()
'(fp.gt x y)'
"""
return _mk_fp_bin_pred(Z3_mk_fpa_gt, a, b, ctx)
def fpGEQ(a, b, ctx=None):
"""Create the Z3 floating-point expression `other >= self`.
>>> x, y = FPs('x y', FPSort(8, 24))
>>> fpGEQ(x, y)
x >= y
>>> (x >= y).sexpr()
'(fp.geq x y)'
"""
return _mk_fp_bin_pred(Z3_mk_fpa_geq, a, b, ctx)
def fpEQ(a, b, ctx=None):
"""Create the Z3 floating-point expression `fpEQ(other, self)`.
>>> x, y = FPs('x y', FPSort(8, 24))
>>> fpEQ(x, y)
fpEQ(x, y)
>>> fpEQ(x, y).sexpr()
'(fp.eq x y)'
"""
return _mk_fp_bin_pred(Z3_mk_fpa_eq, a, b, ctx)
def fpNEQ(a, b, ctx=None):
"""Create the Z3 floating-point expression `Not(fpEQ(other, self))`.
>>> x, y = FPs('x y', FPSort(8, 24))
>>> fpNEQ(x, y)
Not(fpEQ(x, y))
>>> (x != y).sexpr()
'(distinct x y)'
"""
return Not(fpEQ(a, b, ctx))
def fpFP(sgn, exp, sig, ctx=None):
"""Create the Z3 floating-point value `fpFP(sgn, sig, exp)` from the three bit-vectors sgn, sig, and exp.
>>> s = FPSort(8, 24)
>>> x = fpFP(BitVecVal(1, 1), BitVecVal(2**7-1, 8), BitVecVal(2**22, 23))
>>> print(x)
fpFP(1, 127, 4194304)
>>> xv = FPVal(-1.5, s)
>>> print(xv)
-1.5
>>> slvr = Solver()
>>> slvr.add(fpEQ(x, xv))
>>> slvr.check()
sat
>>> xv = FPVal(+1.5, s)
>>> print(xv)
1.5
>>> slvr = Solver()
>>> slvr.add(fpEQ(x, xv))
>>> slvr.check()
unsat
"""
_z3_assert(is_bv(sgn) and is_bv(exp) and is_bv(sig), "sort mismatch")
_z3_assert(sgn.sort().size() == 1, "sort mismatch")
ctx = _get_ctx(ctx)
_z3_assert(ctx == sgn.ctx == exp.ctx == sig.ctx, "context mismatch")
return FPRef(Z3_mk_fpa_fp(ctx.ref(), sgn.ast, exp.ast, sig.ast), ctx)
def fpToFP(a1, a2=None, a3=None, ctx=None):
"""Create a Z3 floating-point conversion expression from other term sorts
to floating-point.
From a bit-vector term in IEEE 754-2008 format:
>>> x = FPVal(1.0, Float32())
>>> x_bv = fpToIEEEBV(x)
>>> simplify(fpToFP(x_bv, Float32()))
1
From a floating-point term with different precision:
>>> x = FPVal(1.0, Float32())
>>> x_db = fpToFP(RNE(), x, Float64())
>>> x_db.sort()
FPSort(11, 53)
From a real term:
>>> x_r = RealVal(1.5)
>>> simplify(fpToFP(RNE(), x_r, Float32()))
1.5
From a signed bit-vector term:
>>> x_signed = BitVecVal(-5, BitVecSort(32))
>>> simplify(fpToFP(RNE(), x_signed, Float32()))
-1.25*(2**2)
"""
ctx = _get_ctx(ctx)
if is_bv(a1) and is_fp_sort(a2):
return FPRef(Z3_mk_fpa_to_fp_bv(ctx.ref(), a1.ast, a2.ast), ctx)
elif is_fprm(a1) and is_fp(a2) and is_fp_sort(a3):
return FPRef(Z3_mk_fpa_to_fp_float(ctx.ref(), a1.ast, a2.ast, a3.ast), ctx)
elif is_fprm(a1) and is_real(a2) and is_fp_sort(a3):
return FPRef(Z3_mk_fpa_to_fp_real(ctx.ref(), a1.ast, a2.ast, a3.ast), ctx)
elif is_fprm(a1) and is_bv(a2) and is_fp_sort(a3):
return FPRef(Z3_mk_fpa_to_fp_signed(ctx.ref(), a1.ast, a2.ast, a3.ast), ctx)
else:
raise Z3Exception("Unsupported combination of arguments for conversion to floating-point term.")
def fpBVToFP(v, sort, ctx=None):
"""Create a Z3 floating-point conversion expression that represents the
conversion from a bit-vector term to a floating-point term.
>>> x_bv = BitVecVal(0x3F800000, 32)
>>> x_fp = fpBVToFP(x_bv, Float32())
>>> x_fp
fpToFP(1065353216)
>>> simplify(x_fp)
1
"""
_z3_assert(is_bv(v), "First argument must be a Z3 bit-vector expression")
_z3_assert(is_fp_sort(sort), "Second argument must be a Z3 floating-point sort.")
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_fpa_to_fp_bv(ctx.ref(), v.ast, sort.ast), ctx)
def fpFPToFP(rm, v, sort, ctx=None):
"""Create a Z3 floating-point conversion expression that represents the
conversion from a floating-point term to a floating-point term of different precision.
>>> x_sgl = FPVal(1.0, Float32())
>>> x_dbl = fpFPToFP(RNE(), x_sgl, Float64())
>>> x_dbl
fpToFP(RNE(), 1)
>>> simplify(x_dbl)
1
>>> x_dbl.sort()
FPSort(11, 53)
"""
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression.")
_z3_assert(is_fp(v), "Second argument must be a Z3 floating-point expression.")
_z3_assert(is_fp_sort(sort), "Third argument must be a Z3 floating-point sort.")
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_fpa_to_fp_float(ctx.ref(), rm.ast, v.ast, sort.ast), ctx)
def fpRealToFP(rm, v, sort, ctx=None):
"""Create a Z3 floating-point conversion expression that represents the
conversion from a real term to a floating-point term.
>>> x_r = RealVal(1.5)
>>> x_fp = fpRealToFP(RNE(), x_r, Float32())
>>> x_fp
fpToFP(RNE(), 3/2)
>>> simplify(x_fp)
1.5
"""
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression.")
_z3_assert(is_real(v), "Second argument must be a Z3 expression or real sort.")
_z3_assert(is_fp_sort(sort), "Third argument must be a Z3 floating-point sort.")
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_fpa_to_fp_real(ctx.ref(), rm.ast, v.ast, sort.ast), ctx)
def fpSignedToFP(rm, v, sort, ctx=None):
"""Create a Z3 floating-point conversion expression that represents the
conversion from a signed bit-vector term (encoding an integer) to a floating-point term.
>>> x_signed = BitVecVal(-5, BitVecSort(32))
>>> x_fp = fpSignedToFP(RNE(), x_signed, Float32())
>>> x_fp
fpToFP(RNE(), 4294967291)
>>> simplify(x_fp)
-1.25*(2**2)
"""
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression.")
_z3_assert(is_bv(v), "Second argument must be a Z3 bit-vector expression")
_z3_assert(is_fp_sort(sort), "Third argument must be a Z3 floating-point sort.")
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_fpa_to_fp_signed(ctx.ref(), rm.ast, v.ast, sort.ast), ctx)
def fpUnsignedToFP(rm, v, sort, ctx=None):
"""Create a Z3 floating-point conversion expression that represents the
conversion from an unsigned bit-vector term (encoding an integer) to a floating-point term.
>>> x_signed = BitVecVal(-5, BitVecSort(32))
>>> x_fp = fpUnsignedToFP(RNE(), x_signed, Float32())
>>> x_fp
fpToFPUnsigned(RNE(), 4294967291)
>>> simplify(x_fp)
1*(2**32)
"""
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression.")
_z3_assert(is_bv(v), "Second argument must be a Z3 bit-vector expression")
_z3_assert(is_fp_sort(sort), "Third argument must be a Z3 floating-point sort.")
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_fpa_to_fp_unsigned(ctx.ref(), rm.ast, v.ast, sort.ast), ctx)
def fpToFPUnsigned(rm, x, s, ctx=None):
"""Create a Z3 floating-point conversion expression, from unsigned bit-vector to floating-point expression."""
if z3_debug():
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression")
_z3_assert(is_bv(x), "Second argument must be a Z3 bit-vector expression")
_z3_assert(is_fp_sort(s), "Third argument must be Z3 floating-point sort")
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_fpa_to_fp_unsigned(ctx.ref(), rm.ast, x.ast, s.ast), ctx)
def fpToSBV(rm, x, s, ctx=None):
"""Create a Z3 floating-point conversion expression, from floating-point expression to signed bit-vector.
>>> x = FP('x', FPSort(8, 24))
>>> y = fpToSBV(RTZ(), x, BitVecSort(32))
>>> print(is_fp(x))
True
>>> print(is_bv(y))
True
>>> print(is_fp(y))
False
>>> print(is_bv(x))
False
"""
if z3_debug():
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression")
_z3_assert(is_fp(x), "Second argument must be a Z3 floating-point expression")
_z3_assert(is_bv_sort(s), "Third argument must be Z3 bit-vector sort")
ctx = _get_ctx(ctx)
return BitVecRef(Z3_mk_fpa_to_sbv(ctx.ref(), rm.ast, x.ast, s.size()), ctx)
def fpToUBV(rm, x, s, ctx=None):
"""Create a Z3 floating-point conversion expression, from floating-point expression to unsigned bit-vector.
>>> x = FP('x', FPSort(8, 24))
>>> y = fpToUBV(RTZ(), x, BitVecSort(32))
>>> print(is_fp(x))
True
>>> print(is_bv(y))
True
>>> print(is_fp(y))
False
>>> print(is_bv(x))
False
"""
if z3_debug():
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression")
_z3_assert(is_fp(x), "Second argument must be a Z3 floating-point expression")
_z3_assert(is_bv_sort(s), "Third argument must be Z3 bit-vector sort")
ctx = _get_ctx(ctx)
return BitVecRef(Z3_mk_fpa_to_ubv(ctx.ref(), rm.ast, x.ast, s.size()), ctx)
def fpToReal(x, ctx=None):
"""Create a Z3 floating-point conversion expression, from floating-point expression to real.
>>> x = FP('x', FPSort(8, 24))
>>> y = fpToReal(x)
>>> print(is_fp(x))
True
>>> print(is_real(y))
True
>>> print(is_fp(y))
False
>>> print(is_real(x))
False
"""
if z3_debug():
_z3_assert(is_fp(x), "First argument must be a Z3 floating-point expression")
ctx = _get_ctx(ctx)
return ArithRef(Z3_mk_fpa_to_real(ctx.ref(), x.ast), ctx)
def fpToIEEEBV(x, ctx=None):
"""\brief Conversion of a floating-point term into a bit-vector term in IEEE 754-2008 format.
The size of the resulting bit-vector is automatically determined.
Note that IEEE 754-2008 allows multiple different representations of NaN. This conversion
knows only one NaN and it will always produce the same bit-vector representation of
that NaN.
>>> x = FP('x', FPSort(8, 24))
>>> y = fpToIEEEBV(x)
>>> print(is_fp(x))
True
>>> print(is_bv(y))
True
>>> print(is_fp(y))
False
>>> print(is_bv(x))
False
"""
if z3_debug():
_z3_assert(is_fp(x), "First argument must be a Z3 floating-point expression")
ctx = _get_ctx(ctx)
return BitVecRef(Z3_mk_fpa_to_ieee_bv(ctx.ref(), x.ast), ctx)
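# Hypothetical round-trip sketch (an assumption, not part of the original
# module): for non-NaN values, converting to the IEEE 754 bit-vector and back
# should be the identity.
# >>> x = FPVal(1.5, Float32())
# >>> simplify(fpToFP(fpToIEEEBV(x), Float32()))
# 1.5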
#########################################
#
# Strings, Sequences and Regular expressions
#
#########################################
class SeqSortRef(SortRef):
"""Sequence sort."""
def is_string(self):
"""Determine if sort is a string
>>> s = StringSort()
>>> s.is_string()
True
>>> s = SeqSort(IntSort())
>>> s.is_string()
False
"""
return Z3_is_string_sort(self.ctx_ref(), self.ast)
def basis(self):
return _to_sort_ref(Z3_get_seq_sort_basis(self.ctx_ref(), self.ast), self.ctx)
class CharSortRef(SortRef):
"""Character sort."""
def StringSort(ctx=None):
"""Create a string sort
>>> s = StringSort()
>>> print(s)
String
"""
ctx = _get_ctx(ctx)
return SeqSortRef(Z3_mk_string_sort(ctx.ref()), ctx)
def CharSort(ctx=None):
"""Create a character sort
>>> ch = CharSort()
>>> print(ch)
Char
"""
ctx = _get_ctx(ctx)
return CharSortRef(Z3_mk_char_sort(ctx.ref()), ctx)
def SeqSort(s):
"""Create a sequence sort over elements provided in the argument
>>> s = SeqSort(IntSort())
>>> s == Unit(IntVal(1)).sort()
True
"""
return SeqSortRef(Z3_mk_seq_sort(s.ctx_ref(), s.ast), s.ctx)
class SeqRef(ExprRef):
"""Sequence expression."""
def sort(self):
return SeqSortRef(Z3_get_sort(self.ctx_ref(), self.as_ast()), self.ctx)
def __add__(self, other):
return Concat(self, other)
def __radd__(self, other):
return Concat(other, self)
def __getitem__(self, i):
if _is_int(i):
i = IntVal(i, self.ctx)
return _to_expr_ref(Z3_mk_seq_nth(self.ctx_ref(), self.as_ast(), i.as_ast()), self.ctx)
def at(self, i):
if _is_int(i):
i = IntVal(i, self.ctx)
return SeqRef(Z3_mk_seq_at(self.ctx_ref(), self.as_ast(), i.as_ast()), self.ctx)
def is_string(self):
return Z3_is_string_sort(self.ctx_ref(), Z3_get_sort(self.ctx_ref(), self.as_ast()))
def is_string_value(self):
return Z3_is_string(self.ctx_ref(), self.as_ast())
def as_string(self):
"""Return a string representation of sequence expression."""
if self.is_string_value():
string_length = ctypes.c_uint()
chars = Z3_get_lstring(self.ctx_ref(), self.as_ast(), byref(string_length))
return string_at(chars, size=string_length.value).decode("latin-1")
return Z3_ast_to_string(self.ctx_ref(), self.as_ast())
def __le__(self, other):
return _to_expr_ref(Z3_mk_str_le(self.ctx_ref(), self.as_ast(), other.as_ast()), self.ctx)
def __lt__(self, other):
return _to_expr_ref(Z3_mk_str_lt(self.ctx_ref(), self.as_ast(), other.as_ast()), self.ctx)
def __ge__(self, other):
return _to_expr_ref(Z3_mk_str_le(self.ctx_ref(), other.as_ast(), self.as_ast()), self.ctx)
def __gt__(self, other):
return _to_expr_ref(Z3_mk_str_lt(self.ctx_ref(), other.as_ast(), self.as_ast()), self.ctx)
def _coerce_char(ch, ctx=None):
if isinstance(ch, str):
ctx = _get_ctx(ctx)
ch = CharVal(ch, ctx)
if not is_expr(ch):
raise Z3Exception("Character expression expected")
return ch
class CharRef(ExprRef):
"""Character expression."""
def __le__(self, other):
other = _coerce_char(other, self.ctx)
return _to_expr_ref(Z3_mk_char_le(self.ctx_ref(), self.as_ast(), other.as_ast()), self.ctx)
def to_int(self):
return _to_expr_ref(Z3_mk_char_to_int(self.ctx_ref(), self.as_ast()), self.ctx)
def to_bv(self):
return _to_expr_ref(Z3_mk_char_to_bv(self.ctx_ref(), self.as_ast()), self.ctx)
def is_digit(self):
return _to_expr_ref(Z3_mk_char_is_digit(self.ctx_ref(), self.as_ast()), self.ctx)
def CharVal(ch, ctx=None):
ctx = _get_ctx(ctx)
if isinstance(ch, str):
ch = ord(ch)
if not isinstance(ch, int):
raise Z3Exception("character value should be an ordinal")
return _to_expr_ref(Z3_mk_char(ctx.ref(), ch), ctx)
def CharFromBv(ch, ctx=None):
if not is_expr(ch):
raise Z3Expression("Bit-vector expression needed")
return _to_expr_ref(Z3_mk_char_from_bv(ch.ctx_ref(), ch.as_ast()), ch.ctx)
def CharToBv(ch, ctx=None):
ch = _coerce_char(ch, ctx)
return ch.to_bv()
def CharToInt(ch, ctx=None):
ch = _coerce_char(ch, ctx)
return ch.to_int()
def CharIsDigit(ch, ctx=None):
ch = _coerce_char(ch, ctx)
return ch.is_digit()
def _coerce_seq(s, ctx=None):
if isinstance(s, str):
ctx = _get_ctx(ctx)
s = StringVal(s, ctx)
if not is_expr(s):
raise Z3Exception("Non-expression passed as a sequence")
if not is_seq(s):
raise Z3Exception("Non-sequence passed as a sequence")
return s
def _get_ctx2(a, b, ctx=None):
if is_expr(a):
return a.ctx
if is_expr(b):
return b.ctx
if ctx is None:
ctx = main_ctx()
return ctx
def is_seq(a):
"""Return `True` if `a` is a Z3 sequence expression.
>>> print (is_seq(Unit(IntVal(0))))
True
>>> print (is_seq(StringVal("abc")))
True
"""
return isinstance(a, SeqRef)
def is_string(a):
"""Return `True` if `a` is a Z3 string expression.
>>> print (is_string(StringVal("ab")))
True
"""
return isinstance(a, SeqRef) and a.is_string()
def is_string_value(a):
"""return 'True' if 'a' is a Z3 string constant expression.
>>> print (is_string_value(StringVal("a")))
True
>>> print (is_string_value(StringVal("a") + StringVal("b")))
False
"""
return isinstance(a, SeqRef) and a.is_string_value()
def StringVal(s, ctx=None):
"""create a string expression"""
s = "".join(str(ch) if 32 <= ord(ch) and ord(ch) < 127 else "\\u{%x}" % (ord(ch)) for ch in s)
ctx = _get_ctx(ctx)
return SeqRef(Z3_mk_string(ctx.ref(), s), ctx)
def String(name, ctx=None):
"""Return a string constant named `name`. If `ctx=None`, then the global context is used.
>>> x = String('x')
"""
ctx = _get_ctx(ctx)
return SeqRef(Z3_mk_const(ctx.ref(), to_symbol(name, ctx), StringSort(ctx).ast), ctx)
def Strings(names, ctx=None):
"""Return a tuple of String constants. """
ctx = _get_ctx(ctx)
if isinstance(names, str):
names = names.split(" ")
return [String(name, ctx) for name in names]
def SubString(s, offset, length):
"""Extract substring or subsequence starting at offset"""
return Extract(s, offset, length)
def SubSeq(s, offset, length):
"""Extract substring or subsequence starting at offset"""
return Extract(s, offset, length)
def Empty(s):
"""Create the empty sequence of the given sort
>>> e = Empty(StringSort())
>>> e2 = StringVal("")
>>> print(e.eq(e2))
True
>>> e3 = Empty(SeqSort(IntSort()))
>>> print(e3)
Empty(Seq(Int))
>>> e4 = Empty(ReSort(SeqSort(IntSort())))
>>> print(e4)
Empty(ReSort(Seq(Int)))
"""
if isinstance(s, SeqSortRef):
return SeqRef(Z3_mk_seq_empty(s.ctx_ref(), s.ast), s.ctx)
if isinstance(s, ReSortRef):
return ReRef(Z3_mk_re_empty(s.ctx_ref(), s.ast), s.ctx)
raise Z3Exception("Non-sequence, non-regular expression sort passed to Empty")
def Full(s):
"""Create the regular expression that accepts the universal language
>>> e = Full(ReSort(SeqSort(IntSort())))
>>> print(e)
Full(ReSort(Seq(Int)))
>>> e1 = Full(ReSort(StringSort()))
>>> print(e1)
Full(ReSort(String))
"""
if isinstance(s, ReSortRef):
return ReRef(Z3_mk_re_full(s.ctx_ref(), s.ast), s.ctx)
raise Z3Exception("Non-sequence, non-regular expression sort passed to Full")
def Unit(a):
"""Create a singleton sequence"""
return SeqRef(Z3_mk_seq_unit(a.ctx_ref(), a.as_ast()), a.ctx)
def PrefixOf(a, b):
"""Check if 'a' is a prefix of 'b'
>>> s1 = PrefixOf("ab", "abc")
>>> simplify(s1)
True
>>> s2 = PrefixOf("bc", "abc")
>>> simplify(s2)
False
"""
ctx = _get_ctx2(a, b)
a = _coerce_seq(a, ctx)
b = _coerce_seq(b, ctx)
return BoolRef(Z3_mk_seq_prefix(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def SuffixOf(a, b):
"""Check if 'a' is a suffix of 'b'
>>> s1 = SuffixOf("ab", "abc")
>>> simplify(s1)
False
>>> s2 = SuffixOf("bc", "abc")
>>> simplify(s2)
True
"""
ctx = _get_ctx2(a, b)
a = _coerce_seq(a, ctx)
b = _coerce_seq(b, ctx)
return BoolRef(Z3_mk_seq_suffix(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def Contains(a, b):
"""Check if 'a' contains 'b'
>>> s1 = Contains("abc", "ab")
>>> simplify(s1)
True
>>> s2 = Contains("abc", "bc")
>>> simplify(s2)
True
>>> x, y, z = Strings('x y z')
>>> s3 = Contains(Concat(x,y,z), y)
>>> simplify(s3)
True
"""
ctx = _get_ctx2(a, b)
a = _coerce_seq(a, ctx)
b = _coerce_seq(b, ctx)
return BoolRef(Z3_mk_seq_contains(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
def Replace(s, src, dst):
"""Replace the first occurrence of 'src' by 'dst' in 's'
>>> r = Replace("aaa", "a", "b")
>>> simplify(r)
"baa"
"""
ctx = _get_ctx2(dst, s)
if ctx is None and is_expr(src):
ctx = src.ctx
src = _coerce_seq(src, ctx)
dst = _coerce_seq(dst, ctx)
s = _coerce_seq(s, ctx)
return SeqRef(Z3_mk_seq_replace(src.ctx_ref(), s.as_ast(), src.as_ast(), dst.as_ast()), s.ctx)
def IndexOf(s, substr, offset=None):
"""Retrieve the index of substring within a string starting at a specified offset.
>>> simplify(IndexOf("abcabc", "bc", 0))
1
>>> simplify(IndexOf("abcabc", "bc", 2))
4
"""
if offset is None:
offset = IntVal(0)
ctx = None
if is_expr(offset):
ctx = offset.ctx
ctx = _get_ctx2(s, substr, ctx)
s = _coerce_seq(s, ctx)
substr = _coerce_seq(substr, ctx)
if _is_int(offset):
offset = IntVal(offset, ctx)
return ArithRef(Z3_mk_seq_index(s.ctx_ref(), s.as_ast(), substr.as_ast(), offset.as_ast()), s.ctx)
def LastIndexOf(s, substr):
"""Retrieve the last index of substring within a string"""
ctx = None
ctx = _get_ctx2(s, substr, ctx)
s = _coerce_seq(s, ctx)
substr = _coerce_seq(substr, ctx)
return ArithRef(Z3_mk_seq_last_index(s.ctx_ref(), s.as_ast(), substr.as_ast()), s.ctx)
def Length(s):
"""Obtain the length of a sequence 's'
>>> l = Length(StringVal("abc"))
>>> simplify(l)
3
"""
s = _coerce_seq(s)
return ArithRef(Z3_mk_seq_length(s.ctx_ref(), s.as_ast()), s.ctx)
def StrToInt(s):
"""Convert string expression to integer
>>> a = StrToInt("1")
>>> simplify(1 == a)
True
>>> b = StrToInt("2")
>>> simplify(1 == b)
False
>>> c = StrToInt(IntToStr(2))
>>> simplify(1 == c)
False
"""
s = _coerce_seq(s)
return ArithRef(Z3_mk_str_to_int(s.ctx_ref(), s.as_ast()), s.ctx)
def IntToStr(s):
"""Convert integer expression to string"""
if not is_expr(s):
s = _py2expr(s)
return SeqRef(Z3_mk_int_to_str(s.ctx_ref(), s.as_ast()), s.ctx)
def StrToCode(s):
"""Convert a unit length string to integer code"""
if not is_expr(s):
s = _py2expr(s)
return ArithRef(Z3_mk_string_to_code(s.ctx_ref(), s.as_ast()), s.ctx)
def StrFromCode(c):
"""Convert code to a string"""
if not is_expr(c):
c = _py2expr(c)
return SeqRef(Z3_mk_string_from_code(c.ctx_ref(), c.as_ast()), c.ctx)
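# Hypothetical usage sketch for StrToCode/StrFromCode (an assumption, not part
# of the original module): unit strings map to their character codes and back.
# >>> simplify(StrToCode(StringVal("a")))
# 97
# >>> simplify(StrFromCode(IntVal(98)))
# "b"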
def Re(s, ctx=None):
"""The regular expression that accepts sequence 's'
>>> s1 = Re("ab")
>>> s2 = Re(StringVal("ab"))
>>> s3 = Re(Unit(BoolVal(True)))
"""
s = _coerce_seq(s, ctx)
return ReRef(Z3_mk_seq_to_re(s.ctx_ref(), s.as_ast()), s.ctx)
# Regular expressions
class ReSortRef(SortRef):
"""Regular expression sort."""
def basis(self):
return _to_sort_ref(Z3_get_re_sort_basis(self.ctx_ref(), self.ast), self.ctx)
def ReSort(s):
if is_ast(s):
return ReSortRef(Z3_mk_re_sort(s.ctx.ref(), s.ast), s.ctx)
if s is None or isinstance(s, Context):
ctx = _get_ctx(s)
        return ReSortRef(Z3_mk_re_sort(ctx.ref(), Z3_mk_string_sort(ctx.ref())), ctx)
raise Z3Exception("Regular expression sort constructor expects either a string or a context or no argument")
class ReRef(ExprRef):
"""Regular expressions."""
def __add__(self, other):
return Union(self, other)
def is_re(s):
return isinstance(s, ReRef)
def InRe(s, re):
"""Create regular expression membership test
>>> re = Union(Re("a"),Re("b"))
>>> print (simplify(InRe("a", re)))
True
>>> print (simplify(InRe("b", re)))
True
>>> print (simplify(InRe("c", re)))
False
"""
s = _coerce_seq(s, re.ctx)
return BoolRef(Z3_mk_seq_in_re(s.ctx_ref(), s.as_ast(), re.as_ast()), s.ctx)
def Union(*args):
"""Create union of regular expressions.
>>> re = Union(Re("a"), Re("b"), Re("c"))
>>> print (simplify(InRe("d", re)))
False
"""
args = _get_args(args)
sz = len(args)
if z3_debug():
_z3_assert(sz > 0, "At least one argument expected.")
_z3_assert(all([is_re(a) for a in args]), "All arguments must be regular expressions.")
if sz == 1:
return args[0]
ctx = args[0].ctx
v = (Ast * sz)()
for i in range(sz):
v[i] = args[i].as_ast()
return ReRef(Z3_mk_re_union(ctx.ref(), sz, v), ctx)
def Intersect(*args):
"""Create intersection of regular expressions.
>>> re = Intersect(Re("a"), Re("b"), Re("c"))
"""
args = _get_args(args)
sz = len(args)
if z3_debug():
_z3_assert(sz > 0, "At least one argument expected.")
_z3_assert(all([is_re(a) for a in args]), "All arguments must be regular expressions.")
if sz == 1:
return args[0]
ctx = args[0].ctx
v = (Ast * sz)()
for i in range(sz):
v[i] = args[i].as_ast()
return ReRef(Z3_mk_re_intersect(ctx.ref(), sz, v), ctx)
def Plus(re):
"""Create the regular expression accepting one or more repetitions of argument.
>>> re = Plus(Re("a"))
>>> print(simplify(InRe("aa", re)))
True
>>> print(simplify(InRe("ab", re)))
False
>>> print(simplify(InRe("", re)))
False
"""
return ReRef(Z3_mk_re_plus(re.ctx_ref(), re.as_ast()), re.ctx)
def Option(re):
"""Create the regular expression that optionally accepts the argument.
>>> re = Option(Re("a"))
>>> print(simplify(InRe("a", re)))
True
>>> print(simplify(InRe("", re)))
True
>>> print(simplify(InRe("aa", re)))
False
"""
return ReRef(Z3_mk_re_option(re.ctx_ref(), re.as_ast()), re.ctx)
def Complement(re):
"""Create the complement regular expression."""
return ReRef(Z3_mk_re_complement(re.ctx_ref(), re.as_ast()), re.ctx)
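# Hypothetical usage sketch for Complement (an assumption, not part of the
# original module): any word outside the complemented language is accepted.
# >>> re = Complement(Re("a"))
# >>> print(simplify(InRe("b", re)))
# True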
def Star(re):
"""Create the regular expression accepting zero or more repetitions of argument.
>>> re = Star(Re("a"))
>>> print(simplify(InRe("aa", re)))
True
>>> print(simplify(InRe("ab", re)))
False
>>> print(simplify(InRe("", re)))
True
"""
return ReRef(Z3_mk_re_star(re.ctx_ref(), re.as_ast()), re.ctx)
def Loop(re, lo, hi=0):
"""Create the regular expression accepting between a lower and upper bound repetitions
>>> re = Loop(Re("a"), 1, 3)
>>> print(simplify(InRe("aa", re)))
True
>>> print(simplify(InRe("aaaa", re)))
False
>>> print(simplify(InRe("", re)))
False
"""
return ReRef(Z3_mk_re_loop(re.ctx_ref(), re.as_ast(), lo, hi), re.ctx)
def Range(lo, hi, ctx=None):
"""Create the range regular expression over two sequences of length 1
>>> range = Range("a","z")
>>> print(simplify(InRe("b", range)))
True
>>> print(simplify(InRe("bb", range)))
False
"""
lo = _coerce_seq(lo, ctx)
hi = _coerce_seq(hi, ctx)
return ReRef(Z3_mk_re_range(lo.ctx_ref(), lo.ast, hi.ast), lo.ctx)
def Diff(a, b, ctx=None):
"""Create the difference regular epression
"""
return ReRef(Z3_mk_re_diff(a.ctx_ref(), a.ast, b.ast), a.ctx)
def AllChar(regex_sort, ctx=None):
"""Create a regular expression that accepts all single character strings
"""
return ReRef(Z3_mk_re_allchar(regex_sort.ctx_ref(), regex_sort.ast), regex_sort.ctx)
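# Hypothetical usage sketch for Diff and AllChar (an assumption, not part of
# the original module): subtracting a language from itself accepts nothing,
# while AllChar accepts exactly the length-1 strings.
# >>> d = Diff(Re("ab"), Re("ab"))
# >>> print(simplify(InRe("ab", d)))
# False
# >>> a = AllChar(ReSort(StringSort()))
# >>> print(simplify(InRe("x", a)))
# True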
# Special Relations
def PartialOrder(a, index):
return FuncDeclRef(Z3_mk_partial_order(a.ctx_ref(), a.ast, index), a.ctx)
def LinearOrder(a, index):
return FuncDeclRef(Z3_mk_linear_order(a.ctx_ref(), a.ast, index), a.ctx)
def TreeOrder(a, index):
return FuncDeclRef(Z3_mk_tree_order(a.ctx_ref(), a.ast, index), a.ctx)
def PiecewiseLinearOrder(a, index):
return FuncDeclRef(Z3_mk_piecewise_linear_order(a.ctx_ref(), a.ast, index), a.ctx)
def TransitiveClosure(f):
"""Given a binary relation R, such that the two arguments have the same sort
create the transitive closure relation R+.
The transitive closure R+ is a new relation.
"""
return FuncDeclRef(Z3_mk_transitive_closure(f.ctx_ref(), f.ast), f.ctx)
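# Hypothetical sketch for the special relations above (an assumption, not
# part of the original module): declare a binary relation over integers and
# reason with its transitive closure.
# >>> R = Function('R', IntSort(), IntSort(), BoolSort())
# >>> TC_R = TransitiveClosure(R)
# >>> s = Solver()
# >>> s.add(R(0, 1), R(1, 2), Not(TC_R(0, 2)))
# >>> s.check()
# unsat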
class PropClosures:
def __init__(self):
self.bases = {}
self.lock = None
def set_threaded(self):
if self.lock is None:
import threading
self.lock = threading.Lock()
def get(self, ctx):
if self.lock:
with self.lock:
r = self.bases[ctx]
else:
r = self.bases[ctx]
return r
def set(self, ctx, r):
if self.lock:
with self.lock:
self.bases[ctx] = r
else:
self.bases[ctx] = r
def insert(self, r):
if self.lock:
with self.lock:
id = len(self.bases) + 3
self.bases[id] = r
else:
id = len(self.bases) + 3
self.bases[id] = r
return id
_prop_closures = None
def ensure_prop_closures():
global _prop_closures
if _prop_closures is None:
_prop_closures = PropClosures()
def user_prop_push(ctx):
_prop_closures.get(ctx).push()
def user_prop_pop(ctx, num_scopes):
_prop_closures.get(ctx).pop(num_scopes)
def user_prop_fresh(id, ctx):
_prop_closures.set_threaded()
prop = _prop_closures.get(id)
new_prop = prop.fresh()
_prop_closures.set(new_prop.id, new_prop)
return ctypes.c_void_p(new_prop.id)
def user_prop_fixed(ctx, cb, id, value):
prop = _prop_closures.get(ctx)
prop.cb = cb
prop.fixed(_to_expr_ref(ctypes.c_void_p(id), prop.ctx()), _to_expr_ref(ctypes.c_void_p(value), prop.ctx()))
prop.cb = None
def user_prop_final(ctx, cb):
prop = _prop_closures.get(ctx)
prop.cb = cb
prop.final()
prop.cb = None
def user_prop_eq(ctx, cb, x, y):
prop = _prop_closures.get(ctx)
prop.cb = cb
x = _to_expr_ref(ctypes.c_void_p(x), prop.ctx())
y = _to_expr_ref(ctypes.c_void_p(y), prop.ctx())
prop.eq(x, y)
prop.cb = None
def user_prop_diseq(ctx, cb, x, y):
prop = _prop_closures.get(ctx)
prop.cb = cb
x = _to_expr_ref(ctypes.c_void_p(x), prop.ctx())
y = _to_expr_ref(ctypes.c_void_p(y), prop.ctx())
prop.diseq(x, y)
prop.cb = None
_user_prop_push = push_eh_type(user_prop_push)
_user_prop_pop = pop_eh_type(user_prop_pop)
_user_prop_fresh = fresh_eh_type(user_prop_fresh)
_user_prop_fixed = fixed_eh_type(user_prop_fixed)
_user_prop_final = final_eh_type(user_prop_final)
_user_prop_eq = eq_eh_type(user_prop_eq)
_user_prop_diseq = eq_eh_type(user_prop_diseq)
class UserPropagateBase:
#
# Either solver is set or ctx is set.
    # Propagators that are created through callbacks
    # to "fresh" inherit the context that is supplied
# as argument to the callback.
# This context should not be deleted. It is owned by the solver.
#
def __init__(self, s, ctx=None):
assert s is None or ctx is None
ensure_prop_closures()
self.solver = s
self._ctx = None
self.cb = None
self.id = _prop_closures.insert(self)
self.fixed = None
self.final = None
self.eq = None
self.diseq = None
if ctx:
# TBD fresh is broken: ctx is not of the right type when we reach here.
self._ctx = Context()
#Z3_del_context(self._ctx.ctx)
#self._ctx.ctx = ctx
#self._ctx.eh = Z3_set_error_handler(ctx, z3_error_handler)
#Z3_set_ast_print_mode(ctx, Z3_PRINT_SMTLIB2_COMPLIANT)
if s:
Z3_solver_propagate_init(self.ctx_ref(),
s.solver,
ctypes.c_void_p(self.id),
_user_prop_push,
_user_prop_pop,
_user_prop_fresh)
def __del__(self):
if self._ctx:
self._ctx.ctx = None
def ctx(self):
if self._ctx:
return self._ctx
else:
return self.solver.ctx
def ctx_ref(self):
return self.ctx().ref()
def add_fixed(self, fixed):
assert not self.fixed
assert not self._ctx
Z3_solver_propagate_fixed(self.ctx_ref(), self.solver.solver, _user_prop_fixed)
self.fixed = fixed
def add_final(self, final):
assert not self.final
assert not self._ctx
Z3_solver_propagate_final(self.ctx_ref(), self.solver.solver, _user_prop_final)
self.final = final
def add_eq(self, eq):
assert not self.eq
assert not self._ctx
Z3_solver_propagate_eq(self.ctx_ref(), self.solver.solver, _user_prop_eq)
self.eq = eq
def add_diseq(self, diseq):
assert not self.diseq
assert not self._ctx
Z3_solver_propagate_diseq(self.ctx_ref(), self.solver.solver, _user_prop_diseq)
self.diseq = diseq
def push(self):
raise Z3Exception("push needs to be overwritten")
def pop(self, num_scopes):
raise Z3Exception("pop needs to be overwritten")
def fresh(self):
raise Z3Exception("fresh needs to be overwritten")
def add(self, e):
assert self.solver
assert not self._ctx
return Z3_solver_propagate_register(self.ctx_ref(), self.solver.solver, e.ast)
#
# Propagation can only be invoked as during a fixed or final callback.
#
def propagate(self, e, ids, eqs=[]):
_ids, num_fixed = _to_ast_array(ids)
num_eqs = len(eqs)
_lhs, _num_lhs = _to_ast_array([x for x, y in eqs])
        _rhs, _num_rhs = _to_ast_array([y for x, y in eqs])
Z3_solver_propagate_consequence(e.ctx.ref(), ctypes.c_void_p(
self.cb), num_fixed, _ids, num_eqs, _lhs, _rhs, e.ast)
def conflict(self, deps):
self.propagate(BoolVal(False, self.ctx()), deps, eqs=[])
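# Hypothetical sketch (an assumption, not part of z3.py): a minimal
# UserPropagateBase subclass that records fixed assignments on a trail and
# undoes them on backtracking, honoring the push/pop contract above.
class _ExampleTracingPropagator(UserPropagateBase):
    def __init__(self, s):
        UserPropagateBase.__init__(self, s)
        self.trail = []  # fixed (var, value) pairs, oldest first
        self.lim = []    # trail sizes recorded at each push
        self.add_fixed(lambda x, v: self.trail.append((x, v)))
    def push(self):
        self.lim.append(len(self.trail))
    def pop(self, num_scopes):
        # revert the trail to its size at the targeted scope
        self.trail = self.trail[:self.lim[-num_scopes]]
        self.lim = self.lim[:-num_scopes]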
|
py | 1a3588657f967fabf0b0c430dd52db322068109b | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.translate.textblob_translator import TextBlobTranslator
class TestTextBlobTranslator(unittest.TestCase):
def test_languages(self):
translator = TextBlobTranslator()
languages = translator.languages()
self.assertTrue(languages.startswith("AFRIKAANS, ALBANIAN, "))
self.assertTrue(translator.supports_language('ENGLISH'))
self.assertFalse(translator.supports_language('KLINGON'))
def test_language_codes(self):
translator = TextBlobTranslator()
self.assertEqual("EN", translator.language_code("ENGLISH"))
self.assertEqual("UNKNOWN", translator.language_code("KLINGON"))
def test_detect_language(self):
translator = TextBlobTranslator()
self.assertEqual("EN", translator.detect("Hello"))
self.assertEqual("FR", translator.detect("Bonjour"))
# Cantonese, currently not supported by Google Translate
self.assertEqual("UNKNOWN", translator.detect("粵語", "UNKNOWN"))
def test_translate(self):
translator = TextBlobTranslator()
translated = translator.translate("Hello", from_lang='EN', to_lang='FR')
self.assertTrue("Bonjour", translated)
|
py | 1a358935fc8042b2d2316143582a15fd2dde8f32 | # Generated by Django 3.1.7 on 2021-04-08 22:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={'ordering': ('-id',)},
),
migrations.RemoveField(
model_name='user',
name='confirmation_code',
),
migrations.AlterField(
model_name='user',
name='role',
field=models.CharField(choices=[('user', 'User'), ('moderator', 'Moderator'), ('admin', 'Admin')], default='user', max_length=9, verbose_name='Роль пользователя'),
),
]
|
py | 1a358947ffc9ecec4207e7a8e5813d401d7dac7c | from __future__ import absolute_import
from __future__ import print_function
import theano
import theano.tensor as T
import numpy as np
import warnings
import time
from collections import deque
from .utils.generic_utils import Progbar
class CallbackList(object):
def __init__(self, callbacks=[], queue_length=10):
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def _set_params(self, params):
for callback in self.callbacks:
callback._set_params(params)
def _set_model(self, model):
for callback in self.callbacks:
callback._set_model(model)
def on_epoch_begin(self, epoch, logs={}):
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs={}):
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs={}):
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch \
and delta_t_median > 0.1:
warnings.warn('Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs={}):
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch \
and delta_t_median > 0.1:
warnings.warn('Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
def on_train_begin(self, logs={}):
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs={}):
for callback in self.callbacks:
callback.on_train_end(logs)
class Callback(object):
def __init__(self):
pass
def _set_params(self, params):
self.params = params
def _set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs={}):
pass
def on_epoch_end(self, epoch, logs={}):
pass
def on_batch_begin(self, batch, logs={}):
pass
def on_batch_end(self, batch, logs={}):
pass
def on_train_begin(self, logs={}):
pass
def on_train_end(self, logs={}):
pass
class BaseLogger(Callback):
def on_train_begin(self, logs={}):
self.verbose = self.params['verbose']
def on_epoch_begin(self, epoch, logs={}):
if self.verbose:
print('Epoch %d' % epoch)
        self.progbar = Progbar(target=self.params['nb_sample'], verbose=self.verbose)
self.current = 0
self.tot_loss = 0.
self.tot_acc = 0.
def on_batch_begin(self, batch, logs={}):
if self.current < self.params['nb_sample']:
self.log_values = []
def on_batch_end(self, batch, logs={}):
batch_size = logs.get('size', 0)
self.current += batch_size
loss = logs.get('loss')
self.log_values.append(('loss', loss))
self.tot_loss += loss * batch_size
if self.params['show_accuracy']:
accuracy = logs.get('accuracy')
self.log_values.append(('acc.', accuracy))
self.tot_acc += accuracy * batch_size
# skip progbar update for the last batch; will be handled by on_epoch_end
if self.verbose and self.current < self.params['nb_sample']:
self.progbar.update(self.current, self.log_values)
def on_epoch_end(self, epoch, logs={}):
self.log_values.append(('loss', self.tot_loss / self.current))
if self.params['show_accuracy']:
self.log_values.append(('acc.', self.tot_acc / self.current))
if self.params['do_validation']:
val_loss = logs.get('val_loss')
self.log_values.append(('val. loss', val_loss))
if self.params['show_accuracy']:
val_acc = logs.get('val_accuracy')
self.log_values.append(('val. acc.', val_acc))
self.progbar.update(self.current, self.log_values)
class History(Callback):
def on_train_begin(self, logs={}):
self.epoch = []
self.loss = []
if self.params['show_accuracy']:
self.accuracy = []
if self.params['do_validation']:
self.validation_loss = []
if self.params['show_accuracy']:
self.validation_accuracy = []
def on_epoch_begin(self, epoch, logs={}):
self.seen = 0
self.tot_loss = 0.
self.tot_accuracy = 0.
def on_batch_end(self, batch, logs={}):
batch_size = logs.get('size', 0)
self.seen += batch_size
self.tot_loss += logs.get('loss', 0.) * batch_size
if self.params['show_accuracy']:
self.tot_accuracy += logs.get('accuracy', 0.) * batch_size
def on_epoch_end(self, epoch, logs={}):
val_loss = logs.get('val_loss')
val_acc = logs.get('val_accuracy')
self.epoch.append(epoch)
self.loss.append(self.tot_loss / self.seen)
if self.params['show_accuracy']:
self.accuracy.append(self.tot_accuracy / self.seen)
if self.params['do_validation']:
self.validation_loss.append(val_loss)
if self.params['show_accuracy']:
self.validation_accuracy.append(val_acc)
class ModelCheckpoint(Callback):
def __init__(self, filepath, verbose=0, save_best_only=False):
        super(ModelCheckpoint, self).__init__()
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.loss = []
self.best_loss = np.Inf
self.val_loss = []
self.best_val_loss = np.Inf
def on_epoch_end(self, epoch, logs={}):
        '''currently, on_epoch_end receives epoch_logs from keras.models.Sequential.fit,
        which at most contains the validation loss and validation accuracy'''
if self.save_best_only and self.params['do_validation']:
cur_val_loss = logs.get('val_loss')
self.val_loss.append(cur_val_loss)
if cur_val_loss < self.best_val_loss:
if self.verbose > 0:
print("Epoch %05d: valdidation loss improved from %0.5f to %0.5f, saving model to %s"
% (epoch, self.best_val_loss, cur_val_loss, self.filepath))
self.best_val_loss = cur_val_loss
self.model.save_weights(self.filepath, overwrite=True)
else:
if self.verbose > 0:
print("Epoch %05d: validation loss did not improve" % (epoch))
elif self.save_best_only and not self.params['do_validation']:
warnings.warn("Can save best model only with validation data, skipping", RuntimeWarning)
elif not self.save_best_only:
if self.verbose > 0:
print("Epoch %05d: saving model to %s" % (epoch, self.filepath))
self.model.save_weights(self.filepath, overwrite=True)
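# Hypothetical usage sketch (an assumption, not part of this module): wiring
# the callbacks above into a fit() call; History collects per-epoch stats and
# ModelCheckpoint persists the best weights seen on validation data.
# history = History()
# checkpoint = ModelCheckpoint('/tmp/weights.hdf5', verbose=1, save_best_only=True)
# model.fit(X_train, y_train, validation_split=0.1, callbacks=[history, checkpoint])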
|
py | 1a3589480a16e21c41b413a54236ab89f4d01048 | from __future__ import unicode_literals
"""
To try running Django tests using green you can run:
./manage.py test --testrunner=green.djangorunner.DjangoRunner
To make the change permanent for your project, in settings.py add:
TEST_RUNNER="green.djangorunner.DjangoRunner"
"""
from argparse import Namespace
import os
import sys
from green.config import mergeConfig
from green.loader import GreenTestLoader
from green.output import GreenStream
from green.runner import run
from green.suite import GreenTestSuite
# If we're not being run from an actual django project, set up django config
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'green.djangorunner')
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = ')9^_e(=cisybdt4m4+fs+_wb%d$!9mpcoy0um^alvx%gexj#jv'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'myproj.urls'
WSGI_APPLICATION = 'myproj.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# End of django fake config stuff
def django_missing():
raise ImportError("No django module installed")
try:
import django
if django.VERSION[:2] < (1, 6): # pragma: no cover
raise ImportError("Green integration supports Django 1.6+")
from django.test.runner import DiscoverRunner
class DjangoRunner(DiscoverRunner):
def __init__(self, verbose=-1, **kwargs):
super(DjangoRunner, self).__init__(**kwargs)
self.verbose = verbose
self.loader = GreenTestLoader()
@classmethod
def add_arguments(cls, parser):
        parser.add_argument(
'--green-verbosity',
action='store', dest='verbose', default=-1, type=int,
help="""
Green 'verbose' level for tests. Value should be an integer
that green supports. For example: --green-verbosity 3""")
super(DjangoRunner, cls).add_arguments(parser)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
# Django setup
self.setup_test_environment()
django_db = self.setup_databases()
# Green
if type(test_labels) == tuple:
test_labels = list(test_labels)
else:
raise ValueError("test_labels should be a tuple of strings")
if not test_labels:
test_labels = ['.']
args = mergeConfig(Namespace())
if self.verbose != -1:
args.verbose = self.verbose
args.targets = test_labels
stream = GreenStream(sys.stdout)
suite = self.loader.loadTargets(args.targets)
if not suite:
suite = GreenTestSuite()
result = run(suite, stream, args)
# Django teardown
self.teardown_databases(django_db)
self.teardown_test_environment()
return self.suite_result(suite, result)
except ImportError: # pragma: no cover
DjangoRunner = django_missing
|
py | 1a358968e80853ac5ef2bf96430840095ed61e30 | import unittest
from onlinejudge_api.main import main
class GetProblemPOJTest(unittest.TestCase):
def test_1000(self):
url = 'http://poj.org/problem?id=1000'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "http://poj.org/problem?id=1000",
"tests": [{
"input": "1 2\r\n",
"output": "3\r\n"
}],
"context": {}
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_2104(self):
url = 'http://poj.org/problem?id=2104'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "http://poj.org/problem?id=2104",
"tests": [{
"input": "7 3\r\n1 5 2 6 3 7 4\r\n2 5 3\r\n4 4 1\r\n1 7 3\r\n",
"output": "5\r\n6\r\n3\r\n"
}],
"context": {}
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_3150(self):
url = 'http://poj.org/problem?id=3150'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "http://poj.org/problem?id=3150",
"tests": [{
"input": "5 3 1 1\r\n1 2 2 1 2\r\n",
"output": "2 2 2 2 1\r\n"
}, {
"input": "5 3 1 10\r\n1 2 2 1 2\r\n",
"output": "2 0 0 2 2\r\n"
}],
"context": {}
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
|
py | 1a358ac6ae851f596072aec8c2223e56cf2c547f | import obspy
from mth5.utils.pathing import DATA_DIR
def load_sample_network_inventory(xml_file_handle, verbose=False):
""" """
iris_dir = DATA_DIR.joinpath("iris")
xml_file_path = iris_dir.joinpath(xml_file_handle)
    xml_file_path_str = str(xml_file_path)
if verbose:
print(f"Loading {xml_file_path_str}")
inventory = obspy.read_inventory(xml_file_path_str)
return inventory
def decimation_info_is_degenerate(stage):
    """
    Check a few conditions that may apply to an obspy stage which, if true,
    imply that the decimation information can be stripped out as it bears
    no information about any data transformation;
    Case 1: All these attrs are None decimation has no information:
    {'decimation_input_sample_rate', 'decimation_factor',
    'decimation_offset', 'decimation_delay', 'decimation_correction'}
    Case 2:
    """
    cond1 = stage.stage_gain in [1.0, None]
    cond2 = stage.decimation_factor in [1, None]
    if cond1 and cond2:
        return True
    else:
        return False
def decimation_info_is_pure_delay(stage):
cond1 = stage.stage_gain == 1.0
cond2 = stage.decimation_factor == 1
cond3 = stage.decimation_delay != 0.0
cond4 = stage.decimation_correction == 0.0
    if cond1 and cond2 and cond3 and cond4:
return True
else:
return False
def stage_gain_is_degenerate():
# if gain is 1.0 ignore it
pass
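# Hypothetical usage sketch (an assumption, not part of this module): dropping
# stages whose decimation block carries no information from an obspy response.
# response.response_stages = [
#     stage for stage in response.response_stages
#     if not decimation_info_is_degenerate(stage)
# ]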
|
py | 1a358b01a97e5065731a5d9a91885787dc31e884 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
import xarray as xr
__author__ = "Louis Richard"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def trace(inp):
r"""Computes trace of the time series of 2nd order tensors.
Parameters
----------
inp : xarray.DataArray
Time series of the input 2nd order tensor.
Returns
-------
out : xarray.DataArray
Time series of the trace of the input tensor.
Examples
--------
>>> from pyrfu import mms, pyrf
Time interval
>>> tint = ["2015-10-30T05:15:20.000", "2015-10-30T05:16:20.000"]
Spacecraft index
>>> mms_id = 1
Load magnetic field and ion temperature
>>> b_xyz = mms.get_data("B_gse_fgm_srvy_l2", tint, mms_id)
>>> t_xyz_i = mms.get_data("Ti_gse_fpi_fast_l2", tint, mms_id)
Rotate to ion temperature tensor to field aligned coordinates
>>> t_xyzfac_i = mms.rotate_tensor(t_xyz_i, "fac", b_xyz, "pp")
Compute scalar temperature
>>> t_i = pyrf.trace(t_xyzfac_i)
"""
inp_data = inp.data
out_data = inp_data[:, 0, 0] + inp_data[:, 1, 1] + inp_data[:, 2, 2]
# Attributes
attrs = inp.attrs
# Change tensor order from 2 (matrix) to 0 (scalar)
attrs["TENSOR_ORDER"] = 0
out = xr.DataArray(out_data, coords=[inp.time.data], dims=["time"],
attrs=attrs)
return out
|
py | 1a358c816d6bfdb3088afc5529b991449ccefd39 | from validator.rules_src.max import Max
from validator.rules_src.min import Min
class Between(Max, Min):
"""
>>> Between(2, 15).check(23)
False
>>> Between(2, 15).check(12)
True
"""
def __init__(self, min_value, max_value):
Min.__init__(self, min_value)
Max.__init__(self, max_value)
def check(self, arg):
if Min.check(self, arg) and Max.check(self, arg):
return True
self.set_errror_message(
f"Expected Between: {self.min_value} and {self.max_value}, Got: {arg}"
)
return False
def __from_str__(self):
Min.__from_str__(self)
Max.__from_str__(self)
|
py | 1a358c8c1dce03c4d44091c38d44f966aab74cf2 | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import re
from datetime import datetime, timedelta
from io import BytesIO
from pathlib import Path
import h5py
import pybufrkit
import pytest
import requests
import wradlib as wrl
from tests.provider.dwd.radar import station_reference_pattern_unsorted
from wetterdienst.provider.dwd.radar import (
DwdRadarDataFormat,
DwdRadarDataSubset,
DwdRadarParameter,
DwdRadarPeriod,
DwdRadarResolution,
DwdRadarValues,
)
from wetterdienst.provider.dwd.radar.sites import DwdRadarSite
from wetterdienst.util.datetime import round_minutes
HERE = Path(__file__).parent
def test_radar_request_radolan_cdc_hourly_alignment_1():
"""
Verify the alignment of RADOLAN_CDC timestamps
to designated interval marks of HH:50.
Here, the given timestamp is at 00:53
and will be floored to 00:50.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.RADOLAN_CDC,
resolution=DwdRadarResolution.HOURLY,
period=DwdRadarPeriod.HISTORICAL,
start_date="2019-08-08 00:53:53",
)
assert request.start_date == datetime(year=2019, month=8, day=8, hour=0, minute=50, second=0)
def test_radar_request_radolan_cdc_hourly_alignment_2():
"""
Verify the alignment of RADOLAN_CDC timestamps
to designated interval marks of HH:50.
Here, the given timestamp is at 00:42
and will be floored to 23:50 on the previous day.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.RADOLAN_CDC,
resolution=DwdRadarResolution.HOURLY,
period=DwdRadarPeriod.HISTORICAL,
start_date="2019-08-08 00:42:42",
)
assert request.start_date == datetime(year=2019, month=8, day=7, hour=23, minute=50, second=0)
@pytest.mark.remote
def test_radar_request_radolan_cdc_historic_hourly_data():
"""
Verify data acquisition for RADOLAN_CDC/hourly/historical.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.RADOLAN_CDC,
resolution=DwdRadarResolution.HOURLY,
period=DwdRadarPeriod.HISTORICAL,
start_date="2019-08-08 00:50:00",
)
assert request == DwdRadarValues(
parameter=DwdRadarParameter.RADOLAN_CDC,
resolution=DwdRadarResolution.HOURLY,
period=DwdRadarPeriod.HISTORICAL,
start_date=datetime(year=2019, month=8, day=8, hour=0, minute=50, second=0),
)
radolan_hourly_backup_url = (
"https://github.com/earthobservations/testdata/raw/main/"
"opendata.dwd.de/climate_environment/CDC/grids_germany/"
"hourly/radolan/historical/bin/2019/radolan_hourly_201908080050"
)
payload = requests.get(radolan_hourly_backup_url)
radolan_hourly = BytesIO(payload.content)
radolan_hourly_test = next(request.query()).data
assert radolan_hourly.getvalue() == radolan_hourly_test.getvalue()
@pytest.mark.remote
def test_radar_request_radolan_cdc_historic_daily_data():
"""
Verify data acquisition for RADOLAN_CDC/daily/historical.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.RADOLAN_CDC,
resolution=DwdRadarResolution.DAILY,
period=DwdRadarPeriod.HISTORICAL,
start_date="2019-08-08 00:50:00",
)
assert request == DwdRadarValues(
parameter=DwdRadarParameter.RADOLAN_CDC,
resolution=DwdRadarResolution.DAILY,
period=DwdRadarPeriod.HISTORICAL,
start_date=datetime(year=2019, month=8, day=8, hour=0, minute=50, second=0),
)
radolan_daily_backup_url = (
"https://github.com/earthobservations/testdata/raw/main/"
"opendata.dwd.de/climate_environment/CDC/grids_germany/"
"daily/radolan/historical/bin/2019/radolan_daily_201908080050"
)
payload = requests.get(radolan_daily_backup_url)
radolan_hourly = BytesIO(payload.content)
radolan_hourly_test = next(request.query()).data
assert radolan_hourly.getvalue() == radolan_hourly_test.getvalue()
@pytest.mark.xfail(reason="Out of service", strict=True)
@pytest.mark.remote
def test_radar_request_composite_historic_fx_yesterday():
"""
Example for testing radar/composite FX for a specific date.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.FX_REFLECTIVITY,
start_date=timestamp,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
# Verify number of results.
assert len(results) == 25
# Verify data.
payload = results[0].data.getvalue()
# TODO: Use wradlib to parse binary format.
# https://docs.wradlib.org/en/stable/notebooks/radolan/radolan_format.html
date_time = request.start_date.strftime("%d%H%M")
month_year = request.start_date.strftime("%m%y")
header = (
f"FX{date_time}10000{month_year}BY.......VS 3SW 2.12.0PR E-01INT 5GP 900x 900VV 000MF 00000002MS "
f"..<{station_reference_pattern_unsorted}>"
)
assert re.match(bytes(header, encoding="ascii"), payload[:160])
@pytest.mark.xfail(reason="Out of service", strict=True)
@pytest.mark.remote
def test_radar_request_composite_historic_fx_timerange():
"""
Example for testing radar/composite FX for a timerange.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.FX_REFLECTIVITY,
start_date=timestamp,
end_date=timedelta(minutes=10),
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
# Verify number of results.
assert len(results) == 50
# Verify all timestamps are properly propagated from the tarfile.
    assert all(
        result.timestamp in (request.start_date, request.start_date + timedelta(minutes=5))
        for result in results
    )
@pytest.mark.remote
def test_radar_request_composite_historic_radolan_rw_yesterday():
"""
Verify acquisition of radar/composite/radolan_rw data works
when using a specific date.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.RW_REFLECTIVITY,
start_date=timestamp,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[0].data
# Verify data.
requested_header = wrl.io.read_radolan_header(buffer)
requested_attrs = wrl.io.parse_dwd_composite_header(requested_header)
attrs = {
"producttype": "RW",
"datetime": request.start_date.to_pydatetime(),
"precision": 0.1,
"intervalseconds": 3600,
"nrow": 900,
"ncol": 900,
"radarlocations": [
"asb",
"boo",
"ros",
"hnr",
"umd",
"pro",
"ess",
"fld",
"drs",
"neu",
"nhb",
"oft",
"eis",
"tur",
"isn",
"fbg",
"mem",
],
"moduleflag": 1,
}
# radar locations can change over time -> check if at least 10 radar locations
# were found and at least 5 of them match with the provided one
assert len(requested_attrs["radarlocations"]) >= 10
assert len(list(set(requested_attrs["radarlocations"]) & set(attrs["radarlocations"]))) >= 5
skip_attrs = ["radarid", "datasize", "maxrange", "radarlocations", "radolanversion"]
for attr in skip_attrs:
requested_attrs.pop(attr, None)
del attrs["radarlocations"]
assert requested_attrs == attrs
@pytest.mark.remote
def test_radar_request_composite_historic_radolan_rw_timerange():
"""
Verify acquisition of radar/composite/radolan_rw data works
when using a specific date, with timerange.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.RW_REFLECTIVITY,
start_date=timestamp,
end_date=timedelta(hours=3),
)
results = list(request.query())
# Verify number of results.
assert len(results) == 3
buffer = results[0].data
# Verify data.
requested_header = wrl.io.read_radolan_header(buffer)
requested_attrs = wrl.io.parse_dwd_composite_header(requested_header)
assert request.start_date.strftime("m%y") == requested_attrs["datetime"].strftime("m%y")
assert request.start_date.strftime("%d%H%M") == requested_attrs["datetime"].strftime("%d%H%M")
attrs = {
"producttype": "RW",
"precision": 0.1,
"intervalseconds": 3600,
"nrow": 900,
"ncol": 900,
"radarlocations": [
"asb",
"boo",
"ros",
"hnr",
"umd",
"pro",
"ess",
"fld",
"drs",
"neu",
"nhb",
"oft",
"eis",
"tur",
"isn",
"fbg",
"mem",
],
"moduleflag": 1,
}
# radar locations can change over time -> check if at least 10 radar locations
# were found and at least 5 of them match with the provided one
assert len(requested_attrs["radarlocations"]) >= 10
assert len(list(set(requested_attrs["radarlocations"]) & set(attrs["radarlocations"]))) >= 5
skip_attrs = [
"datetime",
"radarid",
"datasize",
"maxrange",
"radarlocations",
"radolanversion",
]
for attr in skip_attrs:
requested_attrs.pop(attr, None)
del attrs["radarlocations"]
assert requested_attrs == attrs
@pytest.mark.remote
def test_radar_request_site_historic_dx_yesterday():
"""
Verify acquisition of radar/site/DX data works
when using a specific date.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.DX_REFLECTIVITY,
start_date=timestamp,
site=DwdRadarSite.BOO,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[0].data
# Verify data.
requested_header = wrl.io.read_radolan_header(buffer)
requested_attrs = wrl.io.radolan.parse_dx_header(requested_header)
timestamp_aligned = round_minutes(timestamp, 5)
assert timestamp_aligned.strftime("%m%y") == requested_attrs["datetime"].strftime("%m%y")
assert timestamp_aligned.strftime("%d%H%M") == requested_attrs["datetime"].strftime("%d%H%M")
attrs = {
"producttype": "DX",
"version": " 2",
"cluttermap": 0,
"dopplerfilter": 4,
"statfilter": 0,
"elevprofile": [0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8],
"message": "",
}
skip_attrs = ["datetime", "bytes", "radarid"]
for attr in skip_attrs:
requested_attrs.pop(attr, None)
assert requested_attrs == attrs
@pytest.mark.remote
def test_radar_request_site_historic_dx_timerange():
"""
Verify acquisition of radar/site/DX data works
when using a specific date, with timerange.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.DX_REFLECTIVITY,
start_date=timestamp,
end_date=timedelta(hours=0.5),
site=DwdRadarSite.BOO,
)
# Verify number of elements.
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) == 6
buffer = results[0].data
# Verify data.
requested_header = wrl.io.read_radolan_header(buffer)
requested_attrs = wrl.io.radolan.parse_dx_header(requested_header)
timestamp_aligned = round_minutes(timestamp, 5)
assert timestamp_aligned.strftime("%m%y") == requested_attrs["datetime"].strftime("%m%y")
assert timestamp_aligned.strftime("%d%H%M") == requested_attrs["datetime"].strftime("%d%H%M")
attrs = {
"producttype": "DX",
"version": " 2",
"cluttermap": 0,
"dopplerfilter": 4,
"statfilter": 0,
"elevprofile": [0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8],
"message": "",
}
skip_attrs = ["bytes", "radarid", "datetime"]
for attr in skip_attrs:
requested_attrs.pop(attr, None)
assert requested_attrs == attrs
@pytest.mark.remote
def test_radar_request_site_historic_pe_binary_yesterday():
"""
Verify acquisition of radar/site/PE_ECHO_TOP data works
when using a specific date.
This time, we will use the BINARY data format.
"""
# Acquire data from yesterday at this time.
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.PE_ECHO_TOP,
start_date=timestamp,
site=DwdRadarSite.BOO,
fmt=DwdRadarDataFormat.BINARY,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[0].data
# Verify data.
requested_header = wrl.io.read_radolan_header(buffer)
date_time = request.start_date.strftime("%d%H")
month_year = request.start_date.strftime("%m%y")
header = (
f"PE{date_time}..10132{month_year}BY ....?VS 1LV12 1.0 2.0 3.0 4.0 5.0 "
f"6.0 7.0 8.0 9.0 10.0 11.0 12.0CO0CD0CS0ET 5.0FL....MS"
)
assert re.match(header, requested_header)
@pytest.mark.remote
def test_radar_request_site_historic_pe_bufr():
"""
Verify acquisition of radar/site/PE_ECHO_TOP data works
when using a specific date.
This time, we will use the BUFR data format.
"""
# Acquire data from yesterday at this time.
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.PE_ECHO_TOP,
start_date=timestamp,
site=DwdRadarSite.BOO,
fmt=DwdRadarDataFormat.BUFR,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[0].data
payload = buffer.getvalue()
# Verify data.
header = b"\x00\x00\x00\x00\x00...BUFR"
assert re.match(header, payload), payload[:20]
# Read BUFR file.
decoder = pybufrkit.decoder.Decoder()
decoder.process(payload, info_only=True)
@pytest.mark.remote
@pytest.mark.parametrize(
"fmt",
[
DwdRadarDataFormat.BINARY,
DwdRadarDataFormat.BUFR,
],
)
def test_radar_request_site_historic_pe_timerange(fmt):
"""
Verify acquisition of radar/site/PE_ECHO_TOP data works
when using date ranges.
The proof will use these parameters to acquire data:
- start_date: Yesterday at this time
- end_date: start_date + 1 hour
This time, we will test both the BINARY and BUFR data format.
"""
start_date = datetime.utcnow() - timedelta(days=1)
end_date = timedelta(hours=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.PE_ECHO_TOP,
start_date=start_date,
end_date=end_date,
site=DwdRadarSite.BOO,
fmt=fmt,
)
assert request.start_date.minute % 5 == 0
# Verify number of elements.
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) >= 1
# TODO: Verify data.
@pytest.mark.xfail
@pytest.mark.remote
def test_radar_request_site_historic_px250_bufr_yesterday():
"""
Example for testing radar/site PX250 for a specific date.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.PX250_REFLECTIVITY,
start_date=timestamp,
site=DwdRadarSite.BOO,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[0].data
payload = buffer.getvalue()
# Verify data.
header = b"\x00\x00\x00\x00\x00...BUFR"
assert re.match(header, payload), payload[:20]
# Read BUFR file.
decoder = pybufrkit.decoder.Decoder()
bufr = decoder.process(payload, info_only=True)
# Verify timestamp in BUFR metadata.
timestamp_aligned = round_minutes(timestamp, 5)
bufr_timestamp = datetime(
bufr.year.value,
bufr.month.value,
bufr.day.value,
bufr.hour.value,
bufr.minute.value,
)
assert timestamp_aligned == bufr_timestamp
@pytest.mark.remote
def test_radar_request_site_historic_px250_bufr_timerange():
"""
Example for testing radar/site PX250 for a specific date, with timerange.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.PX250_REFLECTIVITY,
start_date=timestamp,
end_date=timedelta(hours=1),
site=DwdRadarSite.BOO,
)
# Verify number of elements.
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) == 12
# TODO: Verify data.
@pytest.mark.remote
def test_radar_request_site_historic_sweep_pcp_v_bufr_yesterday():
"""
Example for testing radar/site sweep-precipitation for a specific date,
this time in BUFR format.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_PCP_VELOCITY_H,
start_date=timestamp,
site=DwdRadarSite.ASB,
fmt=DwdRadarDataFormat.BUFR,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[1]
payload = buffer.getvalue()
# Read BUFR file.
decoder = pybufrkit.decoder.Decoder()
bufr = decoder.process(payload, info_only=True)
# Verify timestamp in BUFR metadata.
timestamp_aligned = round_minutes(timestamp, 5)
bufr_timestamp = datetime(
bufr.year.value + 2000,
bufr.month.value,
bufr.day.value,
bufr.hour.value,
bufr.minute.value,
)
assert timestamp_aligned == bufr_timestamp
@pytest.mark.remote
def test_radar_request_site_historic_sweep_pcp_v_bufr_timerange():
"""
Example for testing radar/site sweep-precipitation for a specific date,
this time in BUFR format, with timerange.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_PCP_VELOCITY_H,
start_date=timestamp,
end_date=timedelta(hours=1),
site=DwdRadarSite.ASB,
fmt=DwdRadarDataFormat.BUFR,
)
# Verify number of elements.
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) == 12
# TODO: Verify data.
@pytest.mark.remote
def test_radar_request_site_historic_sweep_vol_v_bufr_yesterday():
"""
Example for testing radar/site sweep_vol_v for a specific date,
this time in BUFR format.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_VOL_VELOCITY_H,
start_date=timestamp,
site=DwdRadarSite.ASB,
fmt=DwdRadarDataFormat.BUFR,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[1]
payload = buffer.getvalue()
# Read BUFR file.
decoder = pybufrkit.decoder.Decoder()
bufr = decoder.process(payload, info_only=True)
# Verify timestamp in BUFR metadata.
timestamp_aligned = round_minutes(timestamp, 5)
bufr_timestamp = datetime(
bufr.year.value + 2000,
bufr.month.value,
bufr.day.value,
bufr.hour.value,
bufr.minute.value,
)
assert timestamp_aligned == bufr_timestamp
@pytest.mark.remote
def test_radar_request_site_historic_sweep_vol_v_bufr_timerange():
"""
Example for testing radar/site sweep_vol_v for a specific date,
this time in BUFR format, with timerange.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_VOL_VELOCITY_H,
start_date=timestamp,
end_date=timedelta(hours=0.5),
site=DwdRadarSite.ASB,
fmt=DwdRadarDataFormat.BUFR,
)
# Verify number of elements.
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) == 60
# TODO: Verify data.
@pytest.mark.remote
def test_radar_request_site_historic_sweep_pcp_v_hdf5_yesterday():
"""
Example for testing radar/site sweep-precipitation for a specific date,
this time in HDF5 format.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_PCP_VELOCITY_H,
start_date=timestamp,
site=DwdRadarSite.BOO,
fmt=DwdRadarDataFormat.HDF5,
subset=DwdRadarDataSubset.SIMPLE,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
# Verify number of elements.
assert len(results) == 1
# Get payload.
buffer = results[0][1]
payload = buffer.getvalue()
# Verify data.
assert payload.startswith(b"\x89HDF\r\n")
# Verify more details.
# h5dump ras07-stqual-pcpng01_sweeph5onem_vradh_00-2020093000403400-boo-10132-hd5
hdf = h5py.File(buffer, "r")
assert hdf["/how/radar_system"] is not None
assert hdf["/how"].attrs.get("task") == b"Sc_Pcp-NG-01_BOO"
assert hdf["/what"].attrs.get("source") == b"WMO:10132,NOD:deboo"
assert hdf["/how"].attrs.get("scan_count") == 1
assert hdf["/dataset1/how"].attrs.get("scan_index") == 1
assert hdf["/dataset1/data1/data"].shape == (360, 600)
timestamp = round_minutes(request.start_date, 5)
assert hdf["/what"].attrs.get("date") == bytes(timestamp.strftime("%Y%m%d"), encoding="ascii")
assert hdf["/what"].attrs.get("time").startswith(bytes(timestamp.strftime("%H%M"), encoding="ascii"))
@pytest.mark.remote
def test_radar_request_site_historic_sweep_pcp_v_hdf5_timerange():
"""
Example for testing radar/site sweep-precipitation for a specific date,
this time in HDF5 format, with timerange.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_PCP_VELOCITY_H,
start_date=timestamp,
end_date=timedelta(hours=1),
site=DwdRadarSite.BOO,
fmt=DwdRadarDataFormat.HDF5,
subset=DwdRadarDataSubset.SIMPLE,
)
# Verify number of elements.
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) == 12
# TODO: Verify data.
@pytest.mark.remote
def test_radar_request_site_historic_sweep_vol_v_hdf5_yesterday():
"""
    Example for testing radar/site sweep_vol_v for a specific date,
this time in HDF5 format.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_VOL_VELOCITY_H,
start_date=timestamp,
site=DwdRadarSite.BOO,
fmt=DwdRadarDataFormat.HDF5,
subset=DwdRadarDataSubset.SIMPLE,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
# Verify number of elements.
assert len(results) == 10
# Get payload from first file.
buffer = results[0].data
payload = buffer.getvalue()
# Verify data.
assert payload.startswith(b"\x89HDF\r\n")
# Verify more details.
# h5dump ras07-stqual-vol5minng01_sweeph5onem_vradh_00-2020092917055800-boo-10132-hd5
hdf = h5py.File(buffer, "r")
assert hdf["/how/radar_system"] is not None
assert hdf["/how"].attrs.get("task") == b"Sc_Vol-5Min-NG-01_BOO"
assert hdf["/what"].attrs.get("source") == b"WMO:10132,NOD:deboo"
assert hdf["/how"].attrs.get("scan_count") == 10
assert hdf["/dataset1/how"].attrs.get("scan_index") == 1
assert hdf["/dataset1/data1/data"].shape in ((360, 180), (360, 720), (361, 720))
timestamp = round_minutes(request.start_date, 5)
assert hdf["/what"].attrs.get("date") == bytes(timestamp.strftime("%Y%m%d"), encoding="ascii")
assert hdf["/what"].attrs.get("time").startswith(bytes(timestamp.strftime("%H%M"), encoding="ascii"))
# Verify that the second file is the second scan / elevation level.
buffer = results[1].data
hdf = h5py.File(buffer, "r")
assert hdf["/how"].attrs.get("scan_count") == 10
assert hdf["/dataset1/how"].attrs.get("scan_index") == 2
timestamp = round_minutes(request.start_date, 5)
assert hdf["/what"].attrs.get("date") == bytes(timestamp.strftime("%Y%m%d"), encoding="ascii")
assert hdf["/what"].attrs.get("time").startswith(bytes(timestamp.strftime("%H%M"), encoding="ascii"))
@pytest.mark.remote
def test_radar_request_site_historic_sweep_vol_v_hdf5_timerange():
"""
    Example for testing radar/site sweep_vol_v for a specific date,
this time in HDF5 format, with timerange.
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.SWEEP_VOL_VELOCITY_H,
start_date=timestamp,
end_date=timedelta(hours=0.5),
site=DwdRadarSite.BOO,
fmt=DwdRadarDataFormat.HDF5,
subset=DwdRadarDataSubset.SIMPLE,
)
# Verify number of elements.
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) == 60
# TODO: Verify data.
@pytest.mark.remote
def test_radar_request_radvor_re_yesterday():
"""
Verify acquisition of radar/radvor/re data works
when using a specific date. Querying one point
in time should yield 25 results for a single
5 minute time step.
https://opendata.dwd.de/weather/radar/radvor/re/
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.RE_REFLECTIVITY,
start_date=timestamp,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) == 25
buffer = results[0].data
# Verify data.
requested_header = wrl.io.read_radolan_header(buffer)
requested_attrs = wrl.io.parse_dwd_composite_header(requested_header)
attrs = {
"producttype": "RE",
"datetime": request.start_date.to_pydatetime(),
"precision": 0.001,
"intervalseconds": 3600,
"nrow": 900,
"ncol": 900,
"radarlocations": [
"deasb",
"deboo",
"dedrs",
"deeis",
"deess",
"defbg",
"defld",
"dehnr",
"deisn",
"demem",
"deneu",
"denhb",
"deoft",
"depro",
"deros",
"detur",
"deumd",
],
"predictiontime": 0,
"moduleflag": 8,
}
    # radar locations can change over time -> check that at least 10 radar
    # locations were found and at least 5 of them match the provided ones
assert len(requested_attrs["radarlocations"]) >= 10
assert len(list(set(requested_attrs["radarlocations"]) & set(attrs["radarlocations"]))) >= 5
skip_attrs = [
"radarid",
"datasize",
"radolanversion",
"quantification",
"maxrange",
"radarlocations",
]
for attr in skip_attrs:
requested_attrs.pop(attr, None)
del attrs["radarlocations"]
assert requested_attrs == attrs
@pytest.mark.remote
def test_radar_request_radvor_re_timerange():
"""
Verify acquisition of radar/radvor/re data works
when using a specific date. Querying for 15 minutes
worth of data should yield 75 results.
https://opendata.dwd.de/weather/radar/radvor/re/
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.RE_REFLECTIVITY,
start_date=timestamp,
end_date=timedelta(minutes=3 * 5),
)
# Verify number of elements.
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) == 3 * 25
# TODO: Verify data.
@pytest.mark.remote
def test_radar_request_radvor_rq_yesterday():
"""
Verify acquisition of radar/radvor/rq data works
when using a specific date. Querying one point
in time should yield 3 results for a single
15 minute time step.
https://opendata.dwd.de/weather/radar/radvor/rq/
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.RQ_REFLECTIVITY,
start_date=timestamp,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) == 3
buffer = results[0].data
# Verify data.
requested_header = wrl.io.read_radolan_header(buffer)
requested_attrs = wrl.io.parse_dwd_composite_header(requested_header)
attrs = {
"producttype": "RQ",
"datetime": request.start_date.to_pydatetime(),
"precision": 0.1,
"intervalseconds": 3600,
"nrow": 900,
"ncol": 900,
"radarlocations": [
"asb",
"boo",
"drs",
"eis",
"ess",
"fbg",
"fld",
"hnr",
"isn",
"mem",
"neu",
"nhb",
"oft",
"pro",
"ros",
"tur",
"umd",
],
"predictiontime": 0,
"moduleflag": 8,
}
    # radar locations can change over time -> check that at least 10 radar
    # locations were found and at least 5 of them match the provided ones
assert len(requested_attrs["radarlocations"]) >= 10
assert len(list(set(requested_attrs["radarlocations"]) & set(attrs["radarlocations"]))) >= 5
skip_attrs = [
"datasize",
"quantification",
"radarid",
"maxrange",
"radolanversion",
"radarlocations",
]
for attr in skip_attrs:
requested_attrs.pop(attr, None)
del attrs["radarlocations"]
assert requested_attrs == attrs
@pytest.mark.remote
def test_radar_request_radvor_rq_timerange():
"""
Verify acquisition of radar/radvor/rq data works
when using a specific date. Querying for 45 minutes
worth of data should yield 9 results.
https://opendata.dwd.de/weather/radar/radvor/rq/
"""
timestamp = datetime.utcnow() - timedelta(days=1)
request = DwdRadarValues(
parameter=DwdRadarParameter.RQ_REFLECTIVITY,
start_date=timestamp,
end_date=timedelta(minutes=3 * 15),
)
# Verify number of elements.
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
assert len(results) == 3 * 3
# TODO: Verify data.
|
py | 1a358d0232d3030d5577e8317764879874795ada | # coding: utf-8
import os
import copy
import collections
import collections.abc # 2022.02.28 - Python 3.3 or greater
import types
from collections import namedtuple
# 2022.02.28 - Python 3.3 or greater; import from __init__.py
from . import PY3K, PY3K3
from jinja2 import nodes
from jinja2 import Environment, TemplateNotFound, FileSystemLoader
from jinja2.ext import Extension
from jinja2.loaders import split_template_path
from jinja2.utils import open_if_exists
from schema import Schema
from snaql.convertors import (
guard_bool,
guard_case,
guard_date,
guard_datetime,
guard_float,
guard_integer,
guard_regexp,
guard_string,
guard_time,
guard_timedelta,
)
class RawFileSystemLoader(FileSystemLoader):
def get_source(self, environment, template):
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = os.path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = os.path.getmtime(filename)
# Need to save original raw template before compilation
environment.sql_params.setdefault('raws', {}).update({
template: [c.strip() for c in contents.splitlines()]
})
def uptodate():
try:
return os.path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
class JinjaSQLExtension(Extension):
tags = set(['sql', 'query'])
def parse(self, parser):
lineno = next(parser.stream).lineno
expr = parser.parse_expression()
args = [expr]
kwargs = [nodes.Keyword('func', expr)]
if parser.stream.skip_if('comma'):
# Optional 'note' for function docstring
if (
parser.stream.current.type == 'name' and
parser.stream.current.value in (
'note', 'cond_for', 'depends_on'
)
):
stream_type = parser.stream.current.value
next(parser.stream)
parser.stream.expect('assign')
# Depends meta is always a list
if stream_type == 'depends_on':
c_expr = parser.parse_list()
else:
c_expr = parser.parse_expression()
args.append(c_expr)
kwargs.append(nodes.Keyword(stream_type, c_expr))
body = parser.parse_statements(
['name:endsql', 'name:endquery'], drop_needle=True
)
raw_template = self.environment.sql_params['raws'][parser.name]
# Lines range of original raw template
raw_lines = slice(lineno, parser.stream.current.lineno-1)
self.environment.sql_params.setdefault('funcs', {}).update({
expr.value: {'raw_sql': '\n '.join(raw_template[raw_lines])}
})
call_node = nodes.Call(
self.attr('_sql_process', lineno=lineno),
args, kwargs, None, None
)
return nodes.CallBlock(call_node, [], [], body)
def _sql_process(self, *args, **kwargs):
caller = kwargs['caller']
raw_sql = '\n '.join(x.strip() for x in caller().split('\n') if x)
if 'cond_for' in kwargs:
origin = (
self.environment.sql_params['funcs'].get(kwargs['cond_for'])
)
if origin:
                origin.setdefault('conds', []).append(kwargs['func'])
origin = self.environment.sql_params['funcs'].get(kwargs['func'])
origin.update({
'sql': raw_sql,
'note': kwargs.get('note'),
'is_cond': 'cond_for' in kwargs,
'depends_on': kwargs.get('depends_on', []),
'node': None,
})
if origin['is_cond']:
origin['cond_for'] = kwargs['cond_for']
return raw_sql
class SnaqlDepNode(object):
def __init__(self, name):
self.name = name
self.edges = []
def add_edge(self, node):
self.edges.append(node)
def __str__(self):
return '<SnaqlDepNode %s>' % self.name
def __repr__(self):
return '<SnaqlDepNode %s>' % self.name
class SnaqlException(Exception):
pass
class Snaql(object):
def __init__(self, sql_root, sql_ns):
self.sql_root = sql_root
self.jinja_env = Environment(
trim_blocks=True,
extensions=[JinjaSQLExtension],
loader=RawFileSystemLoader(os.path.join(self.sql_root, sql_ns)),
)
self.jinja_env.filters.update({
'guards.string': guard_string,
'guards.integer': guard_integer,
'guards.datetime': guard_datetime,
'guards.date': guard_date,
'guards.float': guard_float,
'guards.timedelta': guard_timedelta,
'guards.time': guard_time,
'guards.case': guard_case,
'guards.regexp': guard_regexp,
'guards.bool': guard_bool,
})
self.jinja_env.extend(sql_params={})
def gen_func(self, name, meta_struct, env):
def subrender_cond(owner_name, cond_func, context):
if (
# 2022.02.28 - Handle deprecated collections class since Python 3.3
isinstance(cond_func, collections.abc.Callable if PY3K3 else collections.Callable) and
cond_func.is_cond
):
cond_struct = meta_struct['funcs'][cond_func.func_name]
if cond_struct['cond_for'] != owner_name:
raise SnaqlException(
'"%s" is not proper condition for "%s"' % (
cond_func.func_name,
owner_name
)
)
cond_tmpl = env.from_string(
meta_struct['funcs'][cond_func.func_name]['raw_sql']
)
return cond_tmpl.render(**context).strip()
return cond_func
def fn(**kwargs):
if meta_struct['funcs'][name]['is_cond']:
raise SnaqlException((
'"%s" is condition for "%s" and can not '
'be rendered outside of it\'s scope'
) % (name, meta_struct['funcs'][name]['cond_for']))
if kwargs:
for point, val in kwargs.items():
maybe_cond_sql = subrender_cond(name, val, kwargs)
if maybe_cond_sql:
kwargs[point] = maybe_cond_sql
if (
# 2022.02.28 - Handle deprecated collections class since Python 3.3
isinstance(val, collections.abc.Iterable if PY3K3 else collections.Iterable) and
not isinstance(
val, (str if PY3K else types.StringTypes, dict)
)
):
val = [subrender_cond(name, v, kwargs) for v in val]
kwargs[point] = [v for v in val if v]
if 'schema' in kwargs and isinstance(kwargs['schema'], Schema):
validation_schema = kwargs.pop('schema')
kwargs = validation_schema.validate(kwargs)
sql_tmpl = (
env.from_string(meta_struct['funcs'][name]['raw_sql'])
)
return sql_tmpl.render(**kwargs).strip()
return meta_struct['funcs'][name]['sql']
fn.__doc__ = meta_struct['funcs'][name]['note']
fn.is_cond = meta_struct['funcs'][name]['is_cond']
fn.func_name = str(name)
return fn
def gen_dep_graph(self, node, accum):
for edge in node.edges:
if edge not in accum:
self.gen_dep_graph(edge, accum)
accum.append(node)
return accum
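    # Example: for blocks {'A': depends_on ['B'], 'B': []} the traversal
    # returns [B-node, A-node, root], i.e. dependencies first (post-order);
    # load_queries() below pops the trailing root node.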
def load_queries(self, sql_path):
template = self.jinja_env.get_template(sql_path)
template.render()
factory_methods = {}
meta_struct = copy.deepcopy(self.jinja_env.sql_params)
blocks = set(meta_struct['funcs'])
node = SnaqlDepNode('root')
for name, block in meta_struct['funcs'].items():
# Dependency graph building
block['node'] = block['node'] or SnaqlDepNode(name)
for dep in block['depends_on']:
if dep not in blocks:
raise SnaqlException(
'"%s" block not found in "%s"' % (dep, sql_path)
)
if meta_struct['funcs'][dep]['node'] is None:
meta_struct['funcs'][dep]['node'] = SnaqlDepNode(dep)
block['node'].add_edge(meta_struct['funcs'][dep]['node'])
node.add_edge(block['node'])
fn = self.gen_func(name, meta_struct, self.jinja_env)
factory_methods[name] = fn
edges_accum = []
graph = self.gen_dep_graph(node, edges_accum)
graph.pop() # root node
factory_methods['ordered_blocks'] = [
factory_methods[n.name]
for n in graph
]
factory = namedtuple('SQLFactory', factory_methods.keys())
struct = factory(*factory_methods.values())
self.jinja_env.sql_params.clear()
return struct
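# Usage sketch (hypothetical paths and block names): given a template
# ``queries/users.sql`` containing
#
#   {% sql 'get_all', note='fetch all users' %}
#       SELECT * FROM users
#   {% endsql %}
#
# the factory can be built and a query rendered like this:
#
#   snaql_factory = Snaql('/path/to/sql_root', 'queries')
#   users_queries = snaql_factory.load_queries('users.sql')
#   sql = users_queries.get_all()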
|
py | 1a358d7e44346a09d7f797338bbec6dd185d5125 | from unittest import mock
import pytest
from directory_api_client.base import AbstractAPIClient
class APIClient(AbstractAPIClient):
version = 123
@pytest.fixture
def client():
return APIClient(
base_url='https://example.com',
api_key='test',
sender_id='test',
timeout=5,
)
@mock.patch.object(AbstractAPIClient, 'fallback_cache_get')
def test_fallback_cache_used(mock_fallback_cache_get, client):
client.get('http://www.thing.com', use_fallback_cache=True)
assert mock_fallback_cache_get.call_count == 1
assert mock_fallback_cache_get.call_args == mock.call('http://www.thing.com')
@mock.patch.object(AbstractAPIClient, 'get')
@mock.patch.object(AbstractAPIClient, 'fallback_cache_get')
def test_fallback_cache_not_used(mock_fallback_cache_get, mock_get, client):
client.get('http://www.thing.com')
assert mock_fallback_cache_get.call_count == 0
assert mock_get.call_count == 1
|
py | 1a358db5976b233c568b315b8f19a7768f32c480 |
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import tempfile
import tarfile
import radical.utils as ru
import radical.saga as rs
rs.fs = rs.filesystem
from ... import states as rps
from ... import constants as rpc
from ... import utils as rpu
from .base import TMGRStagingInputComponent
from ...staging_directives import complete_url
# if we receive more than a certain number of tasks in a bulk, we create the
# task sandboxes in a remote bulk op. That limit is defined here, along with
# the definition of the bulk mechanism used to create the sandboxes:
# saga: use SAGA bulk ops
# tar : unpack a locally created tar which contains all sandboxes
TASK_BULK_MKDIR_THRESHOLD = 16
TASK_BULK_MKDIR_MECHANISM = 'tar'
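# A rough, illustrative sketch of the packing step behind the 'tar' mechanism
# (the real code below uses ``rpu.create_tar``, whose exact behaviour may
# differ; this helper is only an illustration and is not used):
#
#   def _sketch_pack_sandboxes(tar_path, rel_sandboxes):
#       with tarfile.open(tar_path, 'w') as tar:
#           for rel in rel_sandboxes:
#               info = tarfile.TarInfo(rel)
#               info.type = tarfile.DIRTYPE   # empty directory entry
#               tar.addfile(info)             # one remote untar creates all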
# ------------------------------------------------------------------------------
#
class Default(TMGRStagingInputComponent):
"""
This component performs all tmgr side input staging directives for compute
tasks. It gets tasks from the tmgr_staging_input_queue, in
TMGR_STAGING_INPUT_PENDING state, will advance them to TMGR_STAGING_INPUT
state while performing the staging, and then moves then to the
AGENT_SCHEDULING_PENDING state, passing control to the agent.
"""
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
TMGRStagingInputComponent.__init__(self, cfg, session)
# --------------------------------------------------------------------------
#
def initialize(self):
# we keep a cache of SAGA dir handles
self._fs_cache = dict()
self._js_cache = dict()
self._pilots = dict()
self._pilots_lock = ru.RLock()
self.register_input(rps.TMGR_STAGING_INPUT_PENDING,
rpc.TMGR_STAGING_INPUT_QUEUE, self.work)
# FIXME: this queue is inaccessible, needs routing via mongodb
self.register_output(rps.AGENT_STAGING_INPUT_PENDING, None)
# we subscribe to the command channel to learn about pilots being added
# to this task manager.
self.register_subscriber(rpc.CONTROL_PUBSUB, self._base_command_cb)
# --------------------------------------------------------------------------
#
def finalize(self):
for fs in list(self._fs_cache.values()): fs.close()
for js in list(self._js_cache.values()): js.close()
# --------------------------------------------------------------------------
#
def _base_command_cb(self, topic, msg):
# keep track of `add_pilots` commands and updates self._pilots
# accordingly.
cmd = msg.get('cmd')
arg = msg.get('arg')
if cmd not in ['add_pilots']:
            self._log.debug('skip cmd %s', cmd)
            return True
pilots = arg.get('pilots', [])
if not isinstance(pilots, list):
pilots = [pilots]
with self._pilots_lock:
for pilot in pilots:
pid = pilot['uid']
self._log.debug('add pilot %s', pid)
if pid not in self._pilots:
self._pilots[pid] = pilot
return True
# --------------------------------------------------------------------------
#
def work(self, tasks):
if not isinstance(tasks, list):
tasks = [tasks]
self.advance(tasks, rps.TMGR_STAGING_INPUT, publish=True, push=False)
# we first filter out any tasks which don't need any input staging, and
# advance them again as a bulk. We work over the others one by one, and
# advance them individually, to avoid stalling from slow staging ops.
no_staging_tasks = list()
staging_tasks = list()
for task in tasks:
# no matter if we perform any staging or not, we will push the full
# task info to the DB on the next advance, and will pass control to
# the agent.
task['$all'] = True
task['control'] = 'agent_pending'
# check if we have any staging directives to be enacted in this
# component
actionables = list()
for sd in task['description'].get('input_staging', []):
if sd['action'] in [rpc.TRANSFER, rpc.TARBALL]:
actionables.append(sd)
if actionables:
staging_tasks.append([task, actionables])
else:
no_staging_tasks.append(task)
# Optimization: if we obtained a large bulk of tasks, we at this point
# attempt a bulk mkdir for the task sandboxes, to free the agent of
# performing that operation. That implies that the agent needs to check
# sandbox existence before attempting to create them now.
#
        # Note that this relies on the tmgr scheduler assigning the sandbox
# to the task.
#
# Note further that we need to make sure that all tasks are actually
# pointing into the same target file system, so we need to cluster by
# filesystem before checking the bulk size. For simplicity we actually
# cluster by pilot ID, which is sub-optimal for task bulks which go to
# different pilots on the same resource (think OSG).
#
# Note further that we skip the bulk-op for all tasks for which we
# actually need to stage data, since the mkdir will then implicitly be
# done anyways.
#
# Caveat: we can actually only (reasonably) do this if we know some
# details about the pilot, because otherwise we'd have too much guessing
# to do about the pilot configuration (sandbox, access schema, etc), so
# we only attempt this optimization for tasks scheduled to pilots for
# which we learned those details.
task_sboxes_by_pid = dict()
for task in no_staging_tasks:
sbox = task['task_sandbox']
pid = task['pilot']
if pid not in task_sboxes_by_pid:
task_sboxes_by_pid[pid] = list()
task_sboxes_by_pid[pid].append(sbox)
# now trigger the bulk mkdir for all filesystems which have more than
        # a certain number of tasks to handle in this bulk:
for pid in task_sboxes_by_pid:
with self._pilots_lock:
pilot = self._pilots.get(pid)
if not pilot:
# we don't feel inclined to optimize for unknown pilots
                self._log.debug('pid %s unknown - skip optimization', pid)
continue
session_sbox = self._session._get_session_sandbox(pilot)
task_sboxes = task_sboxes_by_pid[pid]
if len(task_sboxes) >= TASK_BULK_MKDIR_THRESHOLD:
self._log.debug('tar %d sboxes', len(task_sboxes))
# no matter the bulk mechanism, we need a SAGA handle to the
# remote FS
sbox_fs = ru.Url(session_sbox) # deep copy
sbox_fs.path = '/'
sbox_fs_str = str(sbox_fs)
if sbox_fs_str not in self._fs_cache:
self._fs_cache[sbox_fs_str] = \
rs.fs.Directory(sbox_fs, session=self._session)
saga_dir = self._fs_cache[sbox_fs_str]
# we have two options for a bulk mkdir:
# 1) ask SAGA to create the sandboxes in a bulk op
# 2) create a tarball with all task sandboxes, push
# it over, and untar it (one untar op then creates all dirs).
# We implement both
if TASK_BULK_MKDIR_MECHANISM == 'saga':
tc = rs.task.Container()
for sbox in task_sboxes:
tc.add(saga_dir.make_dir(sbox, ttype=rs.TASK))
tc.run()
tc.wait()
elif TASK_BULK_MKDIR_MECHANISM == 'tar':
tmp_path = tempfile.mkdtemp(prefix='rp_agent_tar_dir')
tmp_dir = os.path.abspath(tmp_path)
tar_name = '%s.%s.tar' % (self._session.uid, self.uid)
tar_tgt = '%s/%s' % (tmp_dir, tar_name)
tar_url = ru.Url('file://localhost/%s' % tar_tgt)
# we want pathnames which are relative to the session
# sandbox. Ignore all other sandboxes - the agent will have
# to create those.
root = str(session_sbox)
rlen = len(root)
rels = list()
for path in task_sboxes:
if path.startswith(root):
rels.append(path[rlen + 1:])
rpu.create_tar(tar_tgt, rels)
tar_rem_path = "%s/%s" % (str(session_sbox), tar_name)
self._log.debug('sbox: %s [%s]', session_sbox,
type(session_sbox))
self._log.debug('copy: %s -> %s', tar_url, tar_rem_path)
saga_dir.copy(tar_url, tar_rem_path,
flags=rs.fs.CREATE_PARENTS)
# get a job service handle to the target resource and run
# the untar command. Use the hop to skip the batch system
js_url = pilot['js_hop']
self._log.debug('js : %s', js_url)
if js_url in self._js_cache:
js_tmp = self._js_cache[js_url]
else:
js_tmp = rs.job.Service(js_url, session=self._session)
self._js_cache[js_url] = js_tmp
cmd = "tar xvf %s/%s -C %s" % (session_sbox.path, tar_name,
session_sbox.path)
j = js_tmp.run_job(cmd)
j.wait()
self._log.debug('untar : %s', cmd)
self._log.debug('untar : %s\n---\n%s\n---\n%s',
j.get_stdout_string(), j.get_stderr_string(),
j.exit_code)
if no_staging_tasks:
# nothing to stage, push to the agent
self.advance(no_staging_tasks, rps.AGENT_STAGING_INPUT_PENDING,
publish=True, push=True)
for task,actionables in staging_tasks:
self._handle_task(task, actionables)
# --------------------------------------------------------------------------
#
def _handle_task(self, task, actionables):
        # FIXME: we should create task sandboxes in bulk
uid = task['uid']
self._prof.prof("create_sandbox_start", uid=uid)
src_context = {'pwd' : os.getcwd(), # !!!
'task' : task['task_sandbox'],
'pilot' : task['pilot_sandbox'],
'resource' : task['resource_sandbox']}
tgt_context = {'pwd' : task['task_sandbox'], # !!!
'task' : task['task_sandbox'],
'pilot' : task['pilot_sandbox'],
'resource' : task['resource_sandbox']}
# we have actionable staging directives, and thus we need a task
# sandbox.
sandbox = rs.Url(task["task_sandbox"])
tmp = rs.Url(task["task_sandbox"])
# url used for cache (sandbox url w/o path)
tmp.path = '/'
key = str(tmp)
self._log.debug('key %s / %s', key, tmp)
if key not in self._fs_cache:
self._fs_cache[key] = rs.fs.Directory(tmp, session=self._session)
saga_dir = self._fs_cache[key]
saga_dir.make_dir(sandbox, flags=rs.fs.CREATE_PARENTS)
self._prof.prof("create_sandbox_stop", uid=uid)
# Loop over all transfer directives and filter out tarball staging
# directives. Those files are added into a tarball, and a single
# actionable to stage that tarball replaces the original actionables.
# create a new actionable list during the filtering
new_actionables = list()
tar_file = None
for sd in actionables:
# don't touch non-tar SDs
if sd['action'] != rpc.TARBALL:
new_actionables.append(sd)
else:
action = sd['action']
flags = sd['flags'] # NOTE: we don't use those
did = sd['uid']
src = sd['source']
tgt = sd['target']
src = complete_url(src, src_context, self._log)
tgt = complete_url(tgt, tgt_context, self._log)
self._prof.prof('staging_in_tar_start', uid=uid, msg=did)
# create a tarfile on the first match, and register for transfer
if not tar_file:
tmp_file = tempfile.NamedTemporaryFile(
prefix='rp_usi_%s.' % uid,
suffix='.tar',
delete=False)
tar_path = tmp_file.name
tar_file = tarfile.open(fileobj=tmp_file, mode='w')
tar_src = ru.Url('file://localhost/%s' % tar_path)
tar_tgt = ru.Url('task:////%s.tar' % uid)
tar_did = ru.generate_id('sd')
tar_sd = {'action' : rpc.TRANSFER,
'flags' : rpc.DEFAULT_FLAGS,
'uid' : tar_did,
'source' : str(tar_src),
'target' : str(tar_tgt),
}
new_actionables.append(tar_sd)
# add the src file
tar_file.add(src.path, arcname=tgt.path)
self._prof.prof('staging_in_tar_stop', uid=uid, msg=did)
# make sure tarball is flushed to disk
if tar_file:
tar_file.close()
# work on the filtered TRANSFER actionables
for sd in new_actionables:
action = sd['action']
flags = sd['flags']
did = sd['uid']
src = sd['source']
tgt = sd['target']
if action == rpc.TRANSFER:
src = complete_url(src, src_context, self._log)
tgt = complete_url(tgt, tgt_context, self._log)
# Check if the src is a folder, if true
# add recursive flag if not already specified
if os.path.isdir(src.path):
flags |= rs.fs.RECURSIVE
# Always set CREATE_PARENTS
flags |= rs.fs.CREATE_PARENTS
self._prof.prof('staging_in_start', uid=uid, msg=did)
saga_dir.copy(src, tgt, flags=flags)
self._prof.prof('staging_in_stop', uid=uid, msg=did)
if tar_file:
# some tarball staging was done. Add a staging directive for the
# agent to untar the tarball, and clean up.
tar_sd['action'] = rpc.TARBALL
task['description']['input_staging'].append(tar_sd)
os.remove(tar_path)
# staging is done, we can advance the task at last
self.advance(task, rps.AGENT_STAGING_INPUT_PENDING,
publish=True, push=True)
# ------------------------------------------------------------------------------
|
py | 1a358e15e34de22fda1dddccb91a4925e0cb4e35 | # Copyright 2018 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license agreement
# provided at the time of installation or download, or which otherwise accompanies
# this software in either electronic or hard copy form.
#
from sg_jira.handlers import EntityIssueHandler
from sg_jira.constants import (
SHOTGUN_JIRA_ID_FIELD,
SHOTGUN_SYNC_IN_JIRA_FIELD,
SHOTGUN_JIRA_URL_FIELD,
)
from sg_jira.errors import InvalidShotgunValue
class AssetIssueHandler(EntityIssueHandler):
"""
A handler which syncs a ShotGrid Asset as a Jira Issue
"""
# Define the mapping between Shotgun Asset fields and Jira Issue fields
__ASSET_FIELDS_MAPPING = {
"code": "summary",
"description": "description",
"tags": "labels",
"created_by": "reporter",
"tasks": None,
"sg_status_list": None,
}
# The type of Issue link to use when linking a Task Issue to the Issue
# representing the Asset.
__JIRA_PARENT_LINK_TYPE = "relates to"
# Define the mapping between Jira Issue fields and Shotgun Asset fields
# if the Shotgun target is None, it means the target field is not settable
# directly.
__ISSUE_FIELDS_MAPPING = {
"summary": "code",
"description": "description",
"status": "sg_status_list",
"labels": "tags",
}
@property
def _shotgun_asset_fields(self):
"""
Return the list of fields to ask for when retrieving an Asset from
ShotGrid.
"""
return [
"project.Project.%s" % SHOTGUN_JIRA_ID_FIELD,
"project.Project.name",
SHOTGUN_JIRA_ID_FIELD,
] + self._supported_shotgun_fields_for_shotgun_event()
@property
def _sg_jira_status_mapping(self):
"""
Return a dictionary where keys are ShotGrid status short codes and values
are Jira Issue status names.
"""
return {
"wtg": "To Do",
"rdy": "Open",
"ip": "In Progress",
"fin": "Done",
"hld": "Backlog",
"omt": "Closed",
}
@property
def _supported_shotgun_fields_for_jira_event(self):
""""
Return the list of fields this handler can process for a Jira event.
:returns: A list of strings.
"""
# By convention we might have `None` as values in our mapping dictionary
# meaning that we handle a specific Jira field but there is not a direct
# mapping to a Shotgun field and a special logic must be implemented
# and called to perform the update to Shotgun.
return [field for field in self.__ISSUE_FIELDS_MAPPING.values() if field]
def _supported_shotgun_fields_for_shotgun_event(self):
"""
Return the list of ShotGrid fields that this handler can process for a
ShotGrid to Jira event.
"""
return list(self.__ASSET_FIELDS_MAPPING.keys())
def _get_jira_issue_field_for_shotgun_field(
self, shotgun_entity_type, shotgun_field
):
"""
Returns the Jira Issue field id to use to sync the given ShotGrid Entity
type field.
:param str shotgun_entity_type: A ShotGrid Entity type, e.g. 'Task'.
:param str shotgun_field: A ShotGrid Entity field name, e.g. 'sg_status_list'.
:returns: A string or ``None``.
"""
if shotgun_entity_type != "Asset":
return None
return self.__ASSET_FIELDS_MAPPING.get(shotgun_field)
def _get_shotgun_entity_field_for_issue_field(self, jira_field_id):
"""
Returns the ShotGrid field name to use to sync the given Jira Issue field.
:param str jira_field_id: A Jira Issue field id, e.g. 'summary'.
:returns: A string or ``None``.
"""
return self.__ISSUE_FIELDS_MAPPING.get(jira_field_id)
def _sync_asset_to_jira(self, shotgun_asset, event_meta=None):
"""
Update an existing Jira Issue from the ShotGrid Asset fields.
:param shotgun_asset: A ShotGrid Asset dictionary.
:param event_meta: A ShotGrid Event meta data dictionary or ``None``.
:returns: ``True`` if a Jira Issue was updated, ``False`` otherwise.
"""
jira_issue_key = shotgun_asset[SHOTGUN_JIRA_ID_FIELD]
if not jira_issue_key:
return False
jira_issue = self._get_jira_issue_and_validate(jira_issue_key, shotgun_asset)
if not jira_issue:
return False
# Process all supported fields if no event meta data was provided.
if not event_meta:
return self._sync_shotgun_fields_to_jira(shotgun_asset, jira_issue)
sg_field = event_meta["attribute_name"]
try:
jira_field, jira_value = self._get_jira_issue_field_sync_value(
jira_issue.fields.project,
jira_issue,
shotgun_asset["type"],
sg_field,
event_meta.get("added"),
event_meta.get("removed"),
event_meta.get("new_value"),
)
except InvalidShotgunValue as e:
self._logger.warning(
"Unable to update Jira %s %s for event %s: %s"
% (jira_issue.fields.issuetype.name, jira_issue.key, event_meta, e,)
)
self._logger.debug("%s" % e, exc_info=True)
return False
if jira_field:
self._logger.debug(
"Updating Jira %s %s field with %s"
% (jira_issue, jira_field, jira_value)
)
jira_issue.update(fields={jira_field: jira_value})
return True
# Special cases not handled by a direct update
if sg_field == "sg_status_list":
shotgun_status = event_meta["new_value"]
return self._sync_shotgun_status_to_jira(
jira_issue,
shotgun_status,
"Updated from Shotgun %s(%d) moving to %s"
% (shotgun_asset["type"], shotgun_asset["id"], shotgun_status),
)
return False
def _get_jira_issue_link(self, from_issue, to_issue_key):
"""
Retrieve an existing link between the given Jira Issue and another Issue
with the given key.
:param from_issue: A :class:`jira.Issue` instance.
:param str to_issue_key: An Issue key.
:returns: An Issue link or ``None``.
"""
for issue_link in from_issue.fields.issuelinks:
# Depending link directions we either get "inwardIssue" or "outwardIssue"
# populated.
if issue_link.raw.get("inwardIssue"):
if issue_link.inwardIssue.key == to_issue_key:
# Note: we don't check the Issue Link type and return any link
                    # which is in the right direction.
return issue_link
return None
def _sync_asset_tasks_change_to_jira(self, shotgun_asset, added, removed):
"""
Update Jira with tasks changes for the given ShotGrid Asset.
:param shotgun_asset: A ShotGrid Asset dictionary.
:param added: A list of ShotGrid Task dictionaries which were added to
the given Asset.
:param removed: A list of ShotGrid Task dictionaries which were removed from
the given Asset.
        :returns: ``True`` if the given changes could be processed successfully,
``False`` otherwise.
"""
jira_issue_key = shotgun_asset[SHOTGUN_JIRA_ID_FIELD]
jira_issue = None
if jira_issue_key:
# Retrieve the Issue if we should have one
jira_issue = self.get_jira_issue(jira_issue_key)
if not jira_issue:
self._logger.warning(
"Unable to find Jira Issue %s for Shotgun Asset %s"
% (jira_issue_key, shotgun_asset)
)
# Better to stop processing.
return False
updated = False
if jira_issue and removed:
# Check if we should update dependencies because it was attached to
# a synced Task which has been removed.
sg_tasks = self._shotgun.find(
"Task",
[
["id", "in", [x["id"] for x in removed]],
[SHOTGUN_JIRA_ID_FIELD, "is_not", None],
[SHOTGUN_SYNC_IN_JIRA_FIELD, "is", True],
],
["content", SHOTGUN_JIRA_ID_FIELD],
)
to_delete = []
for sg_task in sg_tasks:
issue_link = self._get_jira_issue_link(
jira_issue, sg_task[SHOTGUN_JIRA_ID_FIELD]
)
if issue_link:
self._logger.debug(
"Found a Jira link between %s and %s to delete"
% (jira_issue.key, sg_task[SHOTGUN_JIRA_ID_FIELD])
)
to_delete.append(issue_link)
else:
self._logger.debug(
"Didn't find a Jira link between %s and %s to delete"
% (jira_issue.key, sg_task[SHOTGUN_JIRA_ID_FIELD])
)
# Delete the links, if any
for issue_link in to_delete:
self._logger.info("Deleting Jira link %s" % (issue_link))
self._jira.delete_issue_link(issue_link.id)
updated = True
if added:
# Collect the list of Tasks which are linked to Jira Issues
sg_tasks = self._shotgun.find(
"Task",
[
["id", "in", [x["id"] for x in added]],
[SHOTGUN_JIRA_ID_FIELD, "is_not", None],
[SHOTGUN_SYNC_IN_JIRA_FIELD, "is", True],
],
["content", SHOTGUN_JIRA_ID_FIELD, SHOTGUN_SYNC_IN_JIRA_FIELD],
)
if not sg_tasks:
# Nothing to do
return False
if not jira_issue:
# Check if the Project is linked to a Jira Project
jira_project_key = shotgun_asset[
"project.Project.%s" % SHOTGUN_JIRA_ID_FIELD
]
if not jira_project_key:
self._logger.debug(
"Skipping tasks change event for %s (%d) for Project %s "
"not linked to a Jira Project"
% (
shotgun_asset["type"],
shotgun_asset["id"],
shotgun_asset["project"],
)
)
return False
jira_project = self.get_jira_project(jira_project_key)
if not jira_project:
self._logger.warning(
"Unable to find Jira Project %s for Shotgun Project %s."
% (jira_project_key, shotgun_asset["project"],)
)
return False
# Time to create the Issue
jira_issue = self._create_jira_issue_for_entity(
shotgun_asset,
jira_project,
self._issue_type,
summary=shotgun_asset["code"],
timetracking={
"originalEstimate": "0 m" # Null estimate in the case it is mandatory
},
)
self._shotgun.update(
shotgun_asset["type"],
shotgun_asset["id"],
{
SHOTGUN_JIRA_ID_FIELD: jira_issue.key,
SHOTGUN_JIRA_URL_FIELD: {
"url": jira_issue.permalink(),
"name": "View in Jira",
},
},
)
updated = True
for sg_task in sg_tasks:
issue_link = self._get_jira_issue_link(
jira_issue, sg_task[SHOTGUN_JIRA_ID_FIELD]
)
if not issue_link:
self._logger.info(
"Linking Jira Issue %s to %s"
% (jira_issue.key, sg_task[SHOTGUN_JIRA_ID_FIELD])
)
self._jira.create_issue_link(
type=self.__JIRA_PARENT_LINK_TYPE,
# Note: depending on the link type, e.g. "blocks" or
# "is blocked", the inward and outward values might need
# to be swapped
inwardIssue=sg_task[SHOTGUN_JIRA_ID_FIELD],
outwardIssue=jira_issue.key,
comment={
"body": "Linking %s to %s"
% (shotgun_asset["code"], sg_task["content"],),
},
)
updated = True
else:
self._logger.debug(
"Jira Issue %s is already linked to %s"
% (jira_issue.key, sg_task[SHOTGUN_JIRA_ID_FIELD])
)
return updated
def _sync_shotgun_fields_to_jira(
self, sg_entity, jira_issue, exclude_shotgun_fields=None
):
"""
Update the given Jira Issue with values from the given ShotGrid Entity.
An optional list of ShotGrid fields can be provided to exclude them from
the sync.
:param sg_entity: A ShotGrid Entity dictionary.
:param jira_issue: A :class:`jira.Issue` instance.
:param exclude_shotgun_fields: An optional list of ShotGrid field names which
shouldn't be synced.
"""
if exclude_shotgun_fields is None:
exclude_shotgun_fields = []
issue_data = {}
for sg_field, jira_field in self.__ASSET_FIELDS_MAPPING.items():
if sg_field in exclude_shotgun_fields:
continue
if jira_field is None:
# Special cases where a direct update is not possible.
continue
shotgun_value = sg_entity[sg_field]
if isinstance(shotgun_value, list):
removed = []
added = shotgun_value
new_value = None
else:
removed = None
added = None
new_value = shotgun_value
try:
jira_field, jira_value = self._get_jira_issue_field_sync_value(
jira_issue.fields.project,
jira_issue,
sg_entity["type"],
sg_field,
added,
removed,
new_value,
)
if jira_field:
issue_data[jira_field] = jira_value
except InvalidShotgunValue as e:
self._logger.warning(
"Unable to update Jira %s %s %s field from Shotgun value %s: %s"
% (
jira_issue.fields.issuetype.name,
jira_issue.key,
jira_field,
shotgun_value,
e,
)
)
self._logger.debug("%s" % e, exc_info=True)
if issue_data:
self._logger.debug(
"Updating Jira %s %s with %s. Currently: %s"
% (
jira_issue.fields.issuetype.name,
jira_issue.key,
issue_data,
jira_issue,
)
)
jira_issue.update(fields=issue_data)
# Sync status
if "sg_status_list" not in exclude_shotgun_fields:
self._sync_shotgun_status_to_jira(
jira_issue,
sg_entity["sg_status_list"],
"Updated from Shotgun %s(%d) moving to %s"
% (sg_entity["type"], sg_entity["id"], sg_entity["sg_status_list"]),
)
def _sync_shotgun_task_asset_to_jira(self, shotgun_task):
"""
Sync the Asset attached to the given ShotGrid Task to Jira.
:param shotgun_task: A ShotGrid Task dictionary.
:returns: ``True`` if any update happened, ``False`` otherwise.
"""
        # Retrieve the Asset linked to the Task, if any
        shotgun_asset = self._shotgun.find_one(
            "Asset", [["tasks", "is", shotgun_task]], self._shotgun_asset_fields
        )
        if not shotgun_asset:
            return False
        # make sure we have a full entity with the injected "name" key, etc.
        shotgun_asset = self._shotgun.consolidate_entity(
            shotgun_asset, fields=self._shotgun_asset_fields
        )
        self._logger.debug(
            "Retrieved Asset %s linked to Task %s" % (shotgun_asset, shotgun_task)
        )
updated = False
res = self._sync_asset_tasks_change_to_jira(
shotgun_asset, added=[shotgun_task], removed=[]
)
if res:
updated = True
if self._sync_asset_to_jira(shotgun_asset):
updated = True
return updated
def setup(self):
"""
Check the Jira and ShotGrid site, ensure that the sync can safely happen.
This can be used as well to cache any value which is slow to retrieve.
"""
self._shotgun.assert_field(
"Asset", SHOTGUN_JIRA_ID_FIELD, "text", check_unique=True
)
self._shotgun.assert_field("Asset", SHOTGUN_JIRA_URL_FIELD, "url")
def accept_shotgun_event(self, entity_type, entity_id, event):
"""
Accept or reject the given event for the given ShotGrid Entity.
:returns: ``True`` if the event is accepted for processing, ``False`` otherwise.
"""
# We only accept Assets
if entity_type != "Asset":
return False
meta = event["meta"]
field = meta["attribute_name"]
if field not in self._supported_shotgun_fields_for_shotgun_event():
self._logger.debug(
"Rejecting Shotgun event with unsupported Shotgun field %s: %s"
% (field, event)
)
return False
return True
def process_shotgun_event(self, entity_type, entity_id, event):
"""
Process the given ShotGrid event for the given ShotGrid Entity
:param str entity_type: The ShotGrid Entity type to sync.
:param int entity_id: The id of the ShotGrid Entity to sync.
:param event: A dictionary with the event meta data for the change.
:returns: True if the event was successfully processed, False if the
sync didn't happen for any reason.
"""
meta = event["meta"]
shotgun_field = meta["attribute_name"]
if shotgun_field == SHOTGUN_SYNC_IN_JIRA_FIELD:
# Note: in this case the Entity is a Task.
return self._sync_shotgun_task_asset_to_jira(
{"type": entity_type, "id": entity_id}
)
asset_fields = [
"project",
"project.Project.%s" % SHOTGUN_JIRA_ID_FIELD,
"project.Project.name",
SHOTGUN_JIRA_ID_FIELD,
] + self._supported_shotgun_fields_for_shotgun_event()
sg_entity = self._shotgun.consolidate_entity(
{"type": entity_type, "id": entity_id}, fields=asset_fields
)
if not sg_entity:
self._logger.warning(
"Unable to find Shotgun %s (%s)." % (entity_type, entity_id)
)
return False
# When an Entity is created in Shotgun, a unique event is generated for
# each field value set in the creation of the Entity. These events
# have an additional "in_create" key in the metadata, identifying them
# as events from the initial create event.
#
# When the bridge processes the first event, it loads all of the Entity
# field values from Shotgun and creates the Jira Issue with those
# values. So the remaining Shotgun events with the "in_create"
# metadata key can be ignored since we've already handled all of
# those field updates.
# We use the Jira id field value to check if we're processing the first
# event. If it exists with in_create, we know the comment has already
# been created.
if sg_entity[SHOTGUN_JIRA_ID_FIELD] and meta.get("in_create"):
self._logger.debug(
"Rejecting Shotgun event for %s.%s field update during "
"create. Issue was already created in Jira: %s"
% (sg_entity["type"], shotgun_field, event)
)
return False
# Update existing synced Issue (if any) Issue dependencies
# Note: deleting a Task does not seem to trigger an Asset.tasks change?
if shotgun_field == "tasks":
return self._sync_asset_tasks_change_to_jira(
sg_entity, meta["added"], meta["removed"],
)
# Update the Jira Issue itself
return self._sync_asset_to_jira(sg_entity, meta)
|
py | 1a35905bc12badfbfbd693fb1fcd0fc87df209cf | from field import *
from getMirnov import *
from scipy.constants import mu_0
#SDAS
shotV=42952
shotH=44330 #44123 175A, 42966 260A, 44330 XA
shotP=43066
#Coil signals
vert, times, tbs = getSignal(ch_vert, shotV )
hor, times, tbs = getSignal(ch_hor, shotH )
prim, times, tbs = getSignal(ch_prim, shotP )
#mirnov signals
times, dataV = getMirnovs(shotV,mirnv,True)
times, dataH = getMirnovs(shotH,mirnv,True)
times, dataP = getMirnovs(shotP,mirnv,True)
#computes the flux on each mirnov normalized for a 1 amp current running on coils in Rw,Zw
def getMirnovFlux(Rw_,Zw_,polarity,windings,biotSavart=True):
#mirnov positions
radius=9.35 #cm
angle=345. - 30.*np.arange(12)
geometryZ=radius*np.sin(np.radians(angle)) #positions of the mirnovs
geometryR=radius*np.cos(np.radians(angle))
#loop on the mirnovs
Hr=np.zeros(len(angle))
Hz=np.zeros(len(angle))
i=0
for r,z in zip(geometryR,geometryZ):
#loop on the PFCs
for Rw,Zw, sign in zip(Rw_,Zw_,polarity):
if biotSavart:
coilHr, coilHz= biotsavart((r+46.)*1e-2, z*1e-2, Rw*1e-2,Zw*1e-2,1.0) #46.
else:
coilHr, coilHz= Hcoil((r+46.)*1e-2, z*1e-2, Rw*1e-2,Zw*1e-2) #46.
Hr[i]+=sign*coilHr
Hz[i]+=sign*coilHz
i+=1
Hr=np.asarray(Hr)
Hz=np.asarray(Hz)
Hp=-Hr*np.sin(np.radians(angle))+Hz*np.cos(np.radians(angle))
return Hp*windings*50*49e-6
V=getMirnovFlux([58.,58.,35.,35.],[-7.,7.,-7.,7.],[-1.,-1.,1.,1.],5)*340
H=getMirnovFlux([58.,58.],[-7.,7.],[+1.,-1.],4)*260
P=getMirnovFlux([62.,62.],[-13.,13.],[+1.,1.],14)*157
# get the mirnov flat-top value with heaviside shots
def flatTops (data,from_=4000, to_=6000):
return np.asarray([np.mean(np.array(i)[from_:to_]) for i in data])
#Horizontal coils position calculation
'''
np.asarray(squareSums)
%matplotlib qt4
plt.figure()
plt.contourf(np.arange(-0.5,0.5,0.1),np.arange(-1.,1.,0.1),np.transpose(np.asarray(squareSums)))
plt.colorbar()
plt.xlabel("dR")
plt.ylabel("dZ")
'''
ftH=flatTops(dataH,2000,4000)
xx1=np.arange(-4,0,0.2)
xx2=np.arange(-7,-2,0.2)
yy1=np.arange(-4,0,0.2)
yy2=np.arange(2,6,0.2)
ii=np.arange(0,1,1)
error_min=[1,0,0,0,0,0]
for di in ii:
for dx1 in xx1:
        print(dx1)
for dx2 in xx2:
for dy1 in yy1:
for dy2 in yy2:
H=getMirnovFlux([58.+dx1,58.+dx2],[-7+dy1,7.+dy2],[+1.,-1.],4, biotSavart=False)*(180+di)
err=np.sqrt(((-ftH-H)**2).sum())
if err < error_min[0]:
error_min=[err,dx1,dx2,dy1,dy2,di]
error_min
#ErrorMIn (-4,-5.2,-3.2,4.8)
H0=getMirnovFlux([58.,58.],[-7.,7.],[+1.,-1.],4, biotSavart=False)*175 #260
H2=getMirnovFlux([58+error_min[1],58+error_min[2]],[-7+error_min[3],7.+error_min[4]],[+1.,-1.],4, biotSavart=False)*(175+error_min[5])
H3=getMirnovFlux([58-2.52,58-5.3632],[-7-2.756,7.+4.1782],[+1.,-1.],4,biotSavart=False)*(175)
#Variation of I
def varyCurrent(ii,dx1=0,dx2=0,dy1=0,dy2=0):
var_i=[]
for di in ii:
H=getMirnovFlux([58.+dx1,58.+dx2],[-7+dy1,7.+dy2],[+1.,-1.],4, biotSavart=False)*(175+di)
err=np.sqrt(((ftH+H)**2).sum())
var_i.append(err)
return(var_i)
ftH=flatTops(dataH,2000,4000)
ii=np.arange(-50,51,1)
var_i0=varyCurrent(ii)
var_i=varyCurrent(ii,-2.52,-5.36,-2.756,4.1782)
plt.figure()
plt.plot(ii, np.array(var_i0)*1e6, label="original position")
plt.plot(ii, np.array(var_i)*1e6, label="optimized position")
plt.xlabel("dI on active coil [A]")
plt.ylabel("RMS error [uV s]")
plt.title ("Pulse #44330 - Variation of Hfield current")
plt.legend()
plt.grid()
# %matplotlib qt4
#Variation of I 2D
def varyCurrent2D(ii,dx1=0,dx2=0,dy1=0,dy2=0):
var_i2D=[]
for di1 in ii:
var_i=[]
H1=getMirnovFlux([58.+dx1],[-7+dy1],[+1.],4, biotSavart=False)*(175+di1)
for di2 in ii:
H2=getMirnovFlux([58.+dx2],[7.+dy2],[-1.],4, biotSavart=False)*(175+di2)
err=np.sqrt(((-ftH-(H1+H2))**2).sum())
var_i.append(err)
var_i2D.append(np.asarray(var_i))
return(var_i2D)
ftH=flatTops(dataH,2000,4000)
ii=np.arange(-30,72,2)
#var_i0=varyCurrent(ii)
var_i=varyCurrent2D(ii,-2.52,-5.36,-2.756,4.1782)
plt.figure()
plt.contourf(ii,ii,np.transpose(np.asarray(var_i)*1e6))
plt.colorbar()
plt.xlabel("dI1")
plt.ylabel("dI2")
'''
squareSums=[]
for dx1 in xx1:
err1=[]
H=getMirnovFlux([58.+dx1,58.+dx2],[-7-1,7.+4],[+1.,-1.],4, biotSavart=False)*260
err=((ftH-H)**2)
err1.append(err.sum())
for dx2 in xx2:
squareSums.append(np.asarray(err1))
plt.figure()
plt.contourf(xx1,xx2,np.transpose(np.log(np.asarray(squareSums))))
plt.colorbar()
plt.xlabel("dx1")
plt.ylabel("dx2")
'''
H0=getMirnovFlux([58.,58.],[-7.,7.],[+1.,-1.],4, biotSavart=False)*180 #260
H2=getMirnovFlux([58-2.52,58-5.3632],[-7-2.756,7.+4.1782],[+1.,-1.],4,biotSavart=False)*180
H3=getMirnovFlux([58-2.52,58-5.3632],[-7-2.756,7.+4.1782],[+1.,-1.],4,biotSavart=False)*(175+30)
H11=getMirnovFlux([58.+dx1],[-7.+dy1],[1.],4, biotSavart=False)*(175+32)
H12=getMirnovFlux([58.+dx2],[7.+dy2],[-1.],4, biotSavart=False)*(175+17)
plt.figure()
plt.plot(np.arange(12)+1,-ftH*1e6, label="Measured")
plt.plot(np.arange(12)+1,H2*1e6,label="Optimized, 180A")
plt.plot(np.arange(12)+1,(H11+H12)*1e6,label="Optimized, 208,192A")
plt.xlabel("Mirnov probe")
plt.ylabel("Mirnov Flux [uV s]")
plt.title ("Pulse #44330 - Mirnov flux with optimized coil position")
plt.legend()
plt.figure()
plt.plot(np.arange(12)+1,-ftH*1e6, label="Measured")
#plt.plot(np.arange(12)+1,H0*1e6,label="Original, 180A")
plt.plot(np.arange(12)+1,H2*1e6,label="Optimized, 180A")
plt.plot(np.arange(12)+1,H3*1e6,label="Optimized, 205A")
plt.xlabel("Mirnov probe")
plt.ylabel("Mirnov Flux [uV s]")
plt.title ("Pulse #44330 - Mirnov flux with optimized coil position")
plt.legend()
'''
plt.figure()
plt.plot(np.arange(12)+1,P*1e8)
plt.plot(np.arange(12)+1,flatTops(dataP)*1e8)
'''
|
py | 1a35912d06ecb5f03b0267fbc4f8cb294161a9c1 | import json
import tornado.web
class HomersHandler(tornado.web.RequestHandler):
async def get(self, name):
homer = await self.settings["mongo_db"].homers.find_one(
{"name": name})
if homer is None:
raise tornado.web.HTTPError(
404, f"Missing homer: {name}")
self.finish(homer["content"])
async def post(self, name):
await self.settings["mongo_db"].homers.replace_one(
{"name": name},
{
"name": name,
"content": json.loads(self.request.body)
},
upsert=True)
self.set_status(204)
self.finish()
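# Usage sketch (assumed wiring): the handler expects a Motor database handle
# in ``self.settings["mongo_db"]``, so an application could be built like so:
def make_app(mongo_db):
    """Build a minimal application exposing /homers/<name>."""
    return tornado.web.Application(
        [(r"/homers/(\w+)", HomersHandler)],
        mongo_db=mongo_db,
    )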
|
py | 1a35928245efb2c27d5d7a4484dc0d024c40a063 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions list' command."""
import sys
from googlecloudsdk.api_lib.functions import util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as base_exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.third_party.apitools.base.py import exceptions
from googlecloudsdk.third_party.apitools.base.py import list_pager
class List(base.ListCommand):
"""Lists all the functions in a given region."""
def Collection(self):
return 'functions.projects.regions.functions'
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Yields:
Objects representing user functions.
"""
client = self.context['functions_client']
list_generator = list_pager.YieldFromList(
service=client.projects_regions_functions,
request=self.BuildRequest(args),
limit=args.limit, field='functions',
batch_size_attribute='pageSize')
# Decorators (e.g. util.CatchHTTPErrorRaiseHTTPException) don't work
# for generators. We have to catch the exception above the iteration loop,
# but inside the function.
try:
for item in list_generator:
yield item
except exceptions.HttpError as error:
msg = util.GetHttpErrorMessage(error)
unused_type, unused_value, traceback = sys.exc_info()
raise base_exceptions.HttpException, msg, traceback
def BuildRequest(self, args):
"""This method creates a ListRequest message to be send to GCF.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
A ListRequest message.
"""
messages = self.context['functions_messages']
project = properties.VALUES.core.project.Get(required=True)
location = 'projects/{0}/regions/{1}'.format(
project, args.region)
return messages.CloudfunctionsProjectsRegionsFunctionsListRequest(
location=location)
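# Hedged note (not in the original file): list_pager.YieldFromList follows
# the service's page tokens transparently, so Run() yields functions from
# every page of the ListRequest built above without extra caller logic.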
|
py | 1a3592fa233e98b050e03a52ea05bb880afd140f | import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.utils.data import sampler
from torch import cuda
def to_var(x, device, requires_grad=False, volatile=False):
"""
    Variable type that automatically chooses cpu or cuda
"""
    # if torch.cuda.is_available():
    #     x = x.to(device)
return Variable(x, requires_grad=requires_grad, volatile=volatile).to(device)
def prune_rate(model, verbose=True):
"""
Print out prune rate for each layer and the whole network
"""
total_nb_param = 0
nb_zero_param = 0
layer_id = 0
for parameter in model.parameters():
param_this_layer = 1
for dim in parameter.data.size():
param_this_layer *= dim
total_nb_param += param_this_layer
# only pruning linear and conv layers
if len(parameter.data.size()) != 1:
layer_id += 1
zero_param_this_layer = \
np.count_nonzero(parameter.cpu().data.numpy()==0)
nb_zero_param += zero_param_this_layer
if verbose:
print("Layer {} | {} layer | {:.2f}% parameters pruned" \
.format(
layer_id,
'Conv' if len(parameter.data.size()) == 4 \
else 'Linear',
100.*zero_param_this_layer/param_this_layer,
))
pruning_perc = 100.*nb_zero_param/total_nb_param
if verbose:
print("Final pruning rate: {:.2f}%".format(pruning_perc))
return pruning_perc
def arg_nonzero_min(a):
"""
nonzero argmin of a non-negative array
"""
if not a:
return
min_ix, min_v = None, None
# find the starting value (should be nonzero)
for i, e in enumerate(a):
if e != 0:
min_ix = i
min_v = e
    if min_ix is None:
        # no nonzero entry was found (index 0 must not be treated as "missing")
        print('Warning: all zero')
        return np.inf, np.inf
# search for the smallest nonzero
for i, e in enumerate(a):
if e < min_v and e != 0:
min_v = e
min_ix = i
return min_v, min_ix
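# Hedged usage sketch (not part of the original utilities): measuring the
# prune rate of a toy model. The tiny architecture below is invented purely
# for illustration.
if __name__ == "__main__":
    model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
    with torch.no_grad():
        list(model.parameters())[0][0].zero_()  # zero one weight row to mimic pruning
    prune_rate(model)  # prints per-layer and overall pruning percentages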
|
py | 1a35930dbac4cdfa75d0f9918e0d433cf7a9435b | import os
from scipy.io import loadmat
class DATA:
def __init__(self, image_name, bboxes):
self.image_name = image_name
self.bboxes = bboxes
class WIDER(object):
def __init__(self, file_to_label, path_to_image=None):
self.file_to_label = file_to_label
self.path_to_image = path_to_image
self.f = loadmat(file_to_label)
self.event_list = self.f['event_list']
self.file_list = self.f['file_list']
self.face_bbx_list = self.f['face_bbx_list']
def next(self):
for event_idx, event in enumerate(self.event_list):
            # work around "can't concat str and bytes" errors
e = str(event[0][0].encode('utf-8'))[2:-1]
for file, bbx in zip(self.file_list[event_idx][0],
self.face_bbx_list[event_idx][0]):
f = file[0][0].encode('utf-8')
# print(type(e), type(f)) # bytes, bytes
                # work around "can't concat str and bytes" errors
f = str(f)[2:-1]
# path_of_image = os.path.join(self.path_to_image, str(e), str(f)) + ".jpg"
path_of_image = self.path_to_image + '/' + e + '/' + f + ".jpg"
# print(path_of_image)
bboxes = []
bbx0 = bbx[0]
for i in range(bbx0.shape[0]):
xmin, ymin, xmax, ymax = bbx0[i]
bboxes.append((int(xmin), int(ymin), int(xmax), int(ymax)))
yield DATA(path_of_image, bboxes)
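# Hedged usage sketch (not part of the original module): iterating the
# annotation generator. Both paths below are assumptions for illustration.
if __name__ == "__main__":
    wider = WIDER('wider_face_train.mat', path_to_image='WIDER_train/images')
    for sample in wider.next():
        print(sample.image_name, len(sample.bboxes))
        break  # just show the first annotated image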
|
py | 1a359421dc10402ad4126bbfa9aacc18e663efe7 | #coding: utf8
import sublime, sublime_plugin
import sys, os
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
PACKAGES_PATH = sublime.packages_path() or os.path.dirname(BASE_PATH)
if(BASE_PATH not in sys.path):
sys.path += [BASE_PATH] + [os.path.join(BASE_PATH, 'lib')] + [os.path.join(BASE_PATH, 'SublimeJS/core')]
import SublimeJS.loader.pyv8loader as pyv8loader
from SublimeJS.core.context import Context, import_pyv8
from SublimeJS.core.context import js_file_reader as _js_file_reader
from SublimeJS.loader.pyv8loader import LoaderDelegate
is_python3 = sys.version_info[0] > 2
# JS context
ctx = None
# sublime-v8 Settings
# settings = None
# Default ST settings
# user_settings = None
def is_st3():
return sublime.version()[0] == '3'
#######################
# Reload Lib
# The original idea is borrowed from
# https://github.com/wbond/sublime_package_control/blob/master/package_control/reloader.py
import imp
reload_mods = []
for mod in sys.modules:
if mod.startswith('SublimeJS') and sys.modules[mod] != None:
reload_mods.append(mod)
mods_load_order = [
'SublimeJS.core.semver',
'SublimeJS.loader.pyv8loader',
'SublimeJS.core.file',
'SublimeJS.core.http',
'SublimeJS.core.process',
'SublimeJS.core.child_process',
'SublimeJS.core.fs',
'SublimeJS.core.context',
]
for mod in mods_load_order:
if mod in reload_mods:
m = sys.modules[mod]
if 'on_module_reload' in m.__dict__:
m.on_module_reload()
imp.reload(sys.modules[mod])
def convert(obj):
from PyV8 import JSObject, JSArray, JSFunction
if type(obj) == JSArray:
return [convert(v) for v in obj]
if type(obj) == JSObject:
return dict([[str(k), convert(obj.__getattr__(str(k)))] for k in
obj.__members__])
return obj
##################################
# Global Events
class EventDispatcher(sublime_plugin.EventListener):
def on_new(self, view):
if(ctx):
return ctx.call('global.E.emit',['new', view])
return True
def on_clone(self, view):
if(ctx):
return ctx.call('global.E.emit',['clone', view])
return True
def on_load(self, view):
if(ctx):
return ctx.call('global.E.emit',['load', view])
return True
def on_pre_close(self, view):
if(ctx):
return ctx.call('global.E.emit',['pre_close', view])
return True
def on_close(self, view):
if(ctx):
return ctx.call('global.E.emit',['close', view])
return True
def on_pre_save(self, view):
if(ctx):
return ctx.call('global.E.emit',['pre_save', view])
return True
def on_post_save(self, view):
if(ctx):
return ctx.call('global.E.emit',['post_save', view])
return True
def on_modified(self, view):
if(ctx):
return ctx.call('global.E.emit',['modified', view])
return True
def on_selection_modified(self, view):
if(ctx):
return ctx.call('global.E.emit',['selection_modified', view])
return True
def on_activated(self, view):
if(ctx):
ctx.call('global.E.emit',['activated', view])
ctx.call('global.E.emit',['focus', view])
return True
	def on_deactivated(self, view):
if(ctx):
ctx.call('global.E.emit',['deactived', view])
ctx.call('global.E.emit',['blur', view])
return True
def on_text_command(self, view, command_name, args):
if(ctx):
return ctx.call('global.E.emit',['text_command', view, command_name, args])
return (command_name, args)
def on_window_command(self, window, command_name, args):
if(ctx):
return ctx.call('global.E.emit',['window_command', window, command_name, args])
return (command_name, args)
def post_text_command(self, view, command_name, args):
if(ctx):
return ctx.call('global.E.emit',['post_text_command', view, command_name, args])
return True
def post_window_command(self, window, command_name, args):
if(ctx):
return ctx.call('global.E.emit',['post_window_command', window, command_name, args])
return True
def on_query_context(self, view, key, operator, operand, match_all):
if(ctx):
return ctx.call('global.E.emit',['query_context', view, key, operator, operand, match_all])
return True
def on_query_completions(self, view, prefix, locations):
if(ctx):
ctx._js_ctx.enter()
ret = convert(ctx.call('global.E.on_query_completions',[view, prefix, locations]))
ctx._js_ctx.leave()
return ret
return None
##########################
# Base Class of JSCommand
class JSTextCommand(sublime_plugin.TextCommand):
def __init__(self, view):
self.view = view
def run(self, edit, args=None):
command = self.__class__.__name__[0:-7].lower()
ctx.call('global.runCommand', [command, self.view, edit, args]);
# ctx.load_js_file(os.path.join(BASE_PATH, mod), {'view':self.view, 'edit': edit})
class JSWindowCommand(sublime_plugin.WindowCommand):
def __init__(self, window):
self.window = window
def run(self, args=None):
command = self.__class__.__name__[0:-7].lower()
ctx.call('global.runCommand', [command, self.window, args]);
class JSApplicationCommand(sublime_plugin.ApplicationCommand):
def run(self, args=None):
command = self.__class__.__name__[0:-7].lower()
ctx.call('global.runCommand', [command, args]);
class HelloWorldCommand(JSTextCommand):
'''demo'''
pass
############################
def init():
"Init sublime-v8 engine"
# setup environment for PyV8 loading
pyv8_paths = [
os.path.join(PACKAGES_PATH, 'PyV8'),
os.path.join(PACKAGES_PATH, 'PyV8', pyv8loader.get_arch()),
os.path.join(PACKAGES_PATH, 'PyV8', 'pyv8-%s' % pyv8loader.get_arch())
]
sys.path += pyv8_paths
# unpack recently loaded binary, is exists
for p in pyv8_paths:
pyv8loader.unpack_pyv8(p)
###################################
# if you need update PyV8, comment this
try:
import PyV8
except:
pass
###################################
# create JS environment
delegate = SublimeLoaderDelegate()
pyv8loader.load(pyv8_paths[1], delegate)
class SublimeLoaderDelegate(LoaderDelegate):
load_cache = []
def __init__(self, settings=None):
if settings is None:
settings = {}
#for k in ['http_proxy', 'https_proxy', 'timeout']:
# if user_settings.has(k):
# settings[k] = user_settings.get(k, None)
LoaderDelegate.__init__(self, settings)
self.state = None
self.message = 'Loading PyV8 binary, please wait'
self.i = 0
self.addend = 1
self.size = 8
def on_start(self, *args, **kwargs):
self.state = 'loading'
def on_progress(self, *args, **kwargs):
if kwargs['progress'].is_background:
return
before = self.i % self.size
after = (self.size - 1) - before
msg = '%s [%s=%s]' % (self.message, ' ' * before, ' ' * after)
if not after:
self.addend = -1
if not before:
self.addend = 1
self.i += self.addend
sublime.set_timeout(lambda: sublime.status_message(msg), 0)
def on_complete(self, *args, **kwargs):
self.state = 'complete'
if kwargs['progress'].is_background:
return
sublime.set_timeout(self.on_ready, 0)
def on_ready(self):
sublime.status_message('PyV8 binary successfully loaded')
if(not ctx):
globals()['ctx'] = JSCore(self.log)
from PyV8 import JSClass, JSObject, JSArray, JSFunction
ctx.JSClass = lambda obj: JSClass(convert(obj))
ctx.JSObject = lambda obj: JSObject(convert(obj))
ctx.JSArray = lambda obj: JSArray(convert(obj))
ctx.JSFunction = lambda obj: JSFunction(convert(obj))
ctx.load_js_file(os.path.join(BASE_PATH, "SublimeJS/js/core.js"))
if('js_loading_queue' in globals()):
for i in globals()['js_loading_queue']:
ctx.load_js_file(i)
def on_error(self, exit_code=-1, thread=None):
self.state = 'error'
sublime.set_timeout(lambda: show_pyv8_error(exit_code), 0)
def setting(self, name, default=None):
"Returns specified setting name"
return self.settings.get(name, default)
def log(self, message):
print('JS: %s' % message)
def plugin_loaded():
sublime.set_timeout(init, 200)
##################
# Init plugin
if not is_python3:
init()
class Console:
def __init__(self, logger):
self.logger = logger;
def log(self, msg):
self.logger(msg);
import hashlib, urllib
class JSCore(Context):
_reload = False
def __init__(self, logger):
self.console = Console(logger);
Context.__init__(self, logger)
if('firstload__' not in globals()):
globals()['firstload__'] = True
else:
globals()['firstload__'] = False
def registerCommand(self, name, commandType):
name = name + 'Command'
fullName = '<' + commandType + '>SublimeJS.v8.' + name;
if(fullName not in globals()):
if(commandType == 'TextCommand'):
globals()[fullName] = type(name, (JSTextCommand,), {})
if(commandType == 'WindowCommand'):
globals()[fullName] = type(name, (JSWindowCommand,), {})
if(commandType == 'ApplicationCommand'):
globals()[fullName] = type(name, (JSApplicationCommand,), {})
if(not self._reload):
self._reload = True
if(self._reload):
self.reload(not globals()['firstload__'])
@property
def sublime(self):
return sublime
def reload(self, force=False):
if(force):
def _reload():
sublime.active_window().active_view().run_command('save')
sublime.active_window().run_command('close')
sublime.active_window().open_file(__file__)
sublime.set_timeout(_reload, 200)
else:
sublime.set_timeout(lambda:sublime_plugin.reload_plugin('SublimeJS.v8'),200)
	def md5(self, s):
		return hashlib.md5(s).hexdigest()
class JS:
_base_dir = None
def __init__(self, base):
self._base_dir = base;
def boot(self, file = 'index.js'):
if(globals()['ctx']):
globals()['ctx'].load_js_file(os.path.join(self._base_dir, file))
else:
if(not 'js_loading_queue' in globals()):
globals()['js_loading_queue'] = []
globals()['js_loading_queue'].append(os.path.join(self._base_dir, file))
def getContext():
return globals()['ctx']
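# Hedged usage sketch (not part of the original plugin): a downstream
# Sublime package would typically boot its JavaScript entry point through
# the JS helper above; the package path is an assumption.
#
#   js = JS(os.path.join(PACKAGES_PATH, 'MyJSPackage'))
#   js.boot('index.js')  # queued until PyV8/ctx is ready, then loaded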
|
py | 1a35956174a602e3687dbf49dea50f478d76060e | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_text
from django.db.models import Sum
__all__ = ['CounterManager', ]
class CounterManager(models.Manager):
def for_model(self, model, total=False):
"""
QuerySet for all counter records for a particular model (either an instance or
a class).
"""
ct = ContentType.objects.get_for_model(model)
qs = self.get_queryset().filter(content_type=ct)
if isinstance(model, models.Model):
qs = qs.filter(object_pk=force_text(model._get_pk_val()))
if total:
qs = qs.aggregate(Sum('hits'))['hits__sum']
return qs
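# Hedged usage sketch (not part of the original manager): assuming a
# Counter model that attaches this manager as `objects` and a hypothetical
# Article model:
#
#   Counter.objects.for_model(Article)             # counters for all articles
#   Counter.objects.for_model(article, total=True) # summed hits for one instance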
|
py | 1a359800d25cb82aea4a3aef152f61464d6c6583 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from typing import Tuple
import numpy as np
import torch
from nnformer.training.data_augmentation.data_augmentation_moreDA import get_moreDA_augmentation
from nnformer.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnformer.utilities.to_torch import maybe_to_torch, to_cuda
from nnformer.network_architecture.generic_UNet import Generic_UNet
from nnformer.network_architecture.Swin_Unet_l_gelunorm import swintransformer
from nnformer.network_architecture.initialization import InitWeights_He
from nnformer.network_architecture.neural_network import SegmentationNetwork
from nnformer.training.data_augmentation.default_data_augmentation import default_2D_augmentation_params, \
get_patch_size, default_3D_augmentation_params
from nnformer.training.dataloading.dataset_loading import unpack_dataset
from nnformer.training.network_training.nnFormerTrainer_synapse import nnFormerTrainer_synapse
from nnformer.utilities.nd_softmax import softmax_helper
from sklearn.model_selection import KFold
from torch import nn
from torch.cuda.amp import autocast
from nnformer.training.learning_rate.poly_lr import poly_lr
from batchgenerators.utilities.file_and_folder_operations import *
class nnFormerTrainerV2_Synapse(nnFormerTrainer_synapse):
"""
Info for Fabian: same as internal nnFormerTrainerV2_2
"""
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, fp16)
self.max_num_epochs = 1000
self.initial_lr = 1e-2
self.deep_supervision_scales = None
self.ds_loss_weights = None
self.pin_memory = True
def initialize(self, training=True, force_load_plans=False):
"""
- replaced get_default_augmentation with get_moreDA_augmentation
- enforce to only run this code once
- loss function wrapper for deep supervision
:param training:
:param force_load_plans:
:return:
"""
if not self.was_initialized:
maybe_mkdir_p(self.output_folder)
if force_load_plans or (self.plans is None):
self.load_plans_file()
self.process_plans(self.plans)
self.setup_DA_params()
################# Here we wrap the loss for deep supervision ############
# we need to know the number of outputs of the network
net_numpool = len(self.net_num_pool_op_kernel_sizes)
# we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
# this gives higher resolution outputs more weight in the loss
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
            # we don't use the lowest resolution output (the mask below zeroes
            # only the last weight). Normalize weights so that they sum to 1
mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
weights[~mask] = 0
weights = weights / weights.sum()
self.ds_loss_weights = weights
# now wrap the loss
self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
################# END ###################
self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
"_stage%d" % self.stage)
if training:
self.dl_tr, self.dl_val = self.get_basic_generators()
if self.unpack_data:
print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
print("done")
else:
print(
"INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
"will wait all winter for your model to finish!")
self.tr_gen, self.val_gen = get_moreDA_augmentation(
self.dl_tr, self.dl_val,
self.data_aug_params[
'patch_size_for_spatialtransform'],
self.data_aug_params,
deep_supervision_scales=self.deep_supervision_scales,
pin_memory=self.pin_memory,
use_nondetMultiThreadedAugmenter=False
)
self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
also_print_to_console=False)
self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
also_print_to_console=False)
else:
pass
self.initialize_network()
self.initialize_optimizer_and_scheduler()
assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
else:
self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
self.was_initialized = True
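    # Worked example (hedged, not in the original): with net_numpool = 5 the
    # raw weights are [1, 1/2, 1/4, 1/8, 1/16]; the mask zeroes the lowest
    # resolution output and normalization gives approximately
    # [0.533, 0.267, 0.133, 0.067, 0.0] for self.ds_loss_weights.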
def initialize_network(self):
"""
- momentum 0.99
- SGD instead of Adam
- self.lr_scheduler = None because we do poly_lr
- deep supervision = True
- i am sure I forgot something here
Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though
:return:
"""
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
#self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
# len(self.net_num_pool_op_kernel_sizes),
# self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
# dropout_op_kwargs,
# net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
# self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
self.network = swintransformer(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
checkpoint = torch.load("../Pretrained_weight/pretrain_Synapse.model", map_location='cuda')
self.network.load_state_dict(checkpoint['state_dict'])
print('I am using the pre_train weight!!')
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
def initialize_optimizer_and_scheduler(self):
assert self.network is not None, "self.initialize_network must be called first"
self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
momentum=0.99, nesterov=True)
self.lr_scheduler = None
def run_online_evaluation(self, output, target):
"""
due to deep supervision the return value and the reference are now lists of tensors. We only need the full
resolution output because this is what we are interested in in the end. The others are ignored
:param output:
:param target:
:return:
"""
target = target[0]
output = output[0]
return super().run_online_evaluation(output, target)
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):
"""
We need to wrap this because we need to enforce self.network.do_ds = False for prediction
"""
ds = self.network.do_ds
self.network.do_ds = False
ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,
save_softmax=save_softmax, use_gaussian=use_gaussian,
overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug,
all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs,
run_postprocessing_on_folds=run_postprocessing_on_folds)
self.network.do_ds = ds
return ret
def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
mirror_axes: Tuple[int] = None,
use_sliding_window: bool = True, step_size: float = 0.5,
use_gaussian: bool = True, pad_border_mode: str = 'constant',
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
"""
We need to wrap this because we need to enforce self.network.do_ds = False for prediction
"""
ds = self.network.do_ds
self.network.do_ds = False
ret = super().predict_preprocessed_data_return_seg_and_softmax(data,
do_mirroring=do_mirroring,
mirror_axes=mirror_axes,
use_sliding_window=use_sliding_window,
step_size=step_size, use_gaussian=use_gaussian,
pad_border_mode=pad_border_mode,
pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,
verbose=verbose,
mixed_precision=mixed_precision)
self.network.do_ds = ds
return ret
def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
"""
gradient clipping improves training stability
:param data_generator:
:param do_backprop:
:param run_online_evaluation:
:return:
"""
data_dict = next(data_generator)
data = data_dict['data']
target = data_dict['target']
data = maybe_to_torch(data)
target = maybe_to_torch(target)
if torch.cuda.is_available():
data = to_cuda(data)
target = to_cuda(target)
self.optimizer.zero_grad()
if self.fp16:
with autocast():
output = self.network(data)
del data
l = self.loss(output, target)
if do_backprop:
self.amp_grad_scaler.scale(l).backward()
self.amp_grad_scaler.unscale_(self.optimizer)
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
self.amp_grad_scaler.step(self.optimizer)
self.amp_grad_scaler.update()
else:
output = self.network(data)
del data
l = self.loss(output, target)
if do_backprop:
l.backward()
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
self.optimizer.step()
if run_online_evaluation:
self.run_online_evaluation(output, target)
del target
return l.detach().cpu().numpy()
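    # Hedged note (not in the original): both branches above cap the global
    # gradient norm at 12 via clip_grad_norm_ before stepping the optimizer,
    # which is the "gradient clipping" the docstring refers to.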
def do_split(self):
"""
The default split is a 5 fold CV on all available training cases. nnU-Net will create a split (it is seeded,
so always the same) and save it as splits_final.pkl file in the preprocessed data directory.
Sometimes you may want to create your own split for various reasons. For this you will need to create your own
splits_final.pkl file. If this file is present, nnU-Net is going to use it and whatever splits are defined in
it. You can create as many splits in this file as you want. Note that if you define only 4 splits (fold 0-3)
and then set fold=4 when training (that would be the fifth split), nnU-Net will print a warning and proceed to
use a random 80:20 data split.
:return:
"""
if self.fold == "all":
# if fold==all then we use all images for training and validation
tr_keys = val_keys = list(self.dataset.keys())
else:
splits_file = join(self.dataset_directory, "splits_final.pkl")
# if the split file does not exist we need to create it
if not isfile(splits_file):
self.print_to_log_file("Creating new 5-fold cross-validation split...")
splits = []
all_keys_sorted = np.sort(list(self.dataset.keys()))
kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):
train_keys = np.array(all_keys_sorted)[train_idx]
test_keys = np.array(all_keys_sorted)[test_idx]
splits.append(OrderedDict())
splits[-1]['train'] = train_keys
splits[-1]['val'] = test_keys
save_pickle(splits, splits_file)
else:
self.print_to_log_file("Using splits from existing split file:", splits_file)
splits = load_pickle(splits_file)
self.print_to_log_file("The split file contains %d splits." % len(splits))
splits[self.fold]['train']=np.array(['img0006','img0007' ,'img0009', 'img0010', 'img0021' ,'img0023' ,'img0024','img0026' ,'img0027' ,'img0031', 'img0033' ,'img0034' \
,'img0039', 'img0040','img0005', 'img0028', 'img0030', 'img0037'])
splits[self.fold]['val']=np.array(['img0001', 'img0002', 'img0003', 'img0004', 'img0008', 'img0022','img0025', 'img0029', 'img0032', 'img0035', 'img0036', 'img0038'])
self.print_to_log_file("Desired fold for training: %d" % self.fold)
if self.fold < len(splits):
tr_keys = splits[self.fold]['train']
val_keys = splits[self.fold]['val']
self.print_to_log_file("This split has %d training and %d validation cases."
% (len(tr_keys), len(val_keys)))
else:
self.print_to_log_file("INFO: You requested fold %d for training but splits "
"contain only %d folds. I am now creating a "
"random (but seeded) 80:20 split!" % (self.fold, len(splits)))
# if we request a fold that is not in the split file, create a random 80:20 split
rnd = np.random.RandomState(seed=12345 + self.fold)
keys = np.sort(list(self.dataset.keys()))
idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False)
idx_val = [i for i in range(len(keys)) if i not in idx_tr]
tr_keys = [keys[i] for i in idx_tr]
val_keys = [keys[i] for i in idx_val]
self.print_to_log_file("This random 80:20 split has %d training and %d validation cases."
% (len(tr_keys), len(val_keys)))
tr_keys.sort()
val_keys.sort()
self.dataset_tr = OrderedDict()
for i in tr_keys:
self.dataset_tr[i] = self.dataset[i]
self.dataset_val = OrderedDict()
for i in val_keys:
self.dataset_val[i] = self.dataset[i]
def setup_DA_params(self):
"""
- we increase roation angle from [-15, 15] to [-30, 30]
- scale range is now (0.7, 1.4), was (0.85, 1.25)
- we don't do elastic deformation anymore
:return:
"""
self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]
if self.threeD:
self.data_aug_params = default_3D_augmentation_params
self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
if self.do_dummy_2D_aug:
self.data_aug_params["dummy_2D"] = True
self.print_to_log_file("Using dummy2d data augmentation")
self.data_aug_params["elastic_deform_alpha"] = \
default_2D_augmentation_params["elastic_deform_alpha"]
self.data_aug_params["elastic_deform_sigma"] = \
default_2D_augmentation_params["elastic_deform_sigma"]
self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
else:
self.do_dummy_2D_aug = False
if max(self.patch_size) / min(self.patch_size) > 1.5:
default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
self.data_aug_params = default_2D_augmentation_params
self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
if self.do_dummy_2D_aug:
self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
patch_size_for_spatialtransform = self.patch_size[1:]
else:
self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
patch_size_for_spatialtransform = self.patch_size
self.data_aug_params["scale_range"] = (0.7, 1.4)
self.data_aug_params["do_elastic"] = False
self.data_aug_params['selected_seg_channels'] = [0]
self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
self.data_aug_params["num_cached_per_thread"] = 2
def maybe_update_lr(self, epoch=None):
"""
if epoch is not None we overwrite epoch. Else we use epoch = self.epoch + 1
(maybe_update_lr is called in on_epoch_end which is called before epoch is incremented.
        Therefore we need to do +1 here)
:param epoch:
:return:
"""
if epoch is None:
ep = self.epoch + 1
else:
ep = epoch
self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9)
self.print_to_log_file("lr:", np.round(self.optimizer.param_groups[0]['lr'], decimals=6))
def on_epoch_end(self):
"""
overwrite patient-based early stopping. Always run to 1000 epochs
:return:
"""
super().on_epoch_end()
continue_training = self.epoch < self.max_num_epochs
# it can rarely happen that the momentum of nnFormerTrainerV2 is too high for some dataset. If at epoch 100 the
# estimated validation Dice is still 0 then we reduce the momentum from 0.99 to 0.95
if self.epoch == 100:
if self.all_val_eval_metrics[-1] == 0:
self.optimizer.param_groups[0]["momentum"] = 0.95
self.network.apply(InitWeights_He(1e-2))
self.print_to_log_file("At epoch 100, the mean foreground Dice was 0. This can be caused by a too "
"high momentum. High momentum (0.99) is good for datasets where it works, but "
"sometimes causes issues such as this one. Momentum has now been reduced to "
"0.95 and network weights have been reinitialized")
return continue_training
def run_training(self):
"""
if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first
continued epoch with self.initial_lr
we also need to make sure deep supervision in the network is enabled for training, thus the wrapper
:return:
"""
self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we
# want at the start of the training
ds = self.network.do_ds
self.network.do_ds = True
ret = super().run_training()
self.network.do_ds = ds
return ret
|
py | 1a3598de6bcde1309b8c34dddb363d751a65fe07 | # -*- coding: utf-8 -*-
# This file is part of the OpenSYMORO project. Please see
# https://github.com/symoro/symoro/blob/master/LICENCE for the licence.
"""
This module of the SYMORO package provides symbolic
solutions for the inverse geometric problem.
"""
from heapq import heapify, heappop
from sympy import var, sin, cos, eye, atan2, sqrt, pi
from sympy import Matrix, Symbol, Expr, trigsimp
from pysymoro.geometry import transform_list, to_matrix
from symoroutils import symbolmgr
from symoroutils import tools
EMPTY = var("EMPTY")
T_GENERAL = Matrix([var("s1,n1,a1,p1"), var("s2,n2,a2,p2"),
var("s3,n3,a3,p3"), [0, 0, 0, 1]])
# Dictionary for equation type classification.
eq_dict = {(1, 0, 0): 0, (0, 1, 0): 1, (1, 1, 0): 2,
(0, 2, 0): 3, (0, 2, 1): 4}
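# Example of the classification (hedged, not in the original): the key is
# (number of prismatic vars, number of revolute vars, max extra ops inside
# sin/cos arguments). An equation with one revolute variable and plain
# sin/cos arguments has features (0, 1, 0) and is dispatched as type 1,
# while one prismatic and no revolute variables gives (1, 0, 0), type 0.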
def _paul_solve(robo, symo, nTm, n, m, known_vars=None):
if known_vars is None:
knowns = set()
else:
knowns = set(known_vars)
chain = robo.loop_chain(m, n)
th_all = set()
r_all = set()
# Create the set of all knowns symbols
for i in chain:
if i >= 0:
if robo.sigma[i] == 0 and isinstance(robo.theta[i], Expr):
th_all.add(robo.theta[i])
if isinstance(robo.r[i], Expr):
knowns |= robo.r[i].atoms(Symbol)
if robo.sigma[i] == 1 and isinstance(robo.r[i], Expr):
r_all.add(robo.r[i])
if isinstance(robo.theta[i], Expr):
knowns |= robo.theta[i].atoms(Symbol)
if isinstance(robo.gamma[i], Expr):
knowns |= robo.gamma[i].atoms(Symbol)
if isinstance(robo.alpha[i], Expr):
knowns |= robo.alpha[i].atoms(Symbol)
if isinstance(robo.d[i], Expr):
knowns |= robo.d[i].atoms(Symbol)
if isinstance(robo.b[i], Expr):
knowns |= robo.b[i].atoms(Symbol)
while True:
repeat = False
iTm = nTm.copy()
tr_list = transform_list(robo, n, m)
_replace_EMPTY(iTm, tr_list)
tr_list.reverse()
tr_const, tr_list = _extract_const_transforms(tr_list, knowns)
for trc in tr_const:
iTm = iTm * trc.matrix_inv()
tr_list.reverse()
while tr_list:
tr_const, tr_list = _extract_const_transforms(tr_list, knowns)
for trc in tr_const:
iTm = trc.matrix_inv() * iTm
tr = tr_list.pop(0)
if tr.val.atoms(Symbol) - knowns:
M_eq = tr.matrix() * to_matrix(tr_list, simplify=False)
while True:
found = _look_for_eq(symo, M_eq - iTm,
knowns, th_all, r_all)
repeat |= found
if not found or th_all | r_all <= knowns:
break
iTm = tr.matrix_inv() * iTm
if th_all | r_all <= knowns:
break
if not repeat or th_all | r_all <= knowns:
break
return knowns
def _replace_EMPTY(T, tr_list):
T_sym = to_matrix(tr_list, simplify=True)
for e1 in xrange(4):
for e2 in xrange(4):
if T[e1, e2].has(EMPTY):
T[e1, e2] = T_sym[e1, e2]
def _extract_const_transforms(tr_list, knowns):
var_idx = len(tr_list)
var_found = False
for i, tr in enumerate(tr_list):
if not var_found:
if tr.val.atoms(Symbol) - knowns:
var_found = True
var_idx = i
elif tr.axis == tr_list[var_idx].axis:
if not tr.val.atoms(Symbol) - knowns:
tr_list[i] = tr_list[var_idx]
tr_list[var_idx] = tr
var_idx = i
else:
break
return tr_list[:var_idx], tr_list[var_idx:]
def _look_for_eq(symo, M_eq, knowns, th_all, r_all):
cont_search = False
eq_candidates = [list() for list_index in xrange(5)]
for e1 in xrange(3):
for e2 in xrange(4):
eq = M_eq[e1, e2]
if not isinstance(eq, Expr) or eq.is_Atom:
continue
th_vars = (eq.atoms(Symbol) & th_all) - knowns
arg_ops = [at.count_ops()-1 for at in eq.atoms(sin, cos)
if not at.atoms(Symbol) & knowns]
if th_vars and arg_ops:
arg_sum = max(arg_ops)
else:
arg_sum = 0
rs_s = (eq.atoms(Symbol) & r_all) - knowns
eq_features = (len(rs_s), len(th_vars), arg_sum)
if eq_features in eq_dict:
eq_key = eq_dict[eq_features]
eq_pack = (eq, list(rs_s), list(th_vars))
eq_candidates[eq_key].append(eq_pack)
cont_search |= _try_solve_0(symo, eq_candidates[0], knowns)
cont_search |= _try_solve_1(symo, eq_candidates[1], knowns)
cont_search |= _try_solve_2(symo, eq_candidates[2] +
eq_candidates[1], knowns)
cont_search |= _try_solve_3(symo, eq_candidates[3], knowns)
cont_search |= _try_solve_4(symo, eq_candidates[4], knowns)
return cont_search
def loop_solve(robo, symo, know=None):
# TODO: rewrite; Add parallelogram detection
q_vec = [robo.get_q(i) for i in xrange(robo.NF)]
loops = []
if know is None:
know = robo.q_active
# set(q for i, q in enumerate(q_vec) if robo.mu[i] == 1)
for i, j in robo.loop_terminals:
chain = robo.loop_chain(i, j)
know_ij = set(q_vec[i] for i in chain if q_vec[i] in know)
unknow_ij = set(q_vec[i] for i in chain if q_vec[i] not in know)
loops.append([len(unknow_ij), i, j, know_ij, unknow_ij])
while loops:
heapify(loops)
loop = heappop(loops)
res_know = _paul_solve(robo, symo, eye(4), *loop[1:4])
for l in loops:
found = l[4] & res_know
l[3] |= found
l[4] -= found
l[0] = len(l[4])
def igm_paul(robo, T_ref, n):
if isinstance(T_ref, list):
T_ref = Matrix(4, 4, T_ref)
symo = symbolmgr.SymbolManager()
symo.file_open(robo, 'igm')
symo.write_params_table(robo, 'Inverse Geometric Model for frame %s' % n)
_paul_solve(robo, symo, T_ref, 0, n)
symo.file_close()
return symo
# TODO: think about smarter way of matching
def _try_solve_0(symo, eq_sys, knowns):
res = False
for eq, [r], th_names in eq_sys:
X = tools.get_max_coef(eq, r)
if X != 0:
Y = X*r - eq
symo.write_line("# Solving type 1")
X = symo.replace(trigsimp(X), 'X', r)
Y = symo.replace(trigsimp(Y), 'Y', r)
symo.add_to_dict(r, Y/X)
knowns.add(r)
res = True
return res
def _try_solve_1(symo, eq_sys, knowns):
res = False
for i in xrange(len(eq_sys)):
eqi, rs_i, [th_i] = eq_sys[i]
if th_i in knowns:
continue
Xi, Yi, Zi, i_ok = _get_coefs(eqi, sin(th_i), cos(th_i), th_i)
zero = tools.ZERO
i_ok &= sum([Xi == zero, Yi == zero, Zi == zero]) <= 1
if not i_ok:
continue
j_ok = False
for j in xrange(i+1, len(eq_sys)):
eqj, rs_j, [th_j] = eq_sys[j]
if th_i == th_j:
Xj, Yj, Zj, j_ok = _get_coefs(eqj, sin(th_j), cos(th_j), th_i)
j_ok &= (Xi*Yj != Xj*Yi)
if j_ok:
break
if j_ok:
symo.write_line("# Solving type 3")
_solve_type_3(symo, Xi, Yi, -Zi, Xj, Yj, -Zj, th_i)
else:
symo.write_line("# Solving type 2")
_solve_type_2(symo, Xi, Yi, -Zi, th_i)
knowns.add(th_i)
res = True
return res
def _try_solve_2(symo, eq_sys, knowns):
if all(len(rs) == 0 for eq, rs, ths in eq_sys):
return False
for i in xrange(len(eq_sys)):
all_ok = False
for j in xrange(len(eq_sys)):
eqj, rs_j, ths_j = eq_sys[j]
eqi, rs_i, ths_i = eq_sys[i]
if i == j or set(ths_i) != set(ths_j) or set(rs_j) != set(rs_i):
continue
th = ths_i[0]
C, S = cos(th), sin(th)
r = rs_i[0]
X1, Y1, Z1, i_ok = _get_coefs(eqi, S, r, th, r)
X2, Y2, Z2, j_ok = _get_coefs(eqj, C, r, th, r)
all_ok = j_ok and i_ok and not eqi.has(C) and not eqj.has(S)
if all_ok:
eq_type = 5
break
X1, Y1, Z1, i_ok = _get_coefs(eqi, S, C, th, r)
X2, Y2, Z2, j_ok = _get_coefs(eqj, C, S, th, r)
i_ok &= X1.has(r) and not Z1.has(r) and Y1 == tools.ZERO
j_ok &= X2.has(r) and not Z2.has(r) and Y2 == tools.ZERO
all_ok = j_ok and i_ok
if all_ok:
eq_type = 4
X1 /= r
X2 /= r
break
else:
eq_type = -1
if not all_ok or eq_type == -1:
continue
symo.write_line("# Solving type %s" % eq_type)
if eq_type == 4:
_solve_type_4(symo, X1, -Y1, X2, -Y2, th, r)
else:
_solve_type_5(symo, X1, -Y1, -Z1, X2, -Y2, -Z2, th, r)
knowns |= {th, r}
return True
return False
def _match_coef(A1, A2, B1, B2):
return A1 == A2 and B1 == B2 or A1 == -A2 and B1 == -B2
def _try_solve_3(symo, eq_sys, knowns):
for i in xrange(len(eq_sys)):
all_ok = False
for j in xrange(len(eq_sys)):
            eqj, rs_j, ths_j = eq_sys[j]
            eqi, rs_i, ths_i = eq_sys[i]
if i == j or set(ths_i) != set(ths_j):
continue
th1 = ths_i[0]
th2 = ths_i[1]
C1, S1 = cos(th1), sin(th1)
C2, S2 = cos(th2), sin(th2)
X1, Y1, ZW1, i_ok = _get_coefs(eqi, C1, S1, th1)
X2, Y2, ZW2, j_ok = _get_coefs(eqj, S1, C1, th1)
Y2 = -Y2
V1, W1, Z1, iw_ok = _get_coefs(ZW1, C2, S2, th1, th2)
V2, W2, Z2, jw_ok = _get_coefs(ZW2, S2, C2, th1, th2)
W2 = -W2
all_ok = j_ok and i_ok and jw_ok and iw_ok
all_ok &= _check_const((X1, Y1), th2)
if X1 == 0 or Y1 == 0:
X1, Y1, V1, W1 = V1, W1, X1, Y1
X2, Y2, V2, W2 = V2, W2, X2, Y2
th1, th2 = th2, th1
all_ok &= _match_coef(X1, X2, Y1, Y2)
all_ok &= _match_coef(V1, V2, W1, W2)
eps = 1
if X1 == X2 and Y1 == Y2:
if W1 == -W2 and V1 == -V2:
eps = -1
else:
if W1 == W2 and V1 == V2:
eps = -1
Z2 = -Z2
for a in (X1, X2, Y1, Y2):
all_ok &= not a.has(C2)
all_ok &= not a.has(S2)
if all_ok:
break
if not all_ok:
continue
symo.write_line("# Solving type 6, 7")
_solve_type_7(symo, V1, W1, -X1, -Y1, -Z1, -Z2, eps, th1, th2)
knowns |= {th1, th2}
return True
return False
# TODO: make it with itertool
def _try_solve_4(symo, eq_sys, knowns):
res = False
for i in xrange(len(eq_sys)):
all_ok = False
for j in xrange(len(eq_sys)):
eqj, rs_j, ths_j = eq_sys[j]
eqi, rs_i, ths_i = eq_sys[i]
if i == j or set(ths_i) != set(ths_j):
continue
th12 = ths_i[0] + ths_i[1]
if eqi.has(sin(ths_i[0])) or eqi.has(cos(ths_i[0])):
th1 = ths_i[0]
th2 = ths_i[1]
else:
th1 = ths_i[1]
th2 = ths_i[0]
C1, S1 = cos(th1), sin(th1)
C12, S12 = cos(th12), sin(th12)
X1, Y1, Z1, i_ok = _get_coefs(eqi, C1, C12, th1, th2)
X2, Y2, Z2, j_ok = _get_coefs(eqj, S1, S12, th1, th2)
all_ok = (X1*Y2 == Y1*X2 and i_ok and j_ok)
all_ok &= X1 != 0 and Y1 != 0
all_ok &= not eqi.has(S1) and not eqi.has(S12)
all_ok &= not eqj.has(C1) and not eqj.has(C12)
if all_ok:
break
if not all_ok:
continue
symo.write_line("# Solving type 8")
_solve_type_8(symo, X1, Y1, -Z1, -Z2, th1, th2)
knowns |= {th1, th2}
res = True
return res
def _solve_type_2(symo, X, Y, Z, th):
"""Solution for the equation:
X*S + Y*C = Z
"""
symo.write_line("# X*sin({0}) + Y*cos({0}) = Z".format(th))
X = symo.replace(trigsimp(X), 'X', th)
Y = symo.replace(trigsimp(Y), 'Y', th)
Z = symo.replace(trigsimp(Z), 'Z', th)
YPS = var('YPS'+str(th))
if X == tools.ZERO and Y != tools.ZERO:
C = symo.replace(Z/Y, 'C', th)
symo.add_to_dict(YPS, (tools.ONE, - tools.ONE))
symo.add_to_dict(th, atan2(YPS*sqrt(1-C**2), C))
elif X != tools.ZERO and Y == tools.ZERO:
S = symo.replace(Z/X, 'S', th)
symo.add_to_dict(YPS, (tools.ONE, - tools.ONE))
symo.add_to_dict(th, atan2(S, YPS*sqrt(1-S**2)))
elif Z == tools.ZERO:
symo.add_to_dict(YPS, (tools.ONE, tools.ZERO))
symo.add_to_dict(th, atan2(-Y, X) + YPS*pi)
else:
B = symo.replace(X**2 + Y**2, 'B', th)
D = symo.replace(B - Z**2, 'D', th)
symo.add_to_dict(YPS, (tools.ONE, - tools.ONE))
S = symo.replace((X*Z + YPS * Y * sqrt(D))/B, 'S', th)
C = symo.replace((Y*Z - YPS * X * sqrt(D))/B, 'C', th)
symo.add_to_dict(th, atan2(S, C))
def _solve_type_3(symo, X1, Y1, Z1, X2, Y2, Z2, th):
"""Solution for the system:
X1*S + Y1*C = Z1
X2*S + Y2*C = Z2
"""
symo.write_line("# X1*sin({0}) + Y1*cos({0}) = Z1".format(th))
symo.write_line("# X2*sin({0}) + Y2*cos({0}) = Z2".format(th))
X1 = symo.replace(trigsimp(X1), 'X1', th)
Y1 = symo.replace(trigsimp(Y1), 'Y1', th)
Z1 = symo.replace(trigsimp(Z1), 'Z1', th)
X2 = symo.replace(trigsimp(X2), 'X2', th)
Y2 = symo.replace(trigsimp(Y2), 'Y2', th)
Z2 = symo.replace(trigsimp(Z2), 'Z2', th)
if X1 == tools.ZERO and Y2 == tools.ZERO:
symo.add_to_dict(th, atan2(Z2/X2, Z1/Y1))
elif X2 == tools.ZERO and Y1 == tools.ZERO:
symo.add_to_dict(th, atan2(Z1/X1, Z2/Y2))
else:
D = symo.replace(X1*Y2-X2*Y1, 'D', th)
C = symo.replace((Z2*X1 - Z1*X2)/D, 'C', th)
S = symo.replace((Z1*Y2 - Z2*Y1)/D, 'S', th)
symo.add_to_dict(th, atan2(S, C))
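# Worked check (hedged, not in the original): for X1=1, Y1=0, Z1=sin(q),
# X2=0, Y2=1, Z2=cos(q) the second branch fires and
# atan2(Z1/X1, Z2/Y2) = atan2(sin(q), cos(q)) recovers th = q.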
def _solve_type_4(symo, X1, Y1, X2, Y2, th, r):
"""Solution for the system:
X1*S*r = Y1
X2*C*r = Y2
"""
symo.write_line("# X1*sin({0})*{1} = Y1".format(th, r))
symo.write_line("# X2*cos({0})*{1} = Y2".format(th, r))
X1 = symo.replace(trigsimp(X1), 'X1', th)
Y1 = symo.replace(trigsimp(Y1), 'Y1', th)
X2 = symo.replace(trigsimp(X2), 'X2', th)
Y2 = symo.replace(trigsimp(Y2), 'Y2', th)
YPS = var('YPS' + r)
symo.add_to_dict(YPS, (tools.ONE, - tools.ONE))
symo.add_to_dict(r, YPS*sqrt((Y1/X1)**2 + (Y2/X2)**2))
symo.add_to_dict(th, atan2(Y1/(X1*r), Y2/(X2*r)))
def _solve_type_5(symo, X1, Y1, Z1, X2, Y2, Z2, th, r):
"""Solution for the system:
X1*S = Y1 + Z1*r
X2*C = Y2 + Z2*r
"""
symo.write_line("# X1*sin({0}) = Y1 + Z1*{1}".format(th, r))
symo.write_line("# X2*cos({0}) = Y2 + Z2*{1}".format(th, r))
X1 = symo.replace(trigsimp(X1), 'X1', th)
Y1 = symo.replace(trigsimp(Y1), 'Y1', th)
Z1 = symo.replace(trigsimp(Z1), 'Z1', th)
X2 = symo.replace(trigsimp(X2), 'X2', th)
Y2 = symo.replace(trigsimp(Y2), 'Y2', th)
Z2 = symo.replace(trigsimp(Z2), 'Z2', th)
V1 = symo.replace(Y1/X1, 'V1', r)
W1 = symo.replace(Z1/X1, 'W1', r)
V2 = symo.replace(Y2/X2, 'V2', r)
W2 = symo.replace(Z2/X2, 'W2', r)
    _solve_square(symo, W1**2 + W2**2, 2*(V1*W1 + V2*W2), V1**2 + V2**2, r)
    _solve_type_3(symo, X1, tools.ZERO, Y1 + Z1*r, tools.ZERO, X2, Y2 + Z2*r, th)
def _solve_type_7(symo, V, W, X, Y, Z1, Z2, eps, th_i, th_j):
"""Solution for the system:
V1*Cj + W1*Sj = X*Ci + Y*Si + Z1
eps*(V2*Sj - W2*Cj) = X*Si - Y*Ci + Z2
"""
s = "# V*cos({0}) + W*sin({0}) = X*cos({1}) + Y*sin({1}) + Z1"
symo.write_line(s.format(th_j, th_i))
s = "# eps*(V*sin({0}) - W*cos({0})) = X*sin({1}) - Y*cos({1}) + Z2"
symo.write_line(s.format(th_j, th_i))
V = symo.replace(trigsimp(V), 'V', th_i)
W = symo.replace(trigsimp(W), 'W', th_i)
X = symo.replace(trigsimp(X), 'X', th_i)
Y = symo.replace(trigsimp(Y), 'Y', th_i)
Z1 = symo.replace(trigsimp(Z1), 'Z1', th_i)
Z2 = symo.replace(trigsimp(Z2), 'Z2', th_i)
B1 = symo.replace(2*(Z1*Y + Z2*X), 'B1', th_i)
B2 = symo.replace(2*(Z1*X - Z2*Y), 'B2', th_i)
B3 = symo.replace(V**2 + W**2 - X**2 - Y**2 - Z1**2 - Z2**2, 'B3', th_i)
_solve_type_2(symo, B1, B2, B3, th_i)
Zi1 = symo.replace(X*cos(th_i) + Y*sin(th_i) + Z1, 'Zi1', th_j)
Zi2 = symo.replace(X*sin(th_i) - Y*cos(th_i) + Z2, 'Zi2', th_j)
_solve_type_3(symo, W, V, Zi1, eps*V, -eps*W, Zi2, th_j)
def _solve_type_8(symo, X, Y, Z1, Z2, th_i, th_j):
"""Solution for the system:
X*Ci + Y*Cij = Z1
X*Si + Y*Sij = Z2
"""
symo.write_line("# X*cos({0}) + Y*cos({0} + {1}) = Z1".format(th_i, th_j))
symo.write_line("# X*sin({0}) + Y*sin({0} + {1}) = Z2".format(th_i, th_j))
X = symo.replace(trigsimp(X), 'X', th_j)
Y = symo.replace(trigsimp(Y), 'Y', th_j)
Z1 = symo.replace(trigsimp(Z1), 'Z1', th_j)
Z2 = symo.replace(trigsimp(Z2), 'Z2', th_j)
Cj = symo.replace((Z1**2 + Z2**2 - X**2 - Y**2) / (2*X*Y), 'C', th_j)
YPS = var('YPS%s' % th_j)
symo.add_to_dict(YPS, (tools.ONE, -tools.ONE))
symo.add_to_dict(th_j, atan2(YPS*sqrt(1 - Cj**2), Cj))
Q1 = symo.replace(X + Y*cos(th_j), 'Q1', th_i)
Q2 = symo.replace(Y*sin(th_j), 'Q2', th_i)
Den = symo.replace(Q1**2 + Q2**2, 'Den', th_i)
Si = symo.replace((Q1*Z2 - Q2*Z1)/Den, 'S', th_i)
Ci = symo.replace((Q1*Z1 + Q2*Z2)/Den, 'C', th_i)
symo.add_to_dict(th_i, atan2(Si, Ci))
def _solve_square(symo, A, B, C, x):
""" solution for the equation:
A*x**2 + B*x + C = 0
"""
A = symo.replace(A, 'A', x)
B = symo.replace(B, 'B', x)
C = symo.replace(C, 'C', x)
    Delta = symo.replace(B**2 - 4*A*C, 'Delta', x)
YPS = var('YPS' + x)
symo.add_to_dict(YPS, (tools.ONE, - tools.ONE))
symo.add_to_dict(x, (-B + YPS*sqrt(Delta))/(2*A))
def _check_const(consts, *xs):
is_ok = True
for coef in consts:
for x in xs:
is_ok &= not coef.has(x)
return is_ok
def _get_coefs(eq, A1, A2, *xs):
eqe = eq.expand()
X = tools.get_max_coef(eqe, A1)
eqe = eqe.xreplace({A1: tools.ZERO})
Y = tools.get_max_coef(eqe, A2)
Z = eqe.xreplace({A2: tools.ZERO})
# is_ok = not X.has(A2) and not X.has(A1) and not Y.has(A2)
is_ok = True
is_ok &= _check_const((X, Y, Z), *xs)
return X, Y, Z, is_ok
|
py | 1a35996fcd777cee3273334a083c785931ea609c | #appModules/totalcmd.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2012 NVDA Contributors
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import appModuleHandler
from NVDAObjects.IAccessible import IAccessible
import speech
import controlTypes
oldActivePannel=0
class AppModule(appModuleHandler.AppModule):
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
if obj.windowClassName in ("TMyListBox", "TMyListBox.UnicodeClass"):
clsList.insert(0, TCList)
class TCList(IAccessible):
def event_gainFocus(self):
global oldActivePannel
if oldActivePannel !=self.windowControlID:
oldActivePannel=self.windowControlID
obj=self
while obj and obj.parent and obj.parent.windowClassName!="TTOTAL_CMD":
obj=obj.parent
counter=0
while obj and obj.previous and obj.windowClassName!="TPanel":
obj=obj.previous
if obj.windowClassName!="TDrivePanel":
counter+=1
if counter==2:
speech.speakMessage(_("left"))
else:
speech.speakMessage(_("right"))
super(TCList,self).event_gainFocus()
def reportFocus(self):
if self.name:
speakList=[]
if controlTypes.STATE_SELECTED in self.states:
speakList.append(controlTypes.stateLabels[controlTypes.STATE_SELECTED])
speakList.append(self.name.split("\\")[-1])
speech.speakMessage(" ".join(speakList))
else:
super(TCList,self).reportFocus()
|
py | 1a359986935b6134420ff4f1c5e63118addf849d | #Algorithm that reads an employee's salary and displays his new salary with a 15% increase
print('=' * 10, 'CHALLENGE 013', '=' * 10)
nam = input("What is the employee's name? ")
sal = float(input("What is the employee's salary? R$"))
adj = sal * 15 / 100
nsa = sal + adj
print('-' * 35)
print('The employee {} earned: R${:.2f}'.format(nam, sal))
print('With a 15% raise worth R${:.2f}'.format(adj))
print('$' * 15)
print('They now receive R${:.2f}'.format(nsa))
print('$' * 15)
|
py | 1a359aa78d25e5641ccc48034d4c444dfdf70ce6 | def SetItemButtonTexture(button, texture):
iconTexture = getglobal(button.GetName() + "IconTexture")
if texture:
iconTexture.Show()
else:
iconTexture.Hide()
iconTexture.SetTexture(texture)
iconTexture.SetWidth(36)
iconTexture.SetHeight(36)
# ClientAPI.LogInfo("Set icon texture for " + iconTexture.GetName() + " to " + texture)
def SetItemButtonCount(button, count):
itemCount = getglobal(button.GetName() + "Count")
if count > 0:
itemCount.SetText(count.ToString())
itemCount.Show()
else:
itemCount.Hide()
def SetActionButtonTexture(button, texture):
iconTexture = getglobal(button.GetName() + "NormalTexture")
if texture:
iconTexture.Show()
else:
iconTexture.Hide()
iconTexture.SetTexture(texture)
iconTexture.SetWidth(36)
iconTexture.SetHeight(36)
Console.WriteLine("Set icon texture for " + iconTexture.GetName() + " to " + texture)
def PaperDollFrame_OnLoad(frame):
print "In onload"
# None of these stats are available from the server yet
# CharacterAttackFrameLabel.SetText("Melee Attack")
# CharacterDamageFrameLabel.SetText("Damage:")
# CharacterAttackPowerFrameLabel.SetText("Attack:")
# CharacterRangedAttackFrameLabel.SetText("Ranged Attack")
# CharacterRangedDamageFrameLabel.SetText("Damage:")
# CharacterRangedAttackPowerFrameLabel.SetText("Attack:")
# CharacterArmorFrameLabel.SetText("Armor:")
def PaperDollFrame_OnShow(frame):
print "Should show stats"
#PaperDollFrame_SetGuild();
#PaperDollFrame_SetLevel();
PaperDollFrame_SetStats()
#PaperDollFrame_SetResistances();
# PaperDollFrame_SetArmor();
# PaperDollFrame_SetDamage();
#PaperDollFrame_SetAttackPower();
#PaperDollFrame_SetAttackBothHands();
#PaperDollFrame_SetRangedAttack();
#PaperDollFrame_SetRangedDamage();
#PaperDollFrame_SetRangedAttackPower();
def PaperDollFrame_SetStats():
STATS = "Strength", "Dexterity", "Intelligence"
for i in range(1, 4):
label = getglobal("CharacterStatFrame%dLabel" % i)
text = getglobal("CharacterStatFrame%dStatText" % i)
frame = getglobal("CharacterStatFrame%d" % i)
label.SetText(STATS[i - 1])
base, stat, posBuff, negBuff = UnitStat("unit", i)
text.SetText(stat)
def PaperDollFrame_OnHide(frame):
print "Hide"
def PaperDollItemSlotButton_OnLoad(frame):
slotName = frame.GetName()
id, textureName = GetInventorySlotInfo(slotName.Substring(10))
frame.SetID(id)
texture = getglobal(slotName + "IconTexture")
texture.SetTexture(textureName)
# frame.backgroundTextureName = textureName
PaperDollItemSlotButton_Update(frame)
def PaperDollItemSlotButton_OnClick(frame):
print "Got click in inventory button: %d" % frame.GetID()
def PaperDollItemSlotButton_Update(frame):
textureName = GetInventoryItemTexture("player", frame.GetID())
if textureName:
SetItemButtonTexture(frame, textureName)
SetItemButtonCount(frame, GetInventoryItemCount("player", frame.GetID()))
else:
SetItemButtonTexture(frame, "default")
SetItemButtonCount(frame, 0)
# SetItemButtonTextureVertexColor(frame, 1.0, 1.0, 1.0)
# SetItemButtonNormalTextureVertexColor(frame, 1.0, 1.0, 1.0)
# cooldown:Hide();
|
py | 1a359b4e7b913e5da40c53399dd1b57bf4cda692 | #!/usr/bin/env python3
import requests
import sys
# Old URL = "https://brasil.io/api/dataset/covid19/caso/data/?city=Manaus"
URL_UF = "https://api.brasil.io/v1/dataset/covid19/caso_full/data/?state=CE&is_last=True&page=1"
URL_MUN = "https://api.brasil.io/v1/dataset/covid19/caso/data/?city=Fortaleza"
h=dict()
h['Authorization'] = 'Token ' + ( str(sys.argv[1]) if (len(sys.argv)>1) else '')
data = requests.get(URL_MUN, headers=h)
if data.status_code == 200:
with open('./data/data.json', 'w') as f:
f.write(data.text)
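# Hedged usage sketch (not part of the original script): run it with a
# Brasil.io API token as the first CLI argument; the token is read from
# sys.argv[1] above and sent as an Authorization header.
#
#   python3 <this_script>.py YOUR_API_TOKEN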
|
py | 1a359b5056aafe462fc3cf7aa8c0219b149b1422 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
from wsme import types as wtypes
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers import v1
from ironic.api.controllers.v1 import versions
from ironic.api import expose
ID_VERSION1 = 'v1'
class Version(base.APIBase):
"""An API version representation.
This class represents an API version, including the minimum and
maximum minor versions that are supported within the major version.
"""
id = wtypes.text
"""The ID of the (major) version, also acts as the release number"""
links = [link.Link]
"""A Link that point to a specific version of the API"""
status = wtypes.text
"""Status of the version.
One of:
* CURRENT - the latest version of API,
* SUPPORTED - supported, but not latest, version of API,
* DEPRECATED - supported, but deprecated, version of API.
"""
version = wtypes.text
"""The current, maximum supported (major.minor) version of API."""
min_version = wtypes.text
"""Minimum supported (major.minor) version of API."""
def __init__(self, id, min_version, version, status='CURRENT'):
self.id = id
self.links = [link.Link.make_link('self', pecan.request.public_url,
self.id, '', bookmark=True)]
self.status = status
self.version = version
self.min_version = min_version
class Root(base.APIBase):
name = wtypes.text
"""The name of the API"""
description = wtypes.text
"""Some information about this API"""
versions = [Version]
"""Links to all the versions available in this API"""
default_version = Version
"""A link to the default version of the API"""
@staticmethod
def convert():
root = Root()
root.name = "OpenStack Ironic API"
root.description = ("Ironic is an OpenStack project which aims to "
"provision baremetal machines.")
root.default_version = Version(ID_VERSION1,
versions.MIN_VERSION_STRING,
versions.MAX_VERSION_STRING)
root.versions = [root.default_version]
return root
class RootController(rest.RestController):
_versions = [ID_VERSION1]
"""All supported API versions"""
_default_version = ID_VERSION1
"""The default API version"""
v1 = v1.Controller()
@expose.expose(Root)
def get(self):
# NOTE: The reason why convert() it's being called for every
# request is because we need to get the host url from
# the request object to make the links.
return Root.convert()
@pecan.expose()
def _route(self, args, request=None):
"""Overrides the default routing behavior.
It redirects the request to the default version of the ironic API
if the version number is not specified in the url.
"""
if args[0] and args[0] not in self._versions:
args = [self._default_version] + args
return super(RootController, self)._route(args, request)
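# Hedged note (not in the original file): a GET on "/" returns the Root
# document built by convert() above, while a request such as "/nodes" is
# silently rerouted by _route() to the default version, i.e. "/v1/nodes".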
|
py | 1a359c030bf1ec39aa367ef262305de898dd5065 | import pytest
import tweepy
import requests
import unittest.mock
from pocs.utils.social_twitter import SocialTwitter
from pocs.utils.social_slack import SocialSlack
@pytest.fixture(scope='module')
def twitter_config():
twitter_config = {'consumer_key': 'mock_consumer_key', 'consumer_secret': 'mock_consumer_secret', 'access_token': 'mock_access_token', 'access_token_secret': 'access_token_secret'}
return twitter_config
@pytest.fixture(scope='module')
def slack_config():
slack_config = {'webhook_url': 'mock_webhook_url', 'output_timestamp': False}
return slack_config
# Twitter sink tests
def test_no_consumer_key(twitter_config):
with unittest.mock.patch.dict(twitter_config), pytest.raises(ValueError) as ve:
del twitter_config['consumer_key']
SocialTwitter(**twitter_config)
assert False # We don't reach this point
assert 'consumer_key parameter is not defined.' == str(ve.value)
def test_no_consumer_secret(twitter_config):
with unittest.mock.patch.dict(twitter_config), pytest.raises(ValueError) as ve:
del twitter_config['consumer_secret']
SocialTwitter(**twitter_config)
assert False # We don't reach this point
assert 'consumer_secret parameter is not defined.' == str(ve.value)
def test_no_access_token(twitter_config):
with unittest.mock.patch.dict(twitter_config), pytest.raises(ValueError) as ve:
del twitter_config['access_token']
SocialTwitter(**twitter_config)
assert False # We don't reach this point
assert 'access_token parameter is not defined.' == str(ve.value)
def test_no_access_token_secret(twitter_config):
with unittest.mock.patch.dict(twitter_config), pytest.raises(ValueError) as ve:
del twitter_config['access_token_secret']
SocialTwitter(**twitter_config)
assert False # We don't reach this point
assert 'access_token_secret parameter is not defined.' == str(ve.value)
def test_send_message_twitter(twitter_config):
with unittest.mock.patch.object(tweepy.API, 'update_status') as mock_update_status:
social_twitter = SocialTwitter(**twitter_config)
mock_message = "mock_message"
mock_timestamp = "mock_timestamp"
social_twitter.send_message(mock_message, mock_timestamp)
mock_update_status.assert_called_once_with('{} - {}'.format(mock_message, mock_timestamp))
def test_send_message_twitter_no_timestamp(twitter_config):
    with unittest.mock.patch.dict(twitter_config, {'output_timestamp': False}), \
            unittest.mock.patch.object(tweepy.API, 'update_status') as mock_update_status:
social_twitter = SocialTwitter(**twitter_config)
mock_message = "mock_message"
mock_timestamp = "mock_timestamp"
social_twitter.send_message(mock_message, mock_timestamp)
mock_update_status.assert_called_once_with(mock_message)
# Slack sink tests
def test_no_webhook_url(slack_config):
with unittest.mock.patch.dict(slack_config), pytest.raises(ValueError) as ve:
del slack_config['webhook_url']
        SocialSlack(**slack_config)
assert 'webhook_url parameter is not defined.' == str(ve.value)
def test_send_message_slack(slack_config):
with unittest.mock.patch.object(requests, 'post') as mock_post:
social_slack = SocialSlack(**slack_config)
mock_message = "mock_message"
mock_timestamp = "mock_timestamp"
social_slack.send_message(mock_message, mock_timestamp)
mock_post.assert_called_once_with(slack_config['webhook_url'], json={'text': mock_message})
def test_send_message_slack_timestamp(slack_config):
    with unittest.mock.patch.dict(slack_config, {'output_timestamp': True}), \
            unittest.mock.patch.object(requests, 'post') as mock_post:
social_slack = SocialSlack(**slack_config)
mock_message = "mock_message"
mock_timestamp = "mock_timestamp"
social_slack.send_message(mock_message, mock_timestamp)
mock_post.assert_called_once_with(slack_config['webhook_url'], json={'text': '{} - {}'.format(mock_message, mock_timestamp)})
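# The suites above can be run directly with pytest, e.g. (hypothetical path):
#   pytest pocs/tests/test_social.py -v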
|
py | 1a359cc2bf3a9fe85a0a9342642b0a667a3f9c32 | # Copyright 2021 University College London. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Signal processing operations."""
import numpy as np
import tensorflow as tf
from tensorflow_mri.python.util import check_util
def hamming(arg, name='hamming'):
"""Calculate a Hamming window at the specified coordinates.
Coordinates should be in the range `[-pi, pi]`. The center of the window
is at 0.
Args:
arg: Input tensor.
name: Name to use for the scope.
Returns:
The value of a Hamming window at the specified coordinates.
"""
return _raised_cosine(arg, 0.54, 0.46, name=name)
def _raised_cosine(arg, a, b, name=None):
"""Helper function for computing a raised cosine window.
Args:
arg: Input tensor.
a: The alpha parameter to the raised cosine filter.
b: The beta parameter to the raised cosine filter.
name: Name to use for the scope.
Returns:
A `Tensor` of shape `arg.shape`.
"""
with tf.name_scope(name):
return a - b * tf.math.cos(arg + np.pi)
def filter_kspace(kspace, traj, filter_type='hamming'):
"""Filter *k*-space.
Multiplies *k*-space by a filtering function.
Args:
kspace: A `Tensor` of any shape. The input *k*-space.
traj: A `Tensor` of shape `kspace.shape + [N]`, where `N` is the number of
spatial dimensions.
filter_type: A `str`. Must be one of `"hamming"`.
Returns:
A `Tensor` of shape `kspace.shape`. The filtered *k*-space.
"""
# TODO: add support for Cartesian *k*-space.
kspace = tf.convert_to_tensor(kspace)
filter_type = check_util.validate_enum(
filter_type, valid_values={'hamming'}, name='filter_type')
filter_func = {
'hamming': hamming
}[filter_type]
traj_norm = tf.norm(traj, axis=-1)
return kspace * tf.cast(filter_func(traj_norm), kspace.dtype)
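# Minimal usage sketch for `filter_kspace` (hypothetical shapes; `traj` is
# assumed to hold non-Cartesian coordinates in [-pi, pi]):
#   kspace = tf.complex(tf.random.normal([8, 128]), tf.random.normal([8, 128]))
#   traj = tf.random.uniform([8, 128, 2], minval=-np.pi, maxval=np.pi)
#   filtered = filter_kspace(kspace, traj, filter_type='hamming')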
def crop_kspace(kspace, traj, cutoff, mode='low_pass'):
"""Crop *k*-space.
Crops all frequencies above or below the specified frequency.
Args:
kspace: A `Tensor` of any shape. The input *k*-space.
traj: A `Tensor` of shape `kspace.shape + [N]`, where `N` is the number of
spatial dimensions.
cutoff: A `float` between `-pi` and `pi`. The cutoff frequency.
mode: A `str`. Must be one of `low_pass` or `high_pass`.
Returns:
    A tuple of two `Tensor`s: the cropped *k*-space and the correspondingly
    cropped trajectory.
"""
# TODO: add support for Cartesian *k*-space.
mode = check_util.validate_enum(mode, {'low_pass', 'high_pass'}, 'mode')
traj_norm = tf.norm(traj, axis=-1)
if mode == 'low_pass':
mask = traj_norm < cutoff
elif mode == 'high_pass':
mask = traj_norm > cutoff
filt_kspace = tf.gather_nd(kspace, tf.where(mask))
filt_traj = tf.gather_nd(traj, tf.where(mask))
return filt_kspace, filt_traj
|
py | 1a359de582391f8837b8b2b05dbdde399d1f175f | import subprocess
import yaml
from ckan_cloud_operator import kubectl
from ckan_cloud_operator import logs
from ckan_cloud_operator.gitlab import CkanGitlab
from ckan_cloud_operator.deis_ckan import datapusher
from ckan_cloud_operator.providers.db import manager as db_manager
class DeisCkanInstanceEnvvars(object):
def __init__(self, instance):
self.instance = instance
self.solr_spec = self.instance.spec.solrCloudCollection
self.site_url = None
def _apply_instance_envvars_overrides(self, envvars):
if 'overrides' in self.instance.spec.envvars:
print('Applying instance envvars overrides')
envvars.update(**self.instance.spec.envvars['overrides'])
def _update(self):
spec = self.instance.spec
db_name = spec.db['name']
db_password = self.instance.annotations.get_secret('databasePassword')
datastore_name = spec.datastore['name']
datastore_password = self.instance.annotations.get_secret('datastorePassword')
datastore_ro_user = self.instance.annotations.get_secret('datastoreReadonlyUser')
datastore_ro_password = self.instance.annotations.get_secret('datatastoreReadonlyPassword')
no_db_proxy = True
# db_no_db_proxy = spec.db.get('no-db-proxy') == 'yes'
# datastore_no_db_proxy = spec.datastore.get('no-db-proxy') == 'yes'
# if db_no_db_proxy or datastore_no_db_proxy:
# assert db_no_db_proxy and datastore_no_db_proxy, 'must set both DB and datastore with no-db-proxy'
# no_db_proxy = True
# else:
# no_db_proxy = False
from ckan_cloud_operator.providers.solr import manager as solr_manager
solr_http_endpoint = solr_manager.get_internal_http_endpoint()
solr_collection_name = spec.solrCloudCollection['name']
if 'fromSecret' in spec.envvars:
envvars = kubectl.get(f'secret {spec.envvars["fromSecret"]}')
            envvars = yaml.safe_load(kubectl.decode_secret(envvars, 'envvars.yaml'))
elif 'fromGitlab' in spec.envvars:
envvars = CkanGitlab().get_envvars(spec.envvars['fromGitlab'])
else:
raise Exception(f'invalid envvars spec: {spec.envvars}')
from ckan_cloud_operator.providers.storage import manager as storage_manager
storage_hostname, storage_access_key, storage_secret_key = storage_manager.get_provider().get_credentials()
storage_path_parts = spec.storage['path'].strip('/').split('/')
storage_bucket = storage_path_parts[0]
storage_path = '/'.join(storage_path_parts[1:])
if no_db_proxy:
postgres_host, postgres_port = db_manager.get_internal_unproxied_db_host_port(db_prefix=spec.db.get('dbPrefix') or '')
logs.info(f'Bypassing db proxy, connecting to DB directly: {postgres_host}:{postgres_port}')
else:
postgres_host, postgres_port = db_manager.get_internal_proxy_host_port()
logs.info(f'Connecting to DB proxy: {postgres_host}:{postgres_port}')
envvars.update(
CKAN_SQLALCHEMY_URL=f"postgresql://{db_name}:{db_password}@{postgres_host}:{postgres_port}/{db_name}",
CKAN___BEAKER__SESSION__URL=f"postgresql://{db_name}:{db_password}@{postgres_host}:{postgres_port}/{db_name}",
CKAN__DATASTORE__READ_URL=f"postgresql://{datastore_ro_user}:{datastore_ro_password}@{postgres_host}:{postgres_port}/{datastore_name}",
CKAN__DATASTORE__WRITE_URL=f"postgresql://{datastore_name}:{datastore_password}@{postgres_host}:{postgres_port}/{datastore_name}",
CKAN_SOLR_URL=f"{solr_http_endpoint}/{solr_collection_name}",
CKANEXT__S3FILESTORE__AWS_STORAGE_PATH=storage_path,
CKANEXT__S3FILESTORE__AWS_ACCESS_KEY_ID=storage_access_key,
CKANEXT__S3FILESTORE__AWS_SECRET_ACCESS_KEY=storage_secret_key,
CKANEXT__S3FILESTORE__AWS_BUCKET_NAME=storage_bucket,
CKANEXT__S3FILESTORE__HOST_NAME=f'https://{storage_hostname}/',
CKANEXT__S3FILESTORE__REGION_NAME='us-east-1',
CKANEXT__S3FILESTORE__SIGNATURE_VERSION='s3v4',
CKAN__DATAPUSHER__URL=datapusher.get_datapusher_url(envvars.get('CKAN__DATAPUSHER__URL')),
)
from ckan_cloud_operator.providers.ckan import manager as ckan_manager
ckan_manager.update_deis_instance_envvars(self.instance, envvars)
assert envvars['CKAN_SITE_ID'] and envvars['CKAN_SITE_URL'] and envvars['CKAN_SQLALCHEMY_URL']
# print(yaml.dump(envvars, default_flow_style=False))
self._apply_instance_envvars_overrides(envvars)
envvars = {
k: ('' if v is None else v)
for k,v
in envvars.items()
}
kubectl.update_secret('ckan-envvars', envvars, namespace=self.instance.id)
self.site_url = envvars.get('CKAN_SITE_URL')
def update(self):
self.instance.annotations.update_status('envvars', 'created', lambda: self._update(), force_update=True)
def delete(self):
print('Deleting instance envvars secret')
subprocess.check_call(f'kubectl -n {self.instance.id} delete secret/ckan-envvars', shell=True)
def get(self, full=False):
exitcode, output = subprocess.getstatusoutput(f'kubectl -n {self.instance.id} get secret/ckan-envvars -o yaml')
if exitcode == 0:
            secret = kubectl.decode_secret(yaml.safe_load(output))
res = {'ready': 'CKAN_SITE_URL' in secret}
if full:
res['envvars'] = secret
else:
res = {'ready': False, 'error': output}
return res
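# Hypothetical usage sketch (an `instance` object comes from the operator's
# DeisCkanInstance machinery, outside this module):
#   envvars = DeisCkanInstanceEnvvars(instance)
#   envvars.update()                 # create/refresh the ckan-envvars secret
#   print(envvars.get(full=False))   # {'ready': True}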
|
py | 1a359e40b3bc61bae807ac042b2f6d31209fd4bc | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class PurchaseConfigSettings(models.TransientModel):
_name = 'purchase.config.settings'
_inherit = 'res.config.settings'
company_id = fields.Many2one('res.company', string='Company', required=True,
default=lambda self: self.env.user.company_id)
po_lead = fields.Float(related='company_id.po_lead', string="Purchase Lead Time *")
po_lock = fields.Selection(related='company_id.po_lock', string="Purchase Order Modification *")
po_double_validation = fields.Selection(related='company_id.po_double_validation', string="Levels of Approvals *")
po_double_validation_amount = fields.Monetary(related='company_id.po_double_validation_amount', string="Double validation amount *", currency_field='company_currency_id')
company_currency_id = fields.Many2one('res.currency', related='company_id.currency_id', readonly=True,
help='Utility field to express amount currency')
group_product_variant = fields.Selection([
(0, "No variants on products"),
(1, 'Products can have several attributes, defining variants (Example: size, color,...)')
], "Product Variants",
        help='Working with product variants allows you to define several variants of the same product and eases product management, in the ecommerce for example',
implied_group='product.group_product_variant')
group_uom = fields.Selection([
(0, 'Products have only one unit of measure (easier)'),
        (1, 'Some products may be sold/purchased in different units of measure (advanced)')
], "Units of Measure",
implied_group='product.group_uom',
help="""Allows you to select and maintain different units of measure for products.""")
group_costing_method = fields.Selection([
(0, 'Set a fixed cost price on each product'),
(1, "Use a 'Fixed', 'Real' or 'Average' price costing method")
], "Costing Methods",
implied_group='stock_account.group_inventory_valuation',
help="""Allows you to compute product cost price based on average cost.""")
module_purchase_requisition = fields.Selection([
(0, 'Purchase propositions trigger draft purchase orders to a single supplier'),
(1, 'Allow using call for tenders to get quotes from multiple suppliers (advanced)')
], "Calls for Tenders",
help="Calls for tenders are used when you want to generate requests for quotations to several vendors for a given set of products.\n"
"You can configure per product if you directly do a Request for Quotation "
"to one vendor or if you want a Call for Tenders to compare offers from several vendors.")
group_warning_purchase = fields.Selection([
(0, 'All the products and the customers can be used in purchase orders'),
(1, 'An informative or blocking warning can be set on a product or a customer')
], "Warning", implied_group='purchase.group_warning_purchase')
module_stock_dropshipping = fields.Selection([
(0, 'Suppliers always deliver to your warehouse(s)'),
(1, "Allow suppliers to deliver directly to your customers")
], "Dropshipping",
help='\nCreates the dropship Route and add more complex tests\n'
'-This installs the module stock_dropshipping.')
group_manage_vendor_price = fields.Selection([
(0, 'Manage vendor price on the product form'),
(1, 'Allow using and importing vendor pricelists')
], "Vendor Price",
implied_group="purchase.group_manage_vendor_price")
class AccountConfigSettings(models.TransientModel):
_inherit = 'account.config.settings'
group_analytic_account_for_purchases = fields.Boolean('Analytic accounting for purchases',
implied_group='purchase.group_analytic_accounting',
help="Allows you to specify an analytic account on purchase order lines.")
|
py | 1a359e46d8e29cefe66c1fa18b994cd6afe0dae9 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
from Alphas.HistoricalReturnsAlphaModel import HistoricalReturnsAlphaModel
from Portfolio.BlackLittermanOptimizationPortfolioConstructionModel import *
from Portfolio.UnconstrainedMeanVariancePortfolioOptimizer import UnconstrainedMeanVariancePortfolioOptimizer
from Risk.NullRiskManagementModel import NullRiskManagementModel
### <summary>
### Black-Litterman framework algorithm
### Uses the HistoricalReturnsAlphaModel and the BlackLittermanPortfolioConstructionModel
### to create an algorithm that rebalances the portfolio according to Black-Litterman portfolio optimization
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
class BlackLittermanPortfolioOptimizationFrameworkAlgorithm(QCAlgorithm):
'''Black-Litterman Optimization algorithm.'''
def Initialize(self):
# Set requested data resolution
self.UniverseSettings.Resolution = Resolution.Minute
        # Require order margin to be at least 0.5% of portfolio value; this filters out small trades and reduces fees.
# Commented so regression algorithm is more sensitive
#self.Settings.MinimumOrderMarginPortfolioPercentage = 0.005
self.SetStartDate(2013,10,7) #Set Start Date
self.SetEndDate(2013,10,11) #Set End Date
self.SetCash(100000) #Set Strategy Cash
self.symbols = [ Symbol.Create(x, SecurityType.Equity, Market.USA) for x in [ 'AIG', 'BAC', 'IBM', 'SPY' ] ]
optimizer = UnconstrainedMeanVariancePortfolioOptimizer()
# set algorithm framework models
self.SetUniverseSelection(CoarseFundamentalUniverseSelectionModel(self.coarseSelector))
self.SetAlpha(HistoricalReturnsAlphaModel(resolution = Resolution.Daily))
self.SetPortfolioConstruction(BlackLittermanOptimizationPortfolioConstructionModel(optimizer = optimizer))
self.SetExecution(ImmediateExecutionModel())
self.SetRiskManagement(NullRiskManagementModel())
def coarseSelector(self, coarse):
# Drops SPY after the 8th
last = 3 if self.Time.day > 8 else len(self.symbols)
return self.symbols[0:last]
def OnOrderEvent(self, orderEvent):
if orderEvent.Status == OrderStatus.Filled:
self.Debug(orderEvent)
|
py | 1a359e967e195750226bfe2328b4267fe74576ed | #!/usr/bin/env python
#
# This is a module that gathers a list of serial ports including details on
# GNU/Linux systems.
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2011-2015 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import
import glob
import os
from serial.tools import list_ports_common
class SysFS(list_ports_common.ListPortInfo):
"""Wrapper for easy sysfs access and device info"""
def __init__(self, device):
super(SysFS, self).__init__(device)
# special handling for links
if device is not None and os.path.islink(device):
device = os.path.realpath(device)
is_link = True
else:
is_link = False
self.usb_device_path = None
if os.path.exists('/sys/class/tty/{}/device'.format(self.name)):
self.device_path = os.path.realpath('/sys/class/tty/{}/device'.format(self.name))
self.subsystem = os.path.basename(os.path.realpath(os.path.join(self.device_path, 'subsystem')))
else:
self.device_path = None
self.subsystem = None
# check device type
if self.subsystem == 'usb-serial':
self.usb_interface_path = os.path.dirname(self.device_path)
elif self.subsystem == 'usb':
self.usb_interface_path = self.device_path
else:
self.usb_interface_path = None
# fill-in info for USB devices
if self.usb_interface_path is not None:
self.usb_device_path = os.path.dirname(self.usb_interface_path)
try:
num_if = int(self.read_line(self.usb_device_path, 'bNumInterfaces'))
except ValueError:
num_if = 1
self.vid = int(self.read_line(self.usb_device_path, 'idVendor'), 16)
self.pid = int(self.read_line(self.usb_device_path, 'idProduct'), 16)
self.serial_number = self.read_line(self.usb_device_path, 'serial')
if num_if > 1: # multi interface devices like FT4232
self.location = os.path.basename(self.usb_interface_path)
else:
self.location = os.path.basename(self.usb_device_path)
self.manufacturer = self.read_line(self.usb_device_path, 'manufacturer')
self.product = self.read_line(self.usb_device_path, 'product')
self.interface = self.read_line(self.usb_interface_path, 'interface')
if self.subsystem in ('usb', 'usb-serial'):
self.apply_usb_info()
#~ elif self.subsystem in ('pnp', 'amba'): # PCI based devices, raspi
elif self.subsystem == 'pnp': # PCI based devices
self.description = self.name
self.hwid = self.read_line(self.device_path, 'id')
elif self.subsystem == 'amba': # raspi
self.description = self.name
self.hwid = os.path.basename(self.device_path)
if is_link:
self.hwid += ' LINK={}'.format(device)
def read_line(self, *args):
"""\
Helper function to read a single line from a file.
One or more parameters are allowed, they are joined with os.path.join.
        Returns None on errors.
"""
try:
with open(os.path.join(*args)) as f:
line = f.readline().strip()
return line
except IOError:
return None
def comports(include_links=False):
devices = glob.glob('/dev/ttyS*') # built-in serial ports
devices.extend(glob.glob('/dev/ttyUSB*')) # usb-serial with own driver
devices.extend(glob.glob('/dev/ttyXRUSB*')) # xr-usb-serial port exar (DELL Edge 3001)
devices.extend(glob.glob('/dev/ttyACM*')) # usb-serial with CDC-ACM profile
devices.extend(glob.glob('/dev/ttyAMA*')) # ARM internal port (raspi)
devices.extend(glob.glob('/dev/rfcomm*')) # BT serial devices
devices.extend(glob.glob('/dev/ttyAP*')) # Advantech multi-port serial controllers
if include_links:
devices.extend(list_ports_common.list_links(devices))
return [info
for info in [SysFS(d) for d in devices]
if info.subsystem != "platform"] # hide non-present internal serial ports
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# test
if __name__ == '__main__':
for port, desc, hwid in sorted(comports()):
print("{}: {} [{}]".format(port, desc, hwid))
|
py | 1a359f297d2b6828869bc46242dddabd07ea4c1c | import tensorflow as tf
import tensorflow.contrib as tf_contrib
import numpy as np
# Xavier : tf_contrib.layers.xavier_initializer()
# He : tf_contrib.layers.variance_scaling_initializer()
# Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02)
# l2_decay : tf_contrib.layers.l2_regularizer(0.0001)
weight_init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
weight_regularizer = None
##################################################################################
# Layer
##################################################################################
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
with tf.variable_scope(scope):
if pad_type == 'zero' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
if pad_type == 'reflect' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT')
if sn :
w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.conv2d(input=x, filter=spectral_norm(w),
strides=[1, stride, stride, 1], padding='VALID')
if use_bias :
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
else :
#x = tf.layers.conv2d(inputs=x, filters=channels,
# kernel_size=kernel, kernel_initializer=weight_init,
# kernel_regularizer=weight_regularizer,
# strides=stride, use_bias=use_bias)
x = tf.contrib.layers.conv2d(inputs=x, num_outputs=channels, kernel_size=kernel,
stride=stride, padding='VALID',
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer())
return x
def atrous_conv2d(x, channels, kernel=3, rate=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
with tf.variable_scope(scope):
if pad_type == 'zero' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
if pad_type == 'reflect' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT')
if sn :
w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.atrous_conv2d(value=x, filters=spectral_norm(w), rate=2, padding='SAME')
if use_bias :
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
else :
w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.atrous_conv2d(value=x, filters=w, rate=2, padding='SAME')
return x
def atrous_pool2d(x, channels, kernel=3, rate=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
with tf.variable_scope(scope):
if pad_type == 'zero' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
if pad_type == 'reflect' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT')
if sn :
w = tf.constant("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.atrous_conv2d(value=x, filters=spectral_norm(w), rate=2, padding='SAME')
if use_bias :
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
else :
w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.atrous_conv2d(value=x, filters=w, rate=2, padding='SAME')
return x
def deconv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, sn=False, scope='deconv_0'):
with tf.variable_scope(scope):
x_shape = x.get_shape().as_list()
if padding == 'SAME':
output_shape = [x_shape[0], x_shape[1] * stride, x_shape[2] * stride, channels]
else:
output_shape =[x_shape[0], x_shape[1] * stride + max(kernel - stride, 0), x_shape[2] * stride + max(kernel - stride, 0), channels]
if sn :
w = tf.get_variable("kernel", shape=[kernel, kernel, channels, x.get_shape()[-1]], initializer=weight_init, regularizer=weight_regularizer)
x = tf.nn.conv2d_transpose(x, filter=spectral_norm(w), output_shape=output_shape, strides=[1, stride, stride, 1], padding=padding)
if use_bias :
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
else :
x = tf.layers.conv2d_transpose(inputs=x, filters=channels,
kernel_size=kernel, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer,
strides=stride, padding=padding, use_bias=use_bias)
return x
def fully_conneted(x, units, use_bias=True, sn=False, scope='fully_0'):
with tf.variable_scope(scope):
x = flatten(x)
shape = x.get_shape().as_list()
channels = shape[-1]
if sn :
w = tf.get_variable("kernel", [channels, units], tf.float32,
initializer=weight_init, regularizer=weight_regularizer)
if use_bias :
bias = tf.get_variable("bias", [units],
initializer=tf.constant_initializer(0.0))
x = tf.matmul(x, spectral_norm(w)) + bias
else :
x = tf.matmul(x, spectral_norm(w))
else :
x = tf.layers.dense(x, units=units, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer, use_bias=use_bias)
return x
def flatten(x) :
return tf.layers.flatten(x)
def hw_flatten(x) :
return tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]])
#########################
#deformable conv
#########################
# Definition of the regular 2D convolutional
def deform_conv(x, kernel_size, stride, output_channals, mode):
if mode == 'offset':
layer_output = tf.layers.conv2d(x, filters=output_channals, kernel_size=kernel_size, strides=stride, padding='SAME', kernel_initializer = tf.zeros_initializer(), bias_initializer = tf.zeros_initializer())
layer_output = tf.clip_by_value(layer_output, -0.25*int(x.shape[1]), 0.25*int(x.shape[1]))
if mode == 'weight':
layer_output = tf.layers.conv2d(x, filters=output_channals, kernel_size=kernel_size, strides=stride, padding='SAME', bias_initializer = tf.zeros_initializer())
if mode == 'feature':
#layer_output = tf.layers.conv2d(x, filters=output_channals, kernel_size=kernel_size, strides=kernel_size, padding='SAME', kernel_initializer = tf.constant_initializer(0.5), bias_initializer = tf.zeros_initializer())
#layer_output = tf.layers.conv2d(x, filters=output_channals, kernel_size=kernel_size, strides=kernel_size, padding='SAME', initializer=weight_init,regularizer=weight_regularizer)
layer_output = conv(x, output_channals, kernel=kernel_size, stride=kernel_size, sn=True, scope='feature')
return layer_output
# Create the pn [1, 1, 1, 2N]
def get_pn(kernel_size, dtype):
pn_x, pn_y = np.meshgrid(range(-(kernel_size-1)//2, (kernel_size-1)//2+1), range(-(kernel_size-1)//2, (kernel_size-1)//2+1), indexing="ij")
# The order is [x1, x2, ..., y1, y2, ...]
pn = np.concatenate((pn_x.flatten(), pn_y.flatten()))
pn = np.reshape(pn, [1, 1, 1, 2 * kernel_size ** 2])
# Change the dtype of pn
pn = tf.constant(pn, dtype)
return pn
# Create the p0 [1, h, w, 2N]
def get_p0(kernel_size, x_size, dtype):
bs, h, w, C = x_size
p0_x, p0_y = np.meshgrid(range(0, h), range(0, w), indexing="ij")
p0_x = p0_x.flatten().reshape(1, h, w, 1).repeat(kernel_size ** 2, axis=3)
p0_y = p0_y.flatten().reshape(1, h, w, 1).repeat(kernel_size ** 2, axis=3)
p0 = np.concatenate((p0_x, p0_y), axis=3)
# Change the dtype of p0
p0 = tf.constant(p0, dtype)
return p0
def get_q(x_size, dtype):
bs, h, w, c = x_size
q_x, q_y = np.meshgrid(range(0, h), range(0, w), indexing="ij")
q_x = q_x.flatten().reshape(h, w, 1)
q_y = q_y.flatten().reshape(h, w, 1)
q = np.concatenate((q_x, q_y), axis=2)
# Change the dtype of q
q = tf.constant(q, dtype)
return q
def reshape_x_offset(x_offset, kernel_size):
bs, h, w, N, C = x_offset.get_shape().as_list()
# Get the new_shape
new_shape = [bs, h, w * kernel_size, C]
x_offset = [tf.reshape(x_offset[:, :, :, s:s+kernel_size, :], new_shape) for s in range(0, N, kernel_size)]
x_offset = tf.concat(x_offset, axis=2)
# Reshape to final shape [batch_size, h*kernel_size, w*kernel_size, C]
x_offset = tf.reshape(x_offset, [bs, h * kernel_size, w * kernel_size, C])
return x_offset
def deform_con2v(input, num_outputs, kernel_size, stride, trainable, name, reuse):
N = kernel_size ** 2
with tf.variable_scope(name, reuse=reuse):
bs, h, w, C = input.get_shape().as_list()
# offset with shape [batch_size, h, w, 2N]
offset = deform_conv(input, kernel_size, stride, 2 * N, "offset")
#offset = tf.constant(0.0,shape=[bs, h, w, 2*N])
# delte_weight with shape [batch_size, h, w, N * C]
#delte_weight = deform_conv(input, kernel_size, stride, N * C, "weight")
#delte_weight = tf.sigmoid(delte_weight)
# pn with shape [1, 1, 1, 2N]
pn = get_pn(kernel_size, offset.dtype)
# p0 with shape [1, h, w, 2N]
p0 = get_p0(kernel_size, [bs, h, w, C], offset.dtype)
# p with shape [batch_size, h, w, 2N]
p = pn + p0 + offset
# Reshape p to [batch_size, h, w, 2N, 1, 1]
p = tf.reshape(p, [bs, h, w, 2 * N, 1, 1])
# q with shape [h, w, 2]
q = get_q([bs, h, w, C], offset.dtype)
# Bilinear interpolation kernel G ([batch_size, h, w, N, h, w])
gx = tf.maximum(1 - tf.abs(p[:, :, :, :N, :, :] - q[:, :, 0]), 0)
gy = tf.maximum(1 - tf.abs(p[:, :, :, N:, :, :] - q[:, :, 1]), 0)
G = gx * gy
# Reshape G to [batch_size, h*w*N, h*w]
G = tf.reshape(G, [bs, h * w * N, h * w])
# Reshape x to [batch_size, h*w, C]
x = tf.reshape(input, [bs, h*w, C])
# x_offset with shape [batch_size, h, w, N, C]
x = tf.reshape(tf.matmul(G, x), [bs, h, w, N, C])
# Reshape x_offset to [batch_size, h*kernel_size, w*kernel_size, C]
x = reshape_x_offset(x, kernel_size)
# Reshape delte_weight to [batch_size, h*kernel_size, w*kernel_size, C]
#delte_weight = tf.reshape(delte_weight, [batch_size, h*kernel_size, w*kernel_size, C])
#y = x_offset * delte_weight
# Get the output of the deformable convolutional layer
x = deform_conv(x, kernel_size, stride, num_outputs, "feature")
return x, offset
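# Hypothetical usage sketch for the deformable convolution above (shapes and
# names are illustrative only, not part of the original module):
#   inp = tf.placeholder(tf.float32, [4, 32, 32, 64])
#   out, offsets = deform_con2v(inp, num_outputs=64, kernel_size=3, stride=1,
#                               trainable=True, name='deform1', reuse=False)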
##################################################################################
# Sampling
##################################################################################
def make_png(att, scale):
att_current = up_sample_bilinear(att, scale_factor=scale)
att_current = tf.nn.relu(att_current)
att_current = tf.reduce_mean(att_current,axis=-1)
att_current = tf.stack([att_current, att_current, att_current])
att_current = tf.transpose(att_current, perm=[1, 2, 3, 0])
return att_current
def global_avg_pooling(x):
gap = tf.reduce_mean(x, axis=[1, 2])
return gap
def up_sample(x, scale_factor=2):
_, h, w, _ = x.get_shape().as_list()
new_size = [np.int32(h * scale_factor), np.int32(w * scale_factor)]
return tf.image.resize_nearest_neighbor(x, size=new_size)
def up_sample_bilinear(x, scale_factor=2):
_, h, w, _ = x.get_shape().as_list()
new_size = [np.int32(h * scale_factor), np.int32(w * scale_factor)]
return tf.image.resize_bilinear(x, size=new_size)
def up_sample_bicubic(x, scale_factor=2):
_, h, w, _ = x.get_shape().as_list()
new_size = [np.int32(h * scale_factor), np.int32(w * scale_factor)]
return tf.image.resize_bicubic(x, size=new_size)
##################################################################################
# Activation function
##################################################################################
def lrelu(x, alpha=0.2):
return tf.nn.leaky_relu(x, alpha)
def relu(x):
return tf.nn.relu(x)
def tanh(x):
return tf.tanh(x)
##################################################################################
# Normalization function
##################################################################################
def batch_norm(x, is_training=True, scope='batch_norm'):
#return tf.layers.batch_normalization(x, training=is_training)
return tf_contrib.layers.batch_norm(x,decay=0.9, epsilon=1e-05,
center=True, scale=True, updates_collections=tf.GraphKeys.UPDATE_OPS,
is_training=is_training, scope=scope)
def spectral_norm(w, iteration=1):
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable("u", [1, w_shape[-1]], initializer=tf.truncated_normal_initializer(), trainable=False)
u_hat = u
v_hat = None
    # power iteration; usually iteration = 1 is enough
    for i in range(iteration):
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = l2_norm(v_)
u_ = tf.matmul(v_hat, w)
u_hat = l2_norm(u_)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
w_norm = w / sigma
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
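# What the power iteration above computes, sketched in NumPy-style pseudocode
# (illustrative only): for W reshaped to [k, n], alternating
#   v = l2_norm(u @ W.T); u = l2_norm(v @ W)
# converges so that sigma = v @ W @ u.T approximates the largest singular
# value of W, and dividing W by sigma keeps the layer's spectral norm near 1.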
def l2_norm(v, eps=1e-12):
return v / (tf.reduce_sum(v ** 2) ** 0.5 + eps)
##################################################################################
# Loss function
##################################################################################
def class_loss(class_logits, label, num_class):
    loss = tf.losses.softmax_cross_entropy(tf.one_hot(label, num_class), class_logits, weights=1.0)
    return loss |
py | 1a359f5b21c626c18d86650e76510be93276cdcb | '''production script for planetary nebula
this script is a streamlined version of the code in planetary_nebula.ipynb.
The notebook was used for testing and peaking into some results, while
this script is used to produce the final plots/tables.
'''
import sys
from pathlib import Path
import logging
import json
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii, fits
from astropy.table import Table
from astropy.coordinates import SkyCoord
from photutils import DAOStarFinder
from extinction import ccm89
from pnlf.auxiliary import search_table
from pnlf.io import ReadLineMaps
from pnlf.detection import detect_unresolved_sources, completeness_limit
from pnlf.photometry import measure_flux
from pnlf.analyse import emission_line_diagnostics, MaximumLikelihood, pnlf, Distance
from pnlf.plot.pnlf import plot_emission_line_ratio, plot_pnlf
logging.basicConfig(#filename='log.txt',
#filemode='w',
#format='(levelname)s %(name)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
basedir = Path('..')
# read back the per-galaxy parameters that were saved earlier
with open(basedir / 'data' / 'interim' / 'parameters.json') as json_file:
parameters = json.load(json_file)
with fits.open(basedir / 'data' / 'raw' / 'phangs_sample_table_v1p4.fits') as hdul:
sample_table = Table(hdul[1].data)
for name in parameters.keys():
tmp = search_table(sample_table,name)
if tmp:
d = Distance(tmp['DIST'][0]*1e6,'pc').to_distance_modulus()
parameters[name]["mu"] = d
print('using mu from sample table')
'''
IC5332 NGC1087 NGC1365 NGC1512 NGC1566 NGC1672 NGC2835
NGC3351 NGC3627 NGC4254 NGC4535 NGC5068 NGC628
'''
data_raw = Path('d:\downloads\MUSEDAP')
basedir = Path('..')
for name in parameters.keys():
'''
Step 1: Read in the data
'''
galaxy = ReadLineMaps(data_raw / name)
setattr(galaxy,'mu',parameters[galaxy.name]['mu'])
setattr(galaxy,'alpha',parameters[galaxy.name]['power_index'])
setattr(galaxy,'completeness_limit',parameters[galaxy.name]['completeness_limit'])
'''
Step 2: Detect sources
'''
sources = detect_unresolved_sources(galaxy,
'OIII5006',
StarFinder=DAOStarFinder,
threshold=8,
save=False)
'''
Step 3: Measure fluxes
'''
flux = measure_flux(galaxy,sources, galaxy.alpha,aperture_size=2.,background='local')
for col in ['HA6562','NII6583','SII6716']:
flux[col][flux[col]<0] = flux[f'{col}_err'][flux[col]<0]
flux[col][flux[col]/flux[f'{col}_err']<3] = flux[f'{col}_err'][flux[col]/flux[f'{col}_err']<3]
# calculate astronomical coordinates for comparison
flux['SkyCoord'] = SkyCoord.from_pixel(flux['x'],flux['y'],galaxy.wcs)
# calculate magnitudes from measured fluxes
flux['mOIII'] = -2.5*np.log10(flux['OIII5006']*1e-20) - 13.74
flux['dmOIII'] = np.abs( 2.5/np.log(10) * flux['OIII5006_err'] / flux['OIII5006'] )
# correct for milky way extinction
extinction = ccm89(wave=np.array([5007.]),a_v=0.2,r_v=3.1,unit='aa')[0]
flux['mOIII'] -= extinction
'''
Step 4: Emission line diagnostics
'''
tbl = emission_line_diagnostics(flux,galaxy.mu,galaxy.completeness_limit)
filename = basedir / 'reports' / 'catalogues' / f'pn_candidates_{galaxy.name}.txt'
with open(filename,'w',newline='\n') as f:
tbl['RaDec'] = tbl['SkyCoord'].to_string(style='hmsdms',precision=2)
for col in tbl.colnames:
if col not in ['id','RaDec','type']:
tbl[col].info.format = '%.3f'
ascii.write(tbl[['id','type','x','y','RaDec','OIII5006','OIII5006_err','mOIII','dmOIII','HA6562','HA6562_err',
'NII6583','NII6583_err','SII6716','SII6716_err']][tbl['type']!='NaN'],
f,format='fixed_width',delimiter='\t',overwrite=True)
filename = basedir / 'reports' / 'figures' / f'{galaxy.name}_emission_line'
plot_emission_line_ratio(tbl,galaxy.mu,filename=filename)
'''
Step 5: Fit with maximum likelihood
'''
data = tbl[(tbl['type']=='PN') & (tbl['mOIII']<galaxy.completeness_limit)]['mOIII']
err = tbl[(tbl['type']=='PN') & (tbl['mOIII']<galaxy.completeness_limit)]['dmOIII']
#data = data[data>26]
fitter = MaximumLikelihood(pnlf,
data,
mhigh=galaxy.completeness_limit)
# a good guess would be mu_guess = min(data)-Mmax
mu = fitter([24])[0]
filename = basedir / 'reports' / 'figures' / f'{galaxy.name}_PNLF'
plot_pnlf(tbl[tbl['type']=='PN']['mOIII'],mu,galaxy.completeness_limit,binsize=0.25,mhigh=32,filename=filename)
print(f'{galaxy.name}: {mu:.2f} vs {parameters[galaxy.name]["mu"]:.2f}') |
py | 1a359f8c4679c96bac746dc20017997049f8367c | # -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
import os
import numpy as np
dir = "./jtr/data/emoji2vec/"
emojis = []
vecs = []
with open(dir + "metadata.tsv", "w") as f_out:
# f_out.write("emoji\n")
with open(dir + "emoji2vec.txt", "r") as f_in:
for ix, line in enumerate(f_in.readlines()[1:]):
splits = line.strip().split(" ")
emoji = splits[0]
vec = [float(x) for x in splits[1:]]
assert len(vec) == 300
# print(emoji, vec)
emojis.append(emoji)
vecs.append(vec)
f_out.write(emoji+"\n")
f_in.close()
f_out.close()
emoji2vec = tf.constant(np.array(vecs))
tf_emoji2vec = tf.get_variable("emoji2vec", [len(vecs), 300], tf.float64)
# save embeddings to file
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf_emoji2vec.assign(emoji2vec))
saver = tf.train.Saver()
saver.save(sess, os.path.join(dir, "model.ckpt"), 0)
# Use the same LOG_DIR where you stored your checkpoint.
summary_writer = tf.summary.FileWriter(dir)
# Format: tensorflow/contrib/tensorboard/plugins/projector/projector_config.proto
config = projector.ProjectorConfig()
# You can add multiple embeddings. Here we add only one.
embedding = config.embeddings.add()
embedding.tensor_name = tf_emoji2vec.name
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = os.path.join(dir, 'metadata.tsv')
# Saves a configuration file that TensorBoard will read during startup.
projector.visualize_embeddings(summary_writer, config)
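# The checkpoint and projector config written above can be inspected with
# TensorBoard pointed at the same directory:
#   tensorboard --logdir ./jtr/data/emoji2vec/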
|
py | 1a359ff6a47f6f564d964dbd2905f1e35ab7d45a | # -*- coding: utf-8 -*-
"""
The :class:`SwaggerClient` provides an interface for making API calls based on
a swagger spec, and returns responses of python objects which build from the
API response.
Structure Diagram::
+---------------------+
| |
| SwaggerClient |
| |
+------+--------------+
|
| has many
|
+------v--------------+
| |
| Resource +------------------+
| | |
+------+--------------+ has many |
| |
| has many |
| |
+------v--------------+ +------v--------------+
| | | |
| Operation | | SwaggerModel |
| | | |
+------+--------------+ +---------------------+
|
| uses
|
+------v--------------+
| |
| HttpClient |
| |
+---------------------+
To get a client
.. code-block:: python
client = bravado.client.SwaggerClient.from_url(swagger_spec_url)
"""
import logging
from bravado_core.docstring import create_operation_docstring
from bravado_core.exception import SwaggerMappingError
from bravado_core.formatter import SwaggerFormat # noqa
from bravado_core.param import marshal_param
from bravado_core.spec import Spec
from six import iteritems
from six import itervalues
from bravado.config import bravado_config_from_config_dict
from bravado.config import RequestConfig
from bravado.docstring_property import docstring_property
from bravado.requests_client import RequestsClient
from bravado.swagger_model import Loader
from bravado.warning import warn_for_deprecated_op
log = logging.getLogger(__name__)
class SwaggerClient(object):
"""A client for accessing a Swagger-documented RESTful service.
:type swagger_spec: :class:`bravado_core.spec.Spec`
"""
def __init__(self, swagger_spec, also_return_response=False):
self.__also_return_response = also_return_response
self.swagger_spec = swagger_spec
@classmethod
def from_url(cls, spec_url, http_client=None, request_headers=None, config=None):
"""Build a :class:`SwaggerClient` from a url to the Swagger
specification for a RESTful API.
:param spec_url: url pointing at the swagger API specification
:type spec_url: str
:param http_client: an HTTP client used to perform requests
:type http_client: :class:`bravado.http_client.HttpClient`
:param request_headers: Headers to pass with http requests
:type request_headers: dict
:param config: Config dict for bravado and bravado_core.
See CONFIG_DEFAULTS in :module:`bravado_core.spec`.
See CONFIG_DEFAULTS in :module:`bravado.client`.
        :rtype: :class:`SwaggerClient`
"""
log.debug(u"Loading from %s", spec_url)
http_client = http_client or RequestsClient()
loader = Loader(http_client, request_headers=request_headers)
spec_dict = loader.load_spec(spec_url)
# RefResolver may have to download additional json files (remote refs)
# via http. Wrap http_client's request() so that request headers are
# passed along with the request transparently. Yeah, this is not ideal,
# but since RefResolver has new found responsibilities, it is
# functional.
if request_headers is not None:
http_client.request = inject_headers_for_remote_refs(
http_client.request, request_headers)
return cls.from_spec(spec_dict, spec_url, http_client, config)
@classmethod
def from_spec(cls, spec_dict, origin_url=None, http_client=None,
config=None):
"""
Build a :class:`SwaggerClient` from a Swagger spec in dict form.
:param spec_dict: a dict with a Swagger spec in json-like form
:param origin_url: the url used to retrieve the spec_dict
:type origin_url: str
:param config: Configuration dict - see spec.CONFIG_DEFAULTS
        :rtype: :class:`SwaggerClient`
"""
http_client = http_client or RequestsClient()
config = config or {}
# Apply bravado config defaults
bravado_config = bravado_config_from_config_dict(config)
# remove bravado configs from config dict
for key in set(bravado_config._fields).intersection(set(config)):
del config[key]
# set bravado config object
config['bravado'] = bravado_config
swagger_spec = Spec.from_dict(
spec_dict, origin_url, http_client, config,
)
return cls(swagger_spec, also_return_response=bravado_config.also_return_response)
def get_model(self, model_name):
return self.swagger_spec.definitions[model_name]
def _get_resource(self, item):
"""
:param item: name of the resource to return
:return: :class:`Resource`
"""
resource = self.swagger_spec.resources.get(item)
if not resource:
raise AttributeError(
'Resource {0} not found. Available resources: {1}'
.format(item, ', '.join(dir(self))))
# Wrap bravado-core's Resource and Operation objects in order to
# execute a service call via the http_client.
return ResourceDecorator(resource, self.__also_return_response)
def __repr__(self):
return u"%s(%s)" % (self.__class__.__name__, self.swagger_spec.api_url)
def __getattr__(self, item):
return self._get_resource(item)
def __dir__(self):
return self.swagger_spec.resources.keys()
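# Hypothetical call sketch (resource and operation names depend entirely on
# the loaded spec; the petstore names below are illustrative):
#   client = SwaggerClient.from_url('http://petstore.example/swagger.json')
#   pet = client.pet.getPetById(petId=42).response().result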
def inject_headers_for_remote_refs(request_callable, request_headers):
"""Inject request_headers only when the request is to retrieve the
remote refs in the swagger spec (vs being a request for a service call).
:param request_callable: method on http_client to make a http request
:param request_headers: headers to inject when retrieving remote refs
"""
def request_wrapper(request_params, *args, **kwargs):
def is_remote_ref_request(request_kwargs):
# operation is only present for service calls
return request_kwargs.get('operation') is None
if is_remote_ref_request(kwargs):
request_params['headers'] = request_headers
return request_callable(request_params, *args, **kwargs)
return request_wrapper
class ResourceDecorator(object):
"""
Wraps :class:`bravado_core.resource.Resource` so that accesses to contained
operations can be instrumented.
"""
def __init__(self, resource, also_return_response=False):
"""
:type resource: :class:`bravado_core.resource.Resource`
"""
self.also_return_response = also_return_response
self.resource = resource
def __getattr__(self, name):
"""
:rtype: :class:`CallableOperation`
"""
return CallableOperation(getattr(self.resource, name), self.also_return_response)
def __dir__(self):
"""
Exposes correct attrs on resource when tab completing in a REPL
"""
return self.resource.__dir__()
class CallableOperation(object):
"""Wraps an operation to make it callable and provides a docstring. Calling
the operation uses the configured http_client.
:type operation: :class:`bravado_core.operation.Operation`
"""
def __init__(self, operation, also_return_response=False):
self.also_return_response = also_return_response
self.operation = operation
@docstring_property(__doc__)
def __doc__(self):
return create_operation_docstring(self.operation)
def __getattr__(self, name):
"""Forward requests for attrs not found on this decorator to the
delegate.
"""
return getattr(self.operation, name)
def __call__(self, **op_kwargs):
"""Invoke the actual HTTP request and return a future.
:rtype: :class:`bravado.http_future.HTTPFuture`
"""
log.debug(u'%s(%s)', self.operation.operation_id, op_kwargs)
warn_for_deprecated_op(self.operation)
# Get per-request config
request_options = op_kwargs.pop('_request_options', {})
request_config = RequestConfig(request_options, self.also_return_response)
request_params = construct_request(
self.operation, request_options, **op_kwargs)
http_client = self.operation.swagger_spec.http_client
return http_client.request(
request_params,
operation=self.operation,
request_config=request_config,
)
def construct_request(operation, request_options, **op_kwargs):
"""Construct the outgoing request dict.
:type operation: :class:`bravado_core.operation.Operation`
:param request_options: _request_options passed into the operation
invocation.
:param op_kwargs: parameter name/value pairs to passed to the
invocation of the operation.
:return: request in dict form
"""
url = operation.swagger_spec.api_url.rstrip('/') + operation.path_name
request = {
'method': str(operation.http_method.upper()),
'url': url,
'params': {}, # filled in downstream
'headers': request_options.get('headers', {}),
}
# Adds Accept header to request for msgpack response if specified
if request_options.get('use_msgpack', False):
request['headers']['Accept'] = 'application/msgpack'
# Copy over optional request options
for request_option in ('connect_timeout', 'timeout'):
if request_option in request_options:
request[request_option] = request_options[request_option]
construct_params(operation, request, op_kwargs)
return request
def construct_params(operation, request, op_kwargs):
"""Given the parameters passed to the operation invocation, validates and
marshals the parameters into the provided request dict.
:type operation: :class:`bravado_core.operation.Operation`
:type request: dict
:param op_kwargs: the kwargs passed to the operation invocation
:raises: SwaggerMappingError on extra parameters or when a required
parameter is not supplied.
"""
current_params = operation.params.copy()
for param_name, param_value in iteritems(op_kwargs):
param = current_params.pop(param_name, None)
if param is None:
raise SwaggerMappingError(
"{0} does not have parameter {1}"
.format(operation.operation_id, param_name))
marshal_param(param, param_value, request)
# Check required params and non-required params with a 'default' value
for remaining_param in itervalues(current_params):
if remaining_param.location == 'header' and remaining_param.name in request['headers']:
marshal_param(remaining_param, request['headers'][remaining_param.name], request)
else:
if remaining_param.required:
raise SwaggerMappingError(
'{0} is a required parameter'.format(remaining_param.name))
if not remaining_param.required and remaining_param.has_default():
marshal_param(remaining_param, None, request)
|
py | 1a35a0080792a81598e020f30956334188913c64 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# copied from https://github.com/kaidic/LDAM-DRW/blob/master/losses.py
class LDAMLoss(nn.Module):
def __init__(self, cls_num_list, max_m=0.5, weight=None, s=30, reduce_=False):
super(LDAMLoss, self).__init__()
m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))
m_list = m_list * (max_m / np.max(m_list))
m_list = torch.cuda.FloatTensor(m_list)
self.m_list = m_list
assert s > 0
self.s = s
self.weight = weight
self.reduce = reduce_
def forward(self, x, target):
        index = torch.zeros_like(x, dtype=torch.bool)
index.scatter_(1, target.data.view(-1, 1), 1)
index_float = index.type(torch.cuda.FloatTensor)
batch_m = torch.matmul(self.m_list[None, :], index_float.transpose(0,1))
batch_m = batch_m.view((-1, 1))
x_m = x - batch_m
output = torch.where(index, x_m, x)
return F.cross_entropy(self.s*output, target, reduce=self.reduce)
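# Hypothetical usage sketch (CUDA is required by the FloatTensor casts above):
#   criterion = LDAMLoss(cls_num_list=[5000, 500, 50], max_m=0.5, s=30)
#   loss = criterion(logits, targets)  # per-sample losses; pass reduce_=True for a mean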
|
py | 1a35a00a57824caf5da53d02400587fc1ec17912 | # -*- coding: utf-8 -*-
# Scrapy settings for signalstart project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'signalstart'
SPIDER_MODULES = ['signalstart.spiders']
NEWSPIDER_MODULE = 'signalstart.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'signalstart (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'signalstart.middlewares.SignalstartSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'signalstart.middlewares.SignalstartDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'signalstart.pipelines.SignalstartPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
py | 1a35a00c6b161e26b872607ea966f10335394a7e | # -*- coding: utf-8 -*-
""" This file contains functions to generate and verify tokens for Flask-User.
Tokens contain an encoded user ID and a signature. The signature is managed by the itsdangerous module.
:copyright: (c) 2013 by Ling Thio
:author: Ling Thio ([email protected])
:license: Simplified BSD License, see LICENSE.txt for more details."""
import base64
from Crypto.Cipher import AES
from itsdangerous import BadSignature, SignatureExpired, TimestampSigner
class TokenManager(object):
def __init__(self):
""" Create a cypher to encrypt IDs and a signer to sign tokens."""
# Create cypher to encrypt IDs
# and ensure >=16 characters
# secret = app.config.get('SECRET_KEY')
secret = 'SECRET_KEY'
precursor = b'0123456789abcdef'
if isinstance(secret, bytes):
key = secret + precursor
else:
key = secret.encode("utf-8") + precursor
        # CBC cipher objects are stateful and single-use, so store the key and
        # build a fresh cipher per operation (see encrypt_id/decrypt_id) with
        # an explicit all-zero IV, matching the zero IV that legacy PyCrypto
        # used when none was given, so encrypt and decrypt remain inverses.
        self.key = key[0:16]
# Create signer to sign tokens
self.signer = TimestampSigner(secret)
def encrypt_id(self, id):
""" Encrypts integer ID to url-safe base64 string."""
# 16 byte integer
str1 = '%016d' % id
# encrypted data
        str2 = AES.new(self.key, AES.MODE_CBC, b'\x00' * 16).encrypt(str1.encode())
# URL safe base64 string with '=='
str3 = base64.urlsafe_b64encode(str2)
# return base64 string without '=='
return str3[0:-2]
def decrypt_id(self, encrypted_id):
""" Decrypts url-safe base64 string to integer ID"""
# Convert strings and unicode strings to bytes if needed
if hasattr(encrypted_id, 'encode'):
encrypted_id = encrypted_id.encode('ascii', 'ignore')
try:
str3 = encrypted_id + b'==' # --> base64 string with '=='
# print('str3=', str3)
str2 = base64.urlsafe_b64decode(str3) # --> encrypted data
# print('str2=', str2)
            str1 = AES.new(self.key, AES.MODE_CBC, b'\x00' * 16).decrypt(str2)  # --> 16 byte integer string
# print('str1=', str1)
return int(str1) # --> integer id
except Exception as e: # pragma: no cover
print('!!!Exception in decrypt_id!!!:', e)
return 0
def generate_token(self, id):
""" Return token with id, timestamp and signature"""
# In Python3 we must make sure that bytes are converted to strings.
# Hence the addition of '.decode()'
return self.signer.sign(self.encrypt_id(id)).decode()
def verify_token(self, token, expiration_in_seconds):
""" Verify token and return (is_valid, has_expired, id).
Returns (True, False, id) on success.
Returns (False, True, None) on expired tokens.
Returns (False, False, None) on invalid tokens."""
try:
data = self.signer.unsign(token, max_age=expiration_in_seconds)
is_valid = True
has_expired = False
id = self.decrypt_id(data)
except SignatureExpired:
is_valid = False
has_expired = True
id = None
except BadSignature:
is_valid = False
has_expired = False
id = None
return (is_valid, has_expired, id)
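# Hypothetical round-trip sketch:
#   tm = TokenManager()
#   token = tm.generate_token(42)
#   is_valid, has_expired, user_id = tm.verify_token(token, 3600)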
|
py | 1a35a12e4588dcff60787509fbf0f882e754dd60 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1CinderVolumeSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'fs_type': 'str',
'read_only': 'bool',
'secret_ref': 'V1LocalObjectReference',
'volume_id': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'read_only': 'readOnly',
'secret_ref': 'secretRef',
'volume_id': 'volumeID'
}
def __init__(self, fs_type=None, read_only=None, secret_ref=None, volume_id=None): # noqa: E501
"""V1CinderVolumeSource - a model defined in OpenAPI""" # noqa: E501
self._fs_type = None
self._read_only = None
self._secret_ref = None
self._volume_id = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
if read_only is not None:
self.read_only = read_only
if secret_ref is not None:
self.secret_ref = secret_ref
self.volume_id = volume_id
@property
def fs_type(self):
"""Gets the fs_type of this V1CinderVolumeSource. # noqa: E501
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md # noqa: E501
:return: The fs_type of this V1CinderVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1CinderVolumeSource.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md # noqa: E501
:param fs_type: The fs_type of this V1CinderVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def read_only(self):
"""Gets the read_only of this V1CinderVolumeSource. # noqa: E501
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md # noqa: E501
:return: The read_only of this V1CinderVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1CinderVolumeSource.
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md # noqa: E501
:param read_only: The read_only of this V1CinderVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def secret_ref(self):
"""Gets the secret_ref of this V1CinderVolumeSource. # noqa: E501
:return: The secret_ref of this V1CinderVolumeSource. # noqa: E501
:rtype: V1LocalObjectReference
"""
return self._secret_ref
@secret_ref.setter
def secret_ref(self, secret_ref):
"""Sets the secret_ref of this V1CinderVolumeSource.
:param secret_ref: The secret_ref of this V1CinderVolumeSource. # noqa: E501
:type: V1LocalObjectReference
"""
self._secret_ref = secret_ref
@property
def volume_id(self):
"""Gets the volume_id of this V1CinderVolumeSource. # noqa: E501
volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md # noqa: E501
:return: The volume_id of this V1CinderVolumeSource. # noqa: E501
:rtype: str
"""
return self._volume_id
@volume_id.setter
def volume_id(self, volume_id):
"""Sets the volume_id of this V1CinderVolumeSource.
volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md # noqa: E501
:param volume_id: The volume_id of this V1CinderVolumeSource. # noqa: E501
:type: str
"""
if volume_id is None:
raise ValueError("Invalid value for `volume_id`, must not be `None`") # noqa: E501
self._volume_id = volume_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CinderVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
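# A minimal usage sketch (illustrative, not part of the generated client).
# volume_id is the only required attribute: passing None raises ValueError in
# its setter. The UUID below is a made-up example value.
if __name__ == '__main__':
    source = V1CinderVolumeSource(
        volume_id='90d6900d-808f-4ddb-a30e-5ef821f58b4e',
        fs_type='ext4',
        read_only=True)
    print(source.to_dict())
    # -> {'fs_type': 'ext4', 'read_only': True, 'secret_ref': None,
    #     'volume_id': '90d6900d-808f-4ddb-a30e-5ef821f58b4e'}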
|