filename (stringlengths 13-19) | text (stringlengths 134-1.04M)
---|---|
the-stack_0_6629 | from config import HNConfig as Config
import numpy as np # type: ignore
from matplotlib import pyplot as plt # type: ignore
from matplotlib import cm # type: ignore
from matplotlib import colors # type: ignore
import pandas as pd # type: ignore
import util
window_size = 5
dpi = 100
iter_lim = 1000
record_moment = np.arange(0, iter_lim, 10)
record = True
delta_t = 0.01
noise = 0.001
u_0 = 0.02
param_a = 1.0
param_b = 1.0
param_c = 2.0
param_d = 1.0
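# Note (added): param_a-param_d appear to play the roles of the A, B, C, D coefficients in the
# Hopfield-Tank TSP energy: A and B penalize a city occupying several tour positions (and a
# position holding several cities), C keeps total activation near city_num, and D weighs the
# tour length through the inter-city distances used in calc_weight_matrix below.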
@np.vectorize
def sigmoid(input: float) -> float:
sigmoid_range = 34.538776394910684
if input <= -sigmoid_range:
return 1e-15
if input >= sigmoid_range:
return 1.0 - 1e-15
return 1.0 / (1.0 + np.exp(-input / u_0))
def kronecker_delta(i: int, j: int) -> float:
if i == j:
return 1.0
return 0.0
def calc_weight_matrix(city_array: np.array) -> np.array:
city_num: int = city_array.shape[0]
n: int = city_num ** 2
tmp: np.array = np.zeros((n, n))
for s0 in range(n):
x: int = int(s0 / city_num)
i: int = s0 % city_num
for s1 in range(n):
y: int = int(s1 / city_num)
j: int = s1 % city_num
dxy: float = util.dist(city_array[x, :], city_array[y, :])
tmp[s0, s1] = (
-param_a * kronecker_delta(x, y) * (1.0 - kronecker_delta(i, j))
- param_b * kronecker_delta(i, j) * (1.0 - kronecker_delta(x, y))
- param_c
- param_d
* dxy
* (
kronecker_delta(j, (i - 1) % city_num)
+ kronecker_delta(j, (i + 1) % city_num)
)
)
return tmp
def calc_bias(city_array: np.array) -> np.array:
city_num: int = city_array.shape[0]
n: int = city_num ** 2
tmp: np.array = param_c * city_num * np.ones(n)
return tmp
def update_inner_vals(
nodes_array: np.matrix,
inner_vals: np.matrix,
weight_matrix: np.matrix,
biases: np.matrix,
) -> np.matrix:
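# Euler step of the Hopfield dynamics du/dt = -u/tau + W.v + b, discretized with step delta_t.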
tau = 1.0
asdf: np.matrix = np.matmul(weight_matrix, nodes_array)
delta: np.matrix = (-inner_vals / tau + asdf + biases) * delta_t
return inner_vals + delta
def hp_begin(
inner_vals_array: np.matrix,
nodes_array: np.matrix,
weights_matrix: np.matrix,
biases_array: np.matrix,
) -> None:
if record:
dir_name: str = util.make_directory(Config)
for i in range(iter_lim):
if i in record_moment:
filename: str = "iteration-" + str(i) + ".png"
file_path: str = dir_name + filename
plt.savefig(file_path)
inner_vals_array = update_inner_vals(
nodes_array, inner_vals_array, weights_matrix, biases_array
)
nodes_array = sigmoid(inner_vals_array)
plt.title("iteration=" + str(i + 1))
mat_visual.set_data(np.reshape(nodes_array, (city_num, city_num)))
plt.pause(0.0001)
else:
i = 1
# while plt.get_fignums():
# inner_vals_array = update_inner_vals(nodes_array, inner_vals_array, weights_matrix, biases_array)
# nodes_array = sigmoid(inner_vals_array)
# plt.title("iteration=" + str(i))
# mat_visual.set_data(np.reshape(nodes_array, (city_num, city_num)))
# i += 1
# plt.pause(.01)
while plt.get_fignums():
# print(nodes_array.shape, inner_vals_array.shape, weights_matrix.shape, biases_array.shape)
inner_vals_array = update_inner_vals(
nodes_array, inner_vals_array, weights_matrix, biases_array
)
nodes_array = sigmoid(inner_vals_array)
plt.title("iteration=" + str(i))
mat_visual.set_data(np.reshape(nodes_array, (city_num, city_num)))
i += 1
plt.pause(0.0001)
if __name__ == "__main__":
if Config.read_file:
np_cities = np.genfromtxt(Config.file_path + Config.city_file, delimiter=",")
city_num = np_cities.shape[0]
# width_x = (np.max(np_cities[:, 0]) - np.min(np_cities[:, 0]))
# width_y = (np.max(np_cities[:, 1]) - np.min(np_cities[:, 1]))
# width = np.amax([width_x, width_y])
# np_cities[:, 0] -= np.min(np_cities[:, 0])
# np_cities[:, 0] /= width
# np_cities[:, 1] -= np.min(np_cities[:, 1])
# np_cities[:, 1] /= width
# center_x = np.average(np_cities[:, 0])
# center_y = np.average(np_cities[:, 1])
figsize = (window_size, window_size)
else:
city_num = Config.city_num
# draw city coordinates from a "continuous uniform" distribution over [0, 1)
np_cities = np.random.random((city_num, 2))
center_x = 0.5
center_y = 0.5
figsize = (window_size, window_size)
inner_vals = np.matrix((np.random.random((city_num ** 2)) - 0.5) * noise).T
nodes = np.matrix(sigmoid(inner_vals))
weights = np.matrix(calc_weight_matrix(np_cities))
df = pd.DataFrame(weights)
df.to_csv("weigths.csv", header=False, index=False)
biases = np.matrix(calc_bias(np_cities)).T
fig = plt.figure(figsize=figsize, dpi=dpi)
mat_visual = plt.matshow(
np.reshape(nodes, (city_num, city_num)),
fignum=0,
cmap=cm.Greys,
norm=colors.Normalize(vmin=0.0, vmax=1.0),
)
fig.colorbar(mat_visual)
plt.title("iteration=" + str(0))
plt.pause(0.0001)
hp_begin(inner_vals, nodes, weights, biases)
|
the-stack_0_6630 | import flask_restplus
import marshmallow
from znail.netem.disciplines import PacketReordering
from znail.netem.tc import Tc
from znail.ui import api
from znail.ui.util import NoneAttributes, json_request_handler
class PacketReorderingSchema(marshmallow.Schema):
milliseconds = marshmallow.fields.Integer(required=True, validate=lambda n: n > 0)
percent = marshmallow.fields.Float(required=True, validate=lambda n: n >= 0 and n <= 100)
packet_reordering_schema = PacketReorderingSchema()
packet_reordering_model = api.model(
'PacketReordering', {
'milliseconds': flask_restplus.fields.Integer(min=0),
'percent': flask_restplus.fields.Float(min=0, max=100),
})
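# The marshmallow schema validates the incoming JSON payload, while the flask_restplus model
# presumably drives the generated Swagger documentation for this endpoint.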
@api.route('/api/disciplines/packet_reordering')
class PacketReorderingResource(flask_restplus.Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tc = Tc.adapter('eth1')
@api.response(200, 'Success', packet_reordering_model)
def get(self):
reordering = self.tc.disciplines.get('reorder', NoneAttributes)
return {
'milliseconds': reordering.milliseconds,
'percent': reordering.percent,
}, 200
@json_request_handler(packet_reordering_schema, packet_reordering_model)
def post(self, data):
disciplines = self.tc.disciplines
disciplines['reorder'] = PacketReordering(data['percent'], data['milliseconds'])
self.tc.apply(disciplines)
@api.route('/api/disciplines/packet_reordering/clear')
class ClearPacketReorderingResource(flask_restplus.Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tc = Tc.adapter('eth1')
@json_request_handler()
def post(self, data):
disciplines = self.tc.disciplines
if 'reorder' in disciplines:
del disciplines['reorder']
self.tc.apply(disciplines)
|
the-stack_0_6634 | """Support for exposing Concord232 elements as sensors."""
import datetime
import logging
import requests
import voluptuous as vol
from homeassistant.components.binary_sensor import (
BinarySensorDevice,
PLATFORM_SCHEMA,
DEVICE_CLASSES,
)
from homeassistant.const import CONF_HOST, CONF_PORT
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_EXCLUDE_ZONES = "exclude_zones"
CONF_ZONE_TYPES = "zone_types"
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "Alarm"
DEFAULT_PORT = "5007"
DEFAULT_SSL = False
SCAN_INTERVAL = datetime.timedelta(seconds=10)
ZONE_TYPES_SCHEMA = vol.Schema({cv.positive_int: vol.In(DEVICE_CLASSES)})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_EXCLUDE_ZONES, default=[]): vol.All(
cv.ensure_list, [cv.positive_int]
),
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_ZONE_TYPES, default={}): ZONE_TYPES_SCHEMA,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Concord232 binary sensor platform."""
from concord232 import client as concord232_client
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
exclude = config.get(CONF_EXCLUDE_ZONES)
zone_types = config.get(CONF_ZONE_TYPES)
sensors = []
try:
_LOGGER.debug("Initializing client")
client = concord232_client.Client(f"http://{host}:{port}")
client.zones = client.list_zones()
client.last_zone_update = dt_util.utcnow()
except requests.exceptions.ConnectionError as ex:
_LOGGER.error("Unable to connect to Concord232: %s", str(ex))
return False
# The order of zones returned by client.list_zones() can vary.
# When the zones are not named, this can result in the same entity
# name mapping to different sensors in an unpredictable way. Sort
# the zones by zone number to prevent this.
client.zones.sort(key=lambda zone: zone["number"])
for zone in client.zones:
_LOGGER.info("Loading Zone found: %s", zone["name"])
if zone["number"] not in exclude:
sensors.append(
Concord232ZoneSensor(
hass,
client,
zone,
zone_types.get(zone["number"], get_opening_type(zone)),
)
)
add_entities(sensors, True)
def get_opening_type(zone):
"""Return the result of the type guessing from name."""
if "MOTION" in zone["name"]:
return "motion"
if "KEY" in zone["name"]:
return "safety"
if "SMOKE" in zone["name"]:
return "smoke"
if "WATER" in zone["name"]:
return "water"
return "opening"
class Concord232ZoneSensor(BinarySensorDevice):
"""Representation of a Concord232 zone as a sensor."""
def __init__(self, hass, client, zone, zone_type):
"""Initialize the Concord232 binary sensor."""
self._hass = hass
self._client = client
self._zone = zone
self._number = zone["number"]
self._zone_type = zone_type
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return self._zone_type
@property
def should_poll(self):
"""No polling needed."""
return True
@property
def name(self):
"""Return the name of the binary sensor."""
return self._zone["name"]
@property
def is_on(self):
"""Return true if the binary sensor is on."""
# True means "faulted" or "open" or "abnormal state"
return bool(self._zone["state"] != "Normal")
def update(self):
"""Get updated stats from API."""
last_update = dt_util.utcnow() - self._client.last_zone_update
_LOGGER.debug("Zone: %s ", self._zone)
if last_update > datetime.timedelta(seconds=1):
self._client.zones = self._client.list_zones()
self._client.last_zone_update = dt_util.utcnow()
_LOGGER.debug("Updated from zone: %s", self._zone["name"])
if hasattr(self._client, "zones"):
self._zone = next(
(x for x in self._client.zones if x["number"] == self._number), None
)
|
the-stack_0_6636 | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Torch utilities for the Trainer class.
"""
import math
import warnings
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Iterator, List, Optional, Union
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, Sampler
from .file_utils import is_sagemaker_distributed_available, is_torch_tpu_available
from .utils import logging
if is_sagemaker_distributed_available():
import smdistributed.dataparallel.torch.distributed as dist
else:
import torch.distributed as dist
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
# this is used to suppress an undesired warning emitted by pytorch versions 1.4.2-1.7.0
try:
from torch.optim.lr_scheduler import SAVE_STATE_WARNING
except ImportError:
SAVE_STATE_WARNING = ""
logger = logging.get_logger(__name__)
def torch_pad_and_concatenate(tensor1, tensor2, padding_index=-100):
"""Concatenates `tensor1` and `tensor2` on first axis, applying padding on the second if necessary."""
if len(tensor1.shape) == 1 or tensor1.shape[1] == tensor2.shape[1]:
return torch.cat((tensor1, tensor2), dim=0)
# Let's figure out the new shape
new_shape = (tensor1.shape[0] + tensor2.shape[0], max(tensor1.shape[1], tensor2.shape[1])) + tensor1.shape[2:]
# Now let's fill the result tensor
result = tensor1.new_full(new_shape, padding_index)
result[: tensor1.shape[0], : tensor1.shape[1]] = tensor1
result[tensor1.shape[0] :, : tensor2.shape[1]] = tensor2
return result
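# Illustrative example: concatenating logits of shape (8, 12) and (4, 20) yields a (12, 20)
# tensor whose unused positions are filled with padding_index (-100 by default).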
def numpy_pad_and_concatenate(array1, array2, padding_index=-100):
"""Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary."""
if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]:
return np.concatenate((array1, array2), axis=0)
# Let's figure out the new shape
new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:]
# Now let's fill the result tensor
result = np.full_like(array1, padding_index, shape=new_shape)
result[: array1.shape[0], : array1.shape[1]] = array1
result[array1.shape[0] :, : array2.shape[1]] = array2
return result
def nested_concat(tensors, new_tensors, padding_index=-100):
"""
Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or
nested list/tuples of tensors.
"""
assert type(tensors) == type(
new_tensors
), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_concat(t, n, padding_index=padding_index) for t, n in zip(tensors, new_tensors))
elif isinstance(tensors, torch.Tensor):
return torch_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index)
elif isinstance(tensors, np.ndarray):
return numpy_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index)
else:
raise TypeError(f"Unsupported type for concatenation: got {type(tensors)}")
def nested_numpify(tensors):
"Numpify `tensors` (even if it's a nested list/tuple of tensors)."
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_numpify(t) for t in tensors)
return tensors.cpu().numpy()
def nested_detach(tensors):
"Detach `tensors` (even if it's a nested list/tuple of tensors)."
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_detach(t) for t in tensors)
return tensors.detach()
def nested_xla_mesh_reduce(tensors, name):
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_xla_mesh_reduce(t, f"{name}_{i}") for i, t in enumerate(tensors))
return xm.mesh_reduce(name, tensors, torch.cat)
else:
raise ImportError("Torch xla must be installed to use `nested_xla_mesh_reduce`")
def distributed_concat(tensor: "torch.Tensor", num_total_examples: Optional[int] = None) -> torch.Tensor:
try:
if isinstance(tensor, (tuple, list)):
return type(tensor)(distributed_concat(t, num_total_examples) for t in tensor)
output_tensors = [tensor.clone() for _ in range(dist.get_world_size())]
dist.all_gather(output_tensors, tensor)
concat = torch.cat(output_tensors, dim=0)
# truncate the dummy elements added by SequentialDistributedSampler
if num_total_examples is not None:
concat = concat[:num_total_examples]
return concat
except AssertionError:
raise AssertionError("Not currently using distributed training")
def distributed_broadcast_scalars(
scalars: List[Union[int, float]], num_total_examples: Optional[int] = None
) -> torch.Tensor:
try:
tensorized_scalar = torch.tensor(scalars).cuda()
output_tensors = [tensorized_scalar.clone() for _ in range(dist.get_world_size())]
dist.all_gather(output_tensors, tensorized_scalar)
concat = torch.cat(output_tensors, dim=0)
# truncate the dummy elements added by SequentialDistributedSampler
if num_total_examples is not None:
concat = concat[:num_total_examples]
return concat
except AssertionError:
raise AssertionError("Not currently using distributed training")
def reissue_pt_warnings(caught_warnings):
# Reissue warnings that are not the SAVE_STATE_WARNING
if len(caught_warnings) > 1:
for w in caught_warnings:
if w.category != UserWarning or w.message != SAVE_STATE_WARNING:
warnings.warn(w.message, w.category)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
"""
Decorator to make all processes in distributed training wait for each local_master to do something.
Args:
local_rank (:obj:`int`): The rank of the local process.
"""
if local_rank not in [-1, 0]:
dist.barrier()
yield
if local_rank == 0:
dist.barrier()
class SequentialDistributedSampler(Sampler):
"""
Distributed Sampler that subsamples indices sequentially, making it easier to collate all results at the end.
Even though we only use this sampler for eval and predict (no training), which means that the model params won't
have to be synced (i.e. will not hang for synchronization even if varied number of forward passes), we still add
extra samples to the sampler to make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather`
or `reduce` resulting tensors at the end of the loop.
"""
def __init__(self, dataset, num_replicas=None, rank=None):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert (
len(indices) == self.total_size
), f"Indices length {len(indices)} and total size {self.total_size} mismatched"
# subsample
indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples]
assert (
len(indices) == self.num_samples
), f"Indices length {len(indices)} and sample number {self.num_samples} mismatched"
return iter(indices)
def __len__(self):
return self.num_samples
def get_tpu_sampler(dataset: torch.utils.data.dataset.Dataset):
if xm.xrt_world_size() <= 1:
return RandomSampler(dataset)
return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
def nested_new_like(arrays, num_samples, padding_index=-100):
""" Create the same nested structure as `arrays` with a first dimension always at `num_samples`."""
if isinstance(arrays, (list, tuple)):
return type(arrays)(nested_new_like(x, num_samples, padding_index=padding_index) for x in arrays)
return np.full_like(arrays, padding_index, shape=(num_samples, *arrays.shape[1:]))
def nested_expand_like(arrays, new_seq_length, padding_index=-100):
""" Expand the `arrays` so that the second dimension grows to `new_seq_length`. Uses `padding_index` for padding."""
if isinstance(arrays, (list, tuple)):
return type(arrays)(nested_expand_like(x, new_seq_length, padding_index=padding_index) for x in arrays)
result = np.full_like(arrays, padding_index, shape=(arrays.shape[0], new_seq_length) + arrays.shape[2:])
result[:, : arrays.shape[1]] = arrays
return result
def nested_truncate(tensors, limit):
"Truncate `tensors` at `limit` (even if it's a nested list/tuple of tensors)."
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_truncate(t, limit) for t in tensors)
return tensors[:limit]
def _get_first_shape(arrays):
"""Return the shape of the first array found in the nested struct `arrays`."""
if isinstance(arrays, (list, tuple)):
return _get_first_shape(arrays[0])
return arrays.shape
class DistributedTensorGatherer:
"""
A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks.
If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every
step, our sampler will generate the following indices:
:obj:`[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]`
to get something of size a multiple of 3 (so that each process gets the same dataset length). Then process 0, 1 and
2 will be responsible of making predictions for the following samples:
- P0: :obj:`[0, 1, 2, 3, 4, 5]`
- P1: :obj:`[6, 7, 8, 9, 10, 11]`
- P2: :obj:`[12, 13, 14, 15, 0, 1]`
The first batch treated on each process will be
- P0: :obj:`[0, 1]`
- P1: :obj:`[6, 7]`
- P2: :obj:`[12, 13]`
So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to
the following indices:
:obj:`[0, 1, 6, 7, 12, 13]`
If we directly concatenate our results without taking any precautions, the user will then get the predictions for
the indices in this order at the end of the prediction loop:
:obj:`[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]`
For some reason, that's not going to roll their boat. This class is there to solve that problem.
Args:
world_size (:obj:`int`):
The number of processes used in the distributed training.
num_samples (:obj:`int`):
The number of samples in our dataset.
make_multiple_of (:obj:`int`, `optional`):
If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument
(by adding samples).
padding_index (:obj:`int`, `optional`, defaults to -100):
The padding index to use if the arrays don't all have the same sequence length.
"""
def __init__(self, world_size, num_samples, make_multiple_of=None, padding_index=-100):
self.world_size = world_size
self.num_samples = num_samples
total_size = world_size if make_multiple_of is None else world_size * make_multiple_of
self.total_samples = int(np.ceil(num_samples / total_size)) * total_size
self.process_length = self.total_samples // world_size
self._storage = None
self._offsets = None
self.padding_index = padding_index
def add_arrays(self, arrays):
"""
Add :obj:`arrays` to the internal storage. Will initialize the storage to the full size at the first arrays
passed so that if we're bound to get an OOM, it happens at the beginning.
"""
if arrays is None:
return
if self._storage is None:
self._storage = nested_new_like(arrays, self.total_samples, padding_index=self.padding_index)
self._offsets = list(range(0, self.total_samples, self.process_length))
else:
storage_shape = _get_first_shape(self._storage)
arrays_shape = _get_first_shape(arrays)
if len(storage_shape) > 1 and storage_shape[1] < arrays_shape[1]:
# If we get new arrays that are too big to fit, we expand the shape of the storage
self._storage = nested_expand_like(self._storage, arrays_shape[1], padding_index=self.padding_index)
slice_len = self._nested_set_tensors(self._storage, arrays)
for i in range(self.world_size):
self._offsets[i] += slice_len
def _nested_set_tensors(self, storage, arrays):
if isinstance(arrays, (list, tuple)):
for x, y in zip(storage, arrays):
slice_len = self._nested_set_tensors(x, y)
return slice_len
assert (
arrays.shape[0] % self.world_size == 0
), f"Arrays passed should all have a first dimension multiple of {self.world_size}, found {arrays.shape[0]}."
slice_len = arrays.shape[0] // self.world_size
for i in range(self.world_size):
if len(arrays.shape) == 1:
storage[self._offsets[i] : self._offsets[i] + slice_len] = arrays[i * slice_len : (i + 1) * slice_len]
else:
storage[self._offsets[i] : self._offsets[i] + slice_len, : arrays.shape[1]] = arrays[
i * slice_len : (i + 1) * slice_len
]
return slice_len
def finalize(self):
"""
Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras
to get each process a dataset of the same length).
"""
if self._storage is None:
return
if self._offsets[0] != self.process_length:
logger.warn("Not all data has been set. Are you sure you passed all values?")
return nested_truncate(self._storage, self.num_samples)
@dataclass
class LabelSmoother:
"""
Adds label-smoothing on a pre-computed output from a Transformers model.
Args:
epsilon (:obj:`float`, `optional`, defaults to 0.1):
The label smoothing factor.
ignore_index (:obj:`int`, `optional`, defaults to -100):
The index in the labels to ignore when computing the loss.
"""
epsilon: float = 0.1
ignore_index: int = -100
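# __call__ below returns (1 - epsilon) * nll_loss + epsilon * smoothed_loss, where smoothed_loss
# averages -log p over the full vocabulary and padded positions are masked out of both terms.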
def __call__(self, model_output, labels):
logits = model_output["logits"] if isinstance(model_output, dict) else model_output[0]
log_probs = -torch.nn.functional.log_softmax(logits, dim=-1)
if labels.dim() == log_probs.dim() - 1:
labels = labels.unsqueeze(-1)
padding_mask = labels.eq(self.ignore_index)
# In case the ignore_index is -100, the gather will fail, so we replace labels by 0. The padding_mask
# will ignore them in any case.
labels.clamp_min_(0)
nll_loss = log_probs.gather(dim=-1, index=labels)
smoothed_loss = log_probs.sum(dim=-1, keepdim=True)
nll_loss.masked_fill_(padding_mask, 0.0)
smoothed_loss.masked_fill_(padding_mask, 0.0)
# Take the mean over the label dimensions, then divide by the number of active elements (i.e. not-padded):
num_active_elements = padding_mask.numel() - padding_mask.long().sum()
nll_loss = nll_loss.sum() / num_active_elements
smoothed_loss = smoothed_loss.sum() / (num_active_elements * log_probs.shape[-1])
return (1 - self.epsilon) * nll_loss + self.epsilon * smoothed_loss
def get_length_grouped_indices(lengths, batch_size, mega_batch_mult=None, generator=None):
"""
Return a list of indices so that each slice of :obj:`batch_size` consecutive indices correspond to elements of
similar lengths. To do this, the indices are:
- randomly permuted
- grouped in mega-batches of size :obj:`mega_batch_mult * batch_size`
- sorted by length in each mega-batch
The result is the concatenation of all mega-batches, with the batch of :obj:`batch_size` containing the element of
maximum length placed first, so that an OOM happens sooner rather than later.
"""
# Default for mega_batch_mult: 50 or the number to get 4 megabatches, whichever is smaller.
if mega_batch_mult is None:
mega_batch_mult = min(len(lengths) // (batch_size * 4), 50)
# Just in case, for tiny datasets
if mega_batch_mult == 0:
mega_batch_mult = 1
# We need to use torch for the random part as a distributed sampler will set the random seed for torch.
indices = torch.randperm(len(lengths), generator=generator)
megabatch_size = mega_batch_mult * batch_size
megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)]
megabatches = [list(sorted(megabatch, key=lambda i: lengths[i], reverse=True)) for megabatch in megabatches]
# The rest is to get the biggest batch first.
# Since each megabatch is sorted by descending length, the longest element is the first
megabatch_maximums = [lengths[megabatch[0]] for megabatch in megabatches]
max_idx = torch.argmax(torch.tensor(megabatch_maximums)).item()
# Switch to put the longest element in first position
megabatches[0][0], megabatches[max_idx][0] = megabatches[max_idx][0], megabatches[0][0]
return sum(megabatches, [])
class LengthGroupedSampler(Sampler):
r"""
Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while
keeping a bit of randomness.
"""
def __init__(self, dataset: Dataset, batch_size: int, lengths: Optional[List[int]] = None):
self.dataset = dataset
self.batch_size = batch_size
if lengths is None:
if not isinstance(dataset[0], dict) or "input_ids" not in dataset[0]:
raise ValueError(
"Can only automatically infer lengths for datasets whose items are dictionaries with an "
"'input_ids' key."
)
lengths = [len(feature["input_ids"]) for feature in dataset]
self.lengths = lengths
def __len__(self):
return len(self.lengths)
def __iter__(self):
indices = get_length_grouped_indices(self.lengths, self.batch_size)
return iter(indices)
class DistributedLengthGroupedSampler(DistributedSampler):
r"""
Distributed Sampler that samples indices in a way that groups together features of the dataset of roughly the same
length while keeping a bit of randomness.
"""
# Copied and adapted from PyTorch DistributedSampler.
def __init__(
self,
dataset: Dataset,
batch_size: int,
num_replicas: Optional[int] = None,
rank: Optional[int] = None,
seed: int = 0,
drop_last: bool = False,
lengths: Optional[List[int]] = None,
):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.batch_size = batch_size
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.drop_last = drop_last
# If the dataset length is evenly divisible by # of replicas, then there
# is no need to drop any data, since the dataset will be split equally.
if self.drop_last and len(self.dataset) % self.num_replicas != 0:
# Split to nearest available length that is evenly divisible.
# This is to ensure each rank receives the same amount of data when
# using this Sampler.
self.num_samples = math.ceil((len(self.dataset) - self.num_replicas) / self.num_replicas)
else:
self.num_samples = math.ceil(len(self.dataset) / self.num_replicas)
self.total_size = self.num_samples * self.num_replicas
self.seed = seed
if lengths is None:
if not isinstance(dataset[0], dict) or "input_ids" not in dataset[0]:
raise ValueError(
"Can only automatically infer lengths for datasets whose items are dictionaries with an "
"'input_ids' key."
)
lengths = [len(feature["input_ids"]) for feature in dataset]
self.lengths = lengths
def __iter__(self) -> Iterator:
# Deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=g)
if not self.drop_last:
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
else:
# remove tail of data to make it evenly divisible.
indices = indices[: self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank : self.total_size : self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
the-stack_0_6637 | # (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import unittest
from traits.api import HasTraits, Instance, Str, Any, Property
class Foo(HasTraits):
s = Str
class ClassWithAny(HasTraits):
x = Property
_x = Any
def _get_x(self):
return self._x
def _set_x(self, x):
self._x = x
class ClassWithInstance(HasTraits):
x = Property
_x = Instance(Foo)
def _get_x(self):
return self._x
def _set_x(self, x):
self._x = x
class ClassWithClassAttribute(HasTraits):
name = "class defined name"
foo = Str
class BazAny(HasTraits):
other = Any
class BarAny(HasTraits):
other = Any
class BazInstance(HasTraits):
# A BarInstance owned by this object.
other = Instance("BarInstance")
# A Foo owned by this object and not referenced by others.
unique = Instance(Foo)
# A Foo owned by this object and referenced by others.
shared = Instance(Foo)
# A Foo not owned by this object, may or may not be shared with other
# objects found via owned references (e.g. other.ref). For the tests,
# ref will always reference a Foo that is not owned by any of the objects
# reachable via owned references, and therefore, that Foo object should
# not be cloned.
ref = Instance(Foo, copy="ref")
class BarInstance(HasTraits):
# used as circular reference back to owning BazInstance
# NOTE: Setting copy to 'ref' will mean that when BarInstance is cloned,
# the 'other' trait will not be copied, and will still point to the
# 'other' attribute of the original BarInstance.
other = Instance("BazInstance", copy="ref")
# A Foo owned by this object and not referenced by others.
unique = Instance(Foo)
# A Foo owned by the 'other' object and referenced by this object.
shared = Instance(Foo)
# A Foo not owned by this object, may or may not be shared with other
# objects found via owned references (e.g. other.ref). For the tests,
# ref will always reference a Foo that is not owned by any of the objects
# reachable via owned references, and therefore, that Foo object should
# not be cloned.
ref = Instance(Foo, copy="ref")
class CloneTestCase(unittest.TestCase):
""" Test cases for traits clone """
def test_any(self):
b = ClassWithAny()
f = Foo()
f.s = "the f"
b.x = f
bc = b.clone_traits(traits="all", copy="deep")
self.assertNotEqual(id(bc.x), id(f), "Foo x not cloned")
def test_instance(self):
b = ClassWithInstance()
f = Foo()
f.s = "the f"
b.x = f
bc = b.clone_traits(traits="all", copy="deep")
self.assertNotEqual(id(bc.x), id(f), "Foo x not cloned")
def test_class_attribute_missing(self):
""" This test demonstrates a problem with Traits objects with class
attributes. A change to the value of a class attribute via one
instance causes the attribute to be removed from other instances.
AttributeError: 'ClassWithClassAttribute' object has no attribute
'name'
"""
s = "class defined name"
c = ClassWithClassAttribute()
self.assertEqual(s, c.name)
c2 = ClassWithClassAttribute()
self.assertEqual(s, c.name)
self.assertEqual(s, c2.name)
s2 = "name class attribute changed via clone"
c2.name = s2
self.assertEqual(s2, c2.name)
# this is failing with AttributeError: 'ClassWithClassAttribute'
# object has no attribute 'name'
self.assertEqual(s, c.name)
def test_Any_circular_references(self):
# Demonstrates that Any traits default to copy='ref'
bar = BarAny()
baz = BazAny()
bar.other = baz
baz.other = bar
bar_copy = bar.clone_traits()
self.assertIsNot(bar_copy, bar)
self.assertIs(bar_copy.other, baz)
self.assertIs(bar_copy.other.other, bar)
def test_Any_circular_references_deep(self):
# Demonstrates that Any traits can be forced to deep copy.
bar = BarAny()
baz = BazAny()
bar.other = baz
baz.other = bar
bar_copy = bar.clone_traits(copy="deep")
self.assertIsNot(bar_copy, bar)
self.assertIsNot(bar_copy.other, baz)
self.assertIsNot(bar_copy.other.other, bar)
self.assertIs(bar_copy.other.other, bar_copy)
def test_Instance_circular_references(self):
ref = Foo(s="ref")
bar_unique = Foo(s="bar.foo")
shared = Foo(s="shared")
baz_unique = Foo(s="baz.unique")
baz = BazInstance()
baz.unique = baz_unique
baz.shared = shared
baz.ref = ref
bar = BarInstance()
bar.unique = bar_unique
bar.shared = shared
bar.ref = ref
bar.other = baz
baz.other = bar
baz_copy = baz.clone_traits()
# Check Baz and Baz attributes....
self.assertIsNot(baz_copy, baz)
self.assertIsNot(baz_copy.other, bar)
self.assertIsNot(baz_copy.unique, baz.unique)
self.assertIsNot(baz_copy.shared, baz.shared)
self.assertIs(baz_copy.ref, ref)
# Check Bar and Bar attributes....
bar_copy = baz_copy.other
# Check the Bar owned object
self.assertIsNot(bar_copy.unique, bar.unique)
# Check the Bar reference to an object 'outside' the cloned graph.
self.assertIs(bar_copy.ref, ref)
# Check references to objects that where cloned, they should reference
# the new clones not the original objects, except when copy is set
# to 'ref' (as in the case of the 'other' trait).
# When copy is set to ref, the trait does not get cloned. Therefore,
# baz_copy.other.other is baz (and not baz_copy).
self.assertIsNot(bar_copy.other, baz_copy)
self.assertIs(bar_copy.other, baz)
# 'shared' does not have copy set to 'ref', and so bar_copy.shared
# should reference the new clone.
self.assertIsNot(bar_copy.shared, baz.shared)
self.assertIs(bar_copy.shared, baz_copy.shared)
def test_Instance_circular_references_deep(self):
ref = Foo(s="ref")
bar_unique = Foo(s="bar.foo")
shared = Foo(s="shared")
baz_unique = Foo(s="baz.unique")
baz = BazInstance()
baz.unique = baz_unique
baz.shared = shared
baz.ref = ref
bar = BarInstance()
bar.unique = bar_unique
bar.shared = shared
bar.ref = ref
bar.other = baz
baz.other = bar
baz_copy = baz.clone_traits(copy="deep")
# Check Baz and Baz attributes....
self.assertIsNot(baz_copy, baz)
self.assertIsNot(baz_copy.other, bar)
self.assertIsNot(baz_copy.unique, baz.unique)
self.assertIsNot(baz_copy.shared, baz.shared)
# baz_copy.ref is checked below with bar_copy.ref.
# Check Bar and Bar attributes....
bar_copy = baz_copy.other
# Check the Bar owned object
self.assertIsNot(bar_copy.unique, bar.unique)
# Since the two original 'ref' links were to a shared object,
# the cloned links should be to a shared object. Also, the shared
# object should be the original 'ref' object, since copy was set to
# 'ref'.
self.assertIs(baz_copy.ref, bar_copy.ref)
self.assertIs(bar_copy.ref, ref)
# Check references to objects that where cloned, they should reference
# the new clones not the original objects, except when copy is set
# to 'ref' (as in the case of the 'other' trait). That is, the 'deep'
# flag on clone_traits should not override the 'copy' metadata on
# the trait.
self.assertIsNot(bar_copy.other, baz_copy)
self.assertIs(bar_copy.other, baz)
# 'shared' does not have copy set to 'ref', and so bar_copy.shared
# should reference the new clone.
self.assertIsNot(bar_copy.shared, baz.shared)
self.assertIs(bar_copy.shared, baz_copy.shared)
|
the-stack_0_6638 | # coding: utf-8
from django.conf.urls import include, url
from customers.cbv_base.CreateView import CreateViewCustom
from customers.cbv_base.DeleteView import DeleteViewCustom
from customers.cbv_base.UpdateView import UpdateViewCustom
from customers.cbv_base.ListView import ListViewCustomOrderBy
from customers.cbv_base.DetailView import DetailViewCustom
from .models import Task
from .cbv import TaskCreate
from .forms import TaskForm
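# Each route below wires one of the generic customer CBVs to the Task model, naming the
# reverse URLs (tasks:list, tasks:create, ...) and the template used to render it.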
urlpatterns = [
url(r'^(list)?$',
ListViewCustomOrderBy.as_view(
model = Task,
cbv_order_by = "created",
url_delete_name = "tasks:delete",
url_update_name = "tasks:update",
url_create_name = "tasks:create",
url_list_name = "tasks:list",
url_detail_name = "tasks:detail",
template_name = "cbv/ListViewCustom.html",
),
name = 'list'
),
url(r'^create$',
TaskCreate.as_view(
model=Task,
success_url = "tasks:list",
url_name = "tasks:create",
template_name = "cbv/CreateViewCustom.html",
form_class = TaskForm,
),
name='create'
),
url(r'^update-(?P<pk>\d+)$',
UpdateViewCustom.as_view(
model=Task,
success_url="tasks:list",
url_name="tasks:update",
template_name="cbv/UpdateViewCustom.html",
form_class = TaskForm,
),
name='update'
),
url(r'^delete-(?P<pk>\d+)$',
DeleteViewCustom.as_view(
model=Task,
url_name="tasks:delete",
success_url="tasks:list",
template_name="cbv/DeleteViewCustom.html"
),
name='delete'
),
url(r'^detail-(?P<pk>\d+)$',
DetailViewCustom.as_view(
model=Task,
url_name="tasks:detail",
template_name="tasks/detail.html"
),
name='detail'
),
] |
the-stack_0_6640 | import collections
class Solution:
def pacificAtlantic(self, matrix: List[List[int]]) -> List[List[int]]:
m = len(matrix)
if m == 0:
return []
n = len(matrix[0])
if n == 0:
return []
visitedTimes = [[0] * n for _ in range(m)]
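# Approach: BFS inward from the Pacific border (top/left) and from the Atlantic border
# (bottom/right), only stepping onto cells of equal or greater height. Cells reached by both
# searches (visitedTimes == 2) can drain to both oceans.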
def bfs(start):
Q = collections.deque(start)
visited = [[False]*n for _ in range(m)]
for row, col, height in start:
visited[row][col] = True
visitedTimes[row][col] += 1
while Q:
row, col, height = Q.popleft()
for nr, nc in (row-1, col), (row+1, col), (row, col-1), (row, col+1):
if 0 <= nr < m and 0 <= nc < n and not visited[nr][nc] and matrix[nr][nc] >= height:
visited[nr][nc] = True
visitedTimes[nr][nc] += 1
Q.append((nr, nc, matrix[nr][nc]))
bfs([[i, 0, matrix[i][0]] for i in range(m)] + [[0, j, matrix[0][j]] for j in range(1, n)])
bfs([[i, n - 1, matrix[i][n - 1]] for i in range(m)] + [[m - 1, j, matrix[m - 1][j]] for j in range(n - 1)])
return [[row, col] for row in range(m) for col in range(n) if visitedTimes[row][col] == 2]
|
the-stack_0_6641 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
import setuptools
import sys
cext = setuptools.Extension(
"pyignite._cutils",
sources=[
"./cext/cutils.c"
],
include_dirs=["./cext"]
)
if sys.platform == 'win32':
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError, ValueError)
else:
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class BuildFailed(Exception):
pass
class ve_build_ext(build_ext):
# This class allows C extension building to fail.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors:
raise BuildFailed()
def is_a_requirement(line):
return not any([
line.startswith('#'),
line.startswith('-r'),
len(line) == 0,
])
install_requirements = []
with open('requirements/install.txt', 'r', encoding='utf-8') as requirements_file:
for line in requirements_file.readlines():
line = line.strip('\n')
if is_a_requirement(line):
install_requirements.append(line)
with open('README.md', 'r', encoding='utf-8') as readme_file:
long_description = readme_file.read()
with open('pyignite/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
def run_setup(with_binary=True):
if with_binary:
kw = dict(
ext_modules=[cext],
cmdclass=dict(build_ext=ve_build_ext),
)
else:
kw = dict()
setuptools.setup(
name='pyignite',
version=version,
python_requires='>=3.6',
author='The Apache Software Foundation',
author_email='[email protected]',
description='Apache Ignite binary client Python API',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/apache/ignite-python-thin-client',
packages=setuptools.find_packages(),
install_requires=install_requirements,
license="Apache License 2.0",
license_files=('LICENSE', 'NOTICE'),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3 :: Only',
'Intended Audience :: Developers',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
**kw
)
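# Build with the C extension first; if compilation fails on this platform, retry below as a
# pure-Python installation without the speedups.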
try:
run_setup()
except BuildFailed:
BUILD_EXT_WARNING = ("WARNING: The C extension could not be compiled, "
"speedups are not enabled.")
print('*' * 75)
print(BUILD_EXT_WARNING)
print("Failure information, if any, is above.")
print("I'm retrying the build without the C extension now.")
print('*' * 75)
run_setup(False)
print('*' * 75)
print(BUILD_EXT_WARNING)
print("Plain python installation succeeded.")
print('*' * 75)
|
the-stack_0_6642 | from __future__ import division
import array
import os
import subprocess
from tempfile import TemporaryFile, NamedTemporaryFile
import wave
import sys
import struct
from .logging_utils import log_conversion, log_subprocess_output
from .utils import mediainfo_json, fsdecode
import base64
from collections import namedtuple
try:
from StringIO import StringIO
except:
from io import StringIO
from io import BytesIO
try:
from itertools import izip
except:
izip = zip
from .utils import (
_fd_or_path_or_tempfile,
db_to_float,
ratio_to_db,
get_encoder_name,
get_array_type,
audioop,
)
from .exceptions import (
TooManyMissingFrames,
InvalidDuration,
InvalidID3TagVersion,
InvalidTag,
CouldntDecodeError,
CouldntEncodeError,
MissingAudioParameter,
)
if sys.version_info >= (3, 0):
basestring = str
xrange = range
StringIO = BytesIO
class ClassPropertyDescriptor(object):
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, klass=None):
if klass is None:
klass = type(obj)
return self.fget.__get__(obj, klass)()
def __set__(self, obj, value):
if not self.fset:
raise AttributeError("can't set attribute")
type_ = type(obj)
return self.fset.__get__(obj, type_)(value)
def setter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
def classproperty(func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
AUDIO_FILE_EXT_ALIASES = {
"m4a": "mp4",
"wave": "wav",
}
WavSubChunk = namedtuple('WavSubChunk', ['id', 'position', 'size'])
WavData = namedtuple('WavData', ['audio_format', 'channels', 'sample_rate',
'bits_per_sample', 'raw_data'])
def extract_wav_headers(data):
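# Walk the RIFF structure: after the 12-byte RIFF/WAVE descriptor, each subchunk starts with a
# 4-byte id followed by a 4-byte little-endian size; scanning stops once the 'data' chunk is found.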
# def search_subchunk(data, subchunk_id):
pos = 12 # The size of the RIFF chunk descriptor
subchunks = []
while pos + 8 <= len(data) and len(subchunks) < 10:
subchunk_id = data[pos:pos + 4]
subchunk_size = struct.unpack_from('<I', data[pos + 4:pos + 8])[0]
subchunks.append(WavSubChunk(subchunk_id, pos, subchunk_size))
if subchunk_id == b'data':
# 'data' is the last subchunk
break
pos += subchunk_size + 8
return subchunks
def read_wav_audio(data, headers=None):
if not headers:
headers = extract_wav_headers(data)
fmt = [x for x in headers if x.id == b'fmt ']
if not fmt or fmt[0].size < 16:
raise CouldntDecodeError("Couldn't find fmt header in wav data")
fmt = fmt[0]
pos = fmt.position + 8
audio_format = struct.unpack_from('<H', data[pos:pos + 2])[0]
if audio_format != 1 and audio_format != 0xFFFE:
raise CouldntDecodeError("Unknown audio format 0x%X in wav data" %
audio_format)
channels = struct.unpack_from('<H', data[pos + 2:pos + 4])[0]
sample_rate = struct.unpack_from('<I', data[pos + 4:pos + 8])[0]
bits_per_sample = struct.unpack_from('<H', data[pos + 14:pos + 16])[0]
data_hdr = headers[-1]
if data_hdr.id != b'data':
raise CouldntDecodeError("Couldn't find data header in wav data")
pos = data_hdr.position + 8
return WavData(audio_format, channels, sample_rate, bits_per_sample,
data[pos:pos + data_hdr.size])
def fix_wav_headers(data):
headers = extract_wav_headers(data)
if not headers or headers[-1].id != b'data':
return
# TODO: Handle huge files in some other way
if len(data) > 2**32:
raise CouldntDecodeError("Unable to process >4GB files")
# Set the file size in the RIFF chunk descriptor
data[4:8] = struct.pack('<I', len(data) - 8)
# Set the data size in the data subchunk
pos = headers[-1].position
data[pos + 4:pos + 8] = struct.pack('<I', len(data) - pos - 8)
class AudioSegment(object):
"""
AudioSegments are *immutable* objects representing segments of audio
that can be manipulated using python code.
AudioSegments are slicable using milliseconds.
for example:
a = AudioSegment.from_mp3(mp3file)
first_second = a[:1000] # get the first second of an mp3
slice = a[5000:10000] # get a slice from 5 to 10 seconds of an mp3
"""
converter = get_encoder_name() # either ffmpeg or avconv
# TODO: remove in 1.0 release
# maintain backwards compatibility for ffmpeg attr (now called converter)
@classproperty
def ffmpeg(cls):
return cls.converter
@ffmpeg.setter
def ffmpeg(cls, val):
cls.converter = val
DEFAULT_CODECS = {
"ogg": "libvorbis"
}
def __init__(self, data=None, *args, **kwargs):
self.sample_width = kwargs.pop("sample_width", None)
self.frame_rate = kwargs.pop("frame_rate", None)
self.channels = kwargs.pop("channels", None)
audio_params = (self.sample_width, self.frame_rate, self.channels)
if isinstance(data, array.array):
try:
data = data.tobytes()
except:
data = data.tostring()
# prevent partial specification of arguments
if any(audio_params) and None in audio_params:
raise MissingAudioParameter("Either all audio parameters or no parameter must be specified")
# all arguments are given
elif self.sample_width is not None:
if len(data) % (self.sample_width * self.channels) != 0:
raise ValueError("data length must be a multiple of '(sample_width * channels)'")
self.frame_width = self.channels * self.sample_width
self._data = data
# keep support for 'metadata' until audio params are used everywhere
elif kwargs.get('metadata', False):
# internal use only
self._data = data
for attr, val in kwargs.pop('metadata').items():
setattr(self, attr, val)
else:
# normal construction
try:
data = data if isinstance(data, (basestring, bytes)) else data.read()
except(OSError):
d = b''
reader = data.read(2 ** 31 - 1)
while reader:
d += reader
reader = data.read(2 ** 31 - 1)
data = d
wav_data = read_wav_audio(data)
if not wav_data:
raise CouldntDecodeError("Couldn't read wav audio from data")
self.channels = wav_data.channels
self.sample_width = wav_data.bits_per_sample // 8
self.frame_rate = wav_data.sample_rate
self.frame_width = self.channels * self.sample_width
self._data = wav_data.raw_data
if self.sample_width == 1:
# convert from unsigned integers in wav
self._data = audioop.bias(self._data, 1, -128)
# Convert 24-bit audio to 32-bit audio.
# (stdlib audioop and array modules do not support 24-bit data)
if self.sample_width == 3:
byte_buffer = BytesIO()
# Workaround for python 2 vs python 3. _data in 2.x are length-1 strings,
# and in 3.x are ints.
pack_fmt = 'BBB' if isinstance(self._data[0], int) else 'ccc'
# This conversion maintains the 24 bit values. The values are
# not scaled up to the 32 bit range. Other conversions could be
# implemented.
i = iter(self._data)
padding = {False: b'\x00', True: b'\xFF'}
for b0, b1, b2 in izip(i, i, i):
byte_buffer.write(padding[b2 > b'\x7f'[0]])
old_bytes = struct.pack(pack_fmt, b0, b1, b2)
byte_buffer.write(old_bytes)
self._data = byte_buffer.getvalue()
self.sample_width = 4
self.frame_width = self.channels * self.sample_width
super(AudioSegment, self).__init__(*args, **kwargs)
@property
def raw_data(self):
"""
public access to the raw audio data as a bytestring
"""
return self._data
def get_array_of_samples(self, array_type_override=None):
"""
returns the raw_data as an array of samples
"""
if array_type_override is None:
array_type_override = self.array_type
return array.array(array_type_override, self._data)
@property
def array_type(self):
return get_array_type(self.sample_width * 8)
def __len__(self):
"""
returns the length of this audio segment in milliseconds
"""
return round(1000 * (self.frame_count() / self.frame_rate))
def __eq__(self, other):
try:
return self._data == other._data
except:
return False
def __hash__(self):
return hash(AudioSegment) ^ hash((self.channels, self.frame_rate, self.sample_width, self._data))
def __ne__(self, other):
return not (self == other)
def __iter__(self):
return (self[i] for i in xrange(len(self)))
def __getitem__(self, millisecond):
if isinstance(millisecond, slice):
if millisecond.step:
return (
self[i:i + millisecond.step]
for i in xrange(*millisecond.indices(len(self)))
)
start = millisecond.start if millisecond.start is not None else 0
end = millisecond.stop if millisecond.stop is not None \
else len(self)
start = min(start, len(self))
end = min(end, len(self))
else:
start = millisecond
end = millisecond + 1
start = self._parse_position(start) * self.frame_width
end = self._parse_position(end) * self.frame_width
data = self._data[start:end]
# ensure the output is as long as the requester is expecting
expected_length = end - start
missing_frames = (expected_length - len(data)) // self.frame_width
if missing_frames:
if missing_frames > self.frame_count(ms=2):
raise TooManyMissingFrames(
"You should never be filling in "
" more than 2 ms with silence here, "
"missing frames: %s" % missing_frames)
silence = audioop.mul(data[:self.frame_width],
self.sample_width, 0)
data += (silence * missing_frames)
return self._spawn(data)
def get_sample_slice(self, start_sample=None, end_sample=None):
"""
Get a section of the audio segment by sample index.
NOTE: Negative indices do *not* address samples backward
from the end of the audio segment like a python list.
This is intentional.
"""
max_val = int(self.frame_count())
def bounded(val, default):
if val is None:
return default
if val < 0:
return 0
if val > max_val:
return max_val
return val
start_i = bounded(start_sample, 0) * self.frame_width
end_i = bounded(end_sample, max_val) * self.frame_width
data = self._data[start_i:end_i]
return self._spawn(data)
def __add__(self, arg):
if isinstance(arg, AudioSegment):
return self.append(arg, crossfade=0)
else:
return self.apply_gain(arg)
def __radd__(self, rarg):
"""
Permit use of sum() builtin with an iterable of AudioSegments
"""
if rarg == 0:
return self
raise TypeError("Gains must be the second addend after the "
"AudioSegment")
def __sub__(self, arg):
if isinstance(arg, AudioSegment):
raise TypeError("AudioSegment objects can't be subtracted from "
"each other")
else:
return self.apply_gain(-arg)
def __mul__(self, arg):
"""
If the argument is an AudioSegment, overlay the multiplied audio
segment.
If it's a number, just use the string multiply operation to repeat the
audio.
The following would return an AudioSegment that contains the
audio of audio_seg eight times
`audio_seg * 8`
"""
if isinstance(arg, AudioSegment):
return self.overlay(arg, position=0, loop=True)
else:
return self._spawn(data=self._data * arg)
def _spawn(self, data, overrides={}):
"""
Creates a new audio segment using the metadata from the current one
and the data passed in. Should be used whenever an AudioSegment is
being returned by an operation that would alter the current one,
since AudioSegment objects are immutable.
"""
# accept lists of data chunks
if isinstance(data, list):
data = b''.join(data)
if isinstance(data, array.array):
try:
data = data.tobytes()
except:
data = data.tostring()
# accept file-like objects
if hasattr(data, 'read'):
if hasattr(data, 'seek'):
data.seek(0)
data = data.read()
metadata = {
'sample_width': self.sample_width,
'frame_rate': self.frame_rate,
'frame_width': self.frame_width,
'channels': self.channels
}
metadata.update(overrides)
return self.__class__(data=data, metadata=metadata)
@classmethod
def _sync(cls, *segs):
channels = max(seg.channels for seg in segs)
frame_rate = max(seg.frame_rate for seg in segs)
sample_width = max(seg.sample_width for seg in segs)
return tuple(
seg.set_channels(channels).set_frame_rate(frame_rate).set_sample_width(sample_width)
for seg in segs
)
def _parse_position(self, val):
if val < 0:
val = len(self) - abs(val)
val = self.frame_count(ms=len(self)) if val == float("inf") else \
self.frame_count(ms=val)
return int(val)
@classmethod
def empty(cls):
return cls(b'', metadata={
"channels": 1,
"sample_width": 1,
"frame_rate": 1,
"frame_width": 1
})
@classmethod
def silent(cls, duration=1000, frame_rate=11025):
"""
Generate a silent audio segment.
duration specified in milliseconds (default duration: 1000ms, default frame_rate: 11025).
"""
frames = int(frame_rate * (duration / 1000.0))
data = b"\0\0" * frames
return cls(data, metadata={"channels": 1,
"sample_width": 2,
"frame_rate": frame_rate,
"frame_width": 2})
@classmethod
def from_mono_audiosegments(cls, *mono_segments):
if not len(mono_segments):
raise ValueError("At least one AudioSegment instance is required")
segs = cls._sync(*mono_segments)
if segs[0].channels != 1:
raise ValueError(
"AudioSegment.from_mono_audiosegments requires all arguments are mono AudioSegment instances")
channels = len(segs)
sample_width = segs[0].sample_width
frame_rate = segs[0].frame_rate
frame_count = max(int(seg.frame_count()) for seg in segs)
data = array.array(
segs[0].array_type,
b'\0' * (frame_count * sample_width * channels)
)
for i, seg in enumerate(segs):
data[i::channels] = seg.get_array_of_samples()
return cls(
data,
channels=channels,
sample_width=sample_width,
frame_rate=frame_rate,
)
@classmethod
def from_file_using_temporary_files(cls, file, format=None, codec=None, parameters=None, **kwargs):
orig_file = file
file, close_file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False)
if format:
format = format.lower()
format = AUDIO_FILE_EXT_ALIASES.get(format, format)
def is_format(f):
f = f.lower()
if format == f:
return True
if isinstance(orig_file, basestring):
return orig_file.lower().endswith(".{0}".format(f))
if isinstance(orig_file, bytes):
return orig_file.lower().endswith((".{0}".format(f)).encode('utf8'))
return False
if is_format("wav"):
try:
obj = cls._from_safe_wav(file)
if close_file:
file.close()
return obj
            except Exception:
file.seek(0)
elif is_format("raw") or is_format("pcm"):
sample_width = kwargs['sample_width']
frame_rate = kwargs['frame_rate']
channels = kwargs['channels']
metadata = {
'sample_width': sample_width,
'frame_rate': frame_rate,
'channels': channels,
'frame_width': channels * sample_width
}
obj = cls(data=file.read(), metadata=metadata)
if close_file:
file.close()
return obj
input_file = NamedTemporaryFile(mode='wb', delete=False)
try:
input_file.write(file.read())
        except OSError:
input_file.flush()
input_file.close()
input_file = NamedTemporaryFile(mode='wb', delete=False, buffering=2 ** 31 - 1)
if close_file:
file.close()
close_file = True
file = open(orig_file, buffering=2 ** 13 - 1, mode='rb')
reader = file.read(2 ** 31 - 1)
while reader:
input_file.write(reader)
reader = file.read(2 ** 31 - 1)
input_file.flush()
if close_file:
file.close()
output = NamedTemporaryFile(mode="rb", delete=False)
conversion_command = [cls.converter,
'-y', # always overwrite existing files
]
# If format is not defined
# ffmpeg/avconv will detect it automatically
if format:
conversion_command += ["-f", format]
if codec:
# force audio decoder
conversion_command += ["-acodec", codec]
conversion_command += [
"-i", input_file.name, # input_file options (filename last)
"-vn", # Drop any video streams if there are any
"-f", "wav", # output options (filename last)
output.name
]
if parameters is not None:
# extend arguments with arbitrary set
conversion_command.extend(parameters)
log_conversion(conversion_command)
with open(os.devnull, 'rb') as devnull:
p = subprocess.Popen(conversion_command, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=0x08000000)
p_out, p_err = p.communicate()
log_subprocess_output(p_out)
log_subprocess_output(p_err)
try:
if p.returncode != 0:
raise CouldntDecodeError(
"Decoding failed. ffmpeg returned error code: {0}\n\nOutput from ffmpeg/avlib:\n\n{1}".format(
p.returncode, p_err.decode(errors='ignore') ))
obj = cls._from_safe_wav(output)
finally:
input_file.close()
output.close()
os.unlink(input_file.name)
os.unlink(output.name)
return obj
@classmethod
def from_file(cls, file, format=None, codec=None, parameters=None, **kwargs):
orig_file = file
try:
filename = fsdecode(file)
except TypeError:
filename = None
file, close_file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False)
if format:
format = format.lower()
format = AUDIO_FILE_EXT_ALIASES.get(format, format)
def is_format(f):
f = f.lower()
if format == f:
return True
if filename:
return filename.lower().endswith(".{0}".format(f))
return False
if is_format("wav"):
try:
return cls._from_safe_wav(file)
            except Exception:
file.seek(0)
elif is_format("raw") or is_format("pcm"):
sample_width = kwargs['sample_width']
frame_rate = kwargs['frame_rate']
channels = kwargs['channels']
metadata = {
'sample_width': sample_width,
'frame_rate': frame_rate,
'channels': channels,
'frame_width': channels * sample_width
}
return cls(data=file.read(), metadata=metadata)
conversion_command = [cls.converter,
'-y', # always overwrite existing files
]
# If format is not defined
# ffmpeg/avconv will detect it automatically
if format:
conversion_command += ["-f", format]
if codec:
# force audio decoder
conversion_command += ["-acodec", codec]
read_ahead_limit = kwargs.get('read_ahead_limit', -1)
if filename:
conversion_command += ["-i", filename]
stdin_parameter = subprocess.DEVNULL
stdin_data = None
else:
if cls.converter == 'ffmpeg':
conversion_command += ["-read_ahead_limit", str(read_ahead_limit),
"-i", "cache:pipe:0"]
else:
conversion_command += ["-i", "-"]
stdin_parameter = subprocess.PIPE
stdin_data = file.read()
if codec:
info = None
else:
info = mediainfo_json(orig_file, read_ahead_limit=read_ahead_limit)
if info:
audio_streams = [x for x in info['streams']
if x['codec_type'] == 'audio']
# This is a workaround for some ffprobe versions that always say
# that mp3/mp4/aac/webm/ogg files contain fltp samples
audio_codec = audio_streams[0].get('codec_name')
if (audio_streams[0].get('sample_fmt') == 'fltp' and
audio_codec in ['mp3', 'mp4', 'aac', 'webm', 'ogg']):
bits_per_sample = 16
else:
bits_per_sample = audio_streams[0]['bits_per_sample']
if bits_per_sample == 8:
acodec = 'pcm_u8'
else:
acodec = 'pcm_s%dle' % bits_per_sample
conversion_command += ["-acodec", acodec]
conversion_command += [
"-vn", # Drop any video streams if there are any
"-f", "wav", # output options (filename last)
"-"
]
if parameters is not None:
# extend arguments with arbitrary set
conversion_command.extend(parameters)
log_conversion(conversion_command)
p = subprocess.Popen(conversion_command, stdin=stdin_parameter,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=0x08000000)
p_out, p_err = p.communicate(input=stdin_data)
if p.returncode != 0 or len(p_out) == 0:
if close_file:
file.close()
raise CouldntDecodeError(
"Decoding failed. ffmpeg returned error code: {0}\n\nOutput from ffmpeg/avlib:\n\n{1}".format(
p.returncode, p_err.decode(errors='ignore') ))
p_out = bytearray(p_out)
fix_wav_headers(p_out)
obj = cls._from_safe_wav(BytesIO(p_out))
if close_file:
file.close()
return obj
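    # Usage sketch (file names are made up; the raw case requires the keyword
    # arguments consumed above):
    #
    #     song = AudioSegment.from_file("episode.m4a")
    #     take = AudioSegment.from_file("take1.raw", format="raw",
    #                                   sample_width=2, frame_rate=44100, channels=1)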
@classmethod
def from_mp3(cls, file, parameters=None):
return cls.from_file(file, 'mp3', parameters=parameters)
@classmethod
def from_flv(cls, file, parameters=None):
return cls.from_file(file, 'flv', parameters=parameters)
@classmethod
def from_ogg(cls, file, parameters=None):
return cls.from_file(file, 'ogg', parameters=parameters)
@classmethod
def from_wav(cls, file, parameters=None):
return cls.from_file(file, 'wav', parameters=parameters)
@classmethod
def from_raw(cls, file, **kwargs):
return cls.from_file(file, 'raw', sample_width=kwargs['sample_width'], frame_rate=kwargs['frame_rate'],
channels=kwargs['channels'])
@classmethod
def _from_safe_wav(cls, file):
file, close_file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False)
file.seek(0)
obj = cls(data=file)
if close_file:
file.close()
return obj
def export(self, out_f=None, format='mp3', codec=None, bitrate=None, parameters=None, tags=None, id3v2_version='4',
cover=None):
"""
Export an AudioSegment to a file with given options
out_f (string):
Path to destination audio file. Also accepts os.PathLike objects on
python >= 3.6
format (string)
Format for destination audio file.
('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)
codec (string)
Codec used to encode the destination file.
bitrate (string)
Bitrate used when encoding destination file. (64, 92, 128, 256, 312k...)
Each codec accepts different bitrate arguments so take a look at the
ffmpeg documentation for details (bitrate usually shown as -b, -ba or
-a:b).
parameters (list of strings)
            Additional ffmpeg/avconv parameters
tags (dict)
            Set metadata information on the destination file, usually used as
            tags. (e.g. {'title': 'Song Title', 'artist': 'Song Artist'})
id3v2_version (string)
Set ID3v2 version for tags. (default: '4')
cover (file)
Set cover for audio file from image file. (png or jpg)
"""
id3v2_allowed_versions = ['3', '4']
if format == "raw" and (codec is not None or parameters is not None):
raise AttributeError(
'Can not invoke ffmpeg when export format is "raw"; '
'specify an ffmpeg raw format like format="s16le" instead '
'or call export(format="raw") with no codec or parameters')
out_f, _ = _fd_or_path_or_tempfile(out_f, 'wb+')
out_f.seek(0)
if format == "raw":
out_f.write(self._data)
out_f.seek(0)
return out_f
# wav with no ffmpeg parameters can just be written directly to out_f
easy_wav = format == "wav" and codec is None and parameters is None
if easy_wav:
data = out_f
else:
data = NamedTemporaryFile(mode="wb", delete=False)
pcm_for_wav = self._data
if self.sample_width == 1:
# convert to unsigned integers for wav
pcm_for_wav = audioop.bias(self._data, 1, 128)
wave_data = wave.open(data, 'wb')
wave_data.setnchannels(self.channels)
wave_data.setsampwidth(self.sample_width)
wave_data.setframerate(self.frame_rate)
# For some reason packing the wave header struct with
# a float in python 2 doesn't throw an exception
wave_data.setnframes(int(self.frame_count()))
wave_data.writeframesraw(pcm_for_wav)
wave_data.close()
# for easy wav files, we're done (wav data is written directly to out_f)
if easy_wav:
return out_f
output = NamedTemporaryFile(mode="w+b", delete=False)
# build converter command to export
conversion_command = [
self.converter,
'-y', # always overwrite existing files
"-f", "wav", "-i", data.name, # input options (filename last)
]
if codec is None:
codec = self.DEFAULT_CODECS.get(format, None)
if cover is not None:
if cover.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff')) and format == "mp3":
conversion_command.extend(["-i", cover, "-map", "0", "-map", "1", "-c:v", "mjpeg"])
else:
raise AttributeError(
"Currently cover images are only supported by MP3 files. The allowed image formats are: .tif, .jpg, .bmp, .jpeg and .png.")
if codec is not None:
# force audio encoder
conversion_command.extend(["-acodec", codec])
if bitrate is not None:
conversion_command.extend(["-b:a", bitrate])
if parameters is not None:
# extend arguments with arbitrary set
conversion_command.extend(parameters)
if tags is not None:
if not isinstance(tags, dict):
raise InvalidTag("Tags must be a dictionary.")
else:
# Extend converter command with tags
# print(tags)
for key, value in tags.items():
conversion_command.extend(
['-metadata', '{0}={1}'.format(key, value)])
if format == 'mp3':
# set id3v2 tag version
if id3v2_version not in id3v2_allowed_versions:
raise InvalidID3TagVersion(
"id3v2_version not allowed, allowed versions: %s" % id3v2_allowed_versions)
conversion_command.extend([
"-id3v2_version", id3v2_version
])
if sys.platform == 'darwin' and codec == 'mp3':
conversion_command.extend(["-write_xing", "0"])
conversion_command.extend([
"-f", format, output.name, # output options (filename last)
])
log_conversion(conversion_command)
# read stdin / write stdout
with open(os.devnull, 'rb') as devnull:
p = subprocess.Popen(conversion_command, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=0x08000000)
p_out, p_err = p.communicate()
log_subprocess_output(p_out)
log_subprocess_output(p_err)
if p.returncode != 0:
raise CouldntEncodeError(
"Encoding failed. ffmpeg/avlib returned error code: {0}\n\nCommand:{1}\n\nOutput from ffmpeg/avlib:\n\n{2}".format(
p.returncode, conversion_command, p_err.decode(errors='ignore') ))
output.seek(0)
out_f.write(output.read())
data.close()
output.close()
os.unlink(data.name)
os.unlink(output.name)
out_f.seek(0)
return out_f
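    # Usage sketch (illustrative; the path, tags and extra ffmpeg parameters are
    # made up):
    #
    #     seg.export("out.mp3", format="mp3", bitrate="192k",
    #                tags={"title": "Demo", "artist": "Unknown"},
    #                parameters=["-ar", "44100"])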
def get_frame(self, index):
frame_start = index * self.frame_width
frame_end = frame_start + self.frame_width
return self._data[frame_start:frame_end]
def frame_count(self, ms=None):
"""
returns the number of frames for the given number of milliseconds, or
if not specified, the number of frames in the whole AudioSegment
"""
if ms is not None:
return ms * (self.frame_rate / 1000.0)
else:
return float(len(self._data) // self.frame_width)
def set_sample_width(self, sample_width):
if sample_width == self.sample_width:
return self
frame_width = self.channels * sample_width
return self._spawn(
audioop.lin2lin(self._data, self.sample_width, sample_width),
overrides={'sample_width': sample_width, 'frame_width': frame_width}
)
def set_frame_rate(self, frame_rate):
if frame_rate == self.frame_rate:
return self
if self._data:
converted, _ = audioop.ratecv(self._data, self.sample_width,
self.channels, self.frame_rate,
frame_rate, None)
else:
converted = self._data
return self._spawn(data=converted,
overrides={'frame_rate': frame_rate})
def set_channels(self, channels):
if channels == self.channels:
return self
if channels == 2 and self.channels == 1:
fn = audioop.tostereo
frame_width = self.frame_width * 2
fac = 1
converted = fn(self._data, self.sample_width, fac, fac)
elif channels == 1 and self.channels == 2:
fn = audioop.tomono
frame_width = self.frame_width // 2
fac = 0.5
converted = fn(self._data, self.sample_width, fac, fac)
elif channels == 1:
channels_data = [seg.get_array_of_samples() for seg in self.split_to_mono()]
frame_count = int(self.frame_count())
converted = array.array(
channels_data[0].typecode,
b'\0' * (frame_count * self.sample_width)
)
for raw_channel_data in channels_data:
for i in range(frame_count):
converted[i] += raw_channel_data[i] // self.channels
frame_width = self.frame_width // self.channels
elif self.channels == 1:
dup_channels = [self for iChannel in range(channels)]
return AudioSegment.from_mono_audiosegments(*dup_channels)
else:
raise ValueError(
"AudioSegment.set_channels only supports mono-to-multi channel and multi-to-mono channel conversion")
return self._spawn(data=converted,
overrides={
'channels': channels,
'frame_width': frame_width})
def split_to_mono(self):
if self.channels == 1:
return [self]
samples = self.get_array_of_samples()
mono_channels = []
for i in range(self.channels):
samples_for_current_channel = samples[i::self.channels]
try:
mono_data = samples_for_current_channel.tobytes()
except AttributeError:
mono_data = samples_for_current_channel.tostring()
mono_channels.append(
self._spawn(mono_data, overrides={"channels": 1, "frame_width": self.sample_width})
)
return mono_channels
@property
def rms(self):
return audioop.rms(self._data, self.sample_width)
@property
def dBFS(self):
rms = self.rms
if not rms:
return -float("infinity")
return ratio_to_db(self.rms / self.max_possible_amplitude)
@property
def max(self):
return audioop.max(self._data, self.sample_width)
@property
def max_possible_amplitude(self):
bits = self.sample_width * 8
max_possible_val = (2 ** bits)
        # half the range is above 0 and half below, so the max amplitude is half of 2 ** bits
return max_possible_val / 2
@property
def max_dBFS(self):
return ratio_to_db(self.max, self.max_possible_amplitude)
@property
def duration_seconds(self):
        return self.frame_count() / self.frame_rate if self.frame_rate else 0.0
def get_dc_offset(self, channel=1):
"""
Returns a value between -1.0 and 1.0 representing the DC offset of a
channel (1 for left, 2 for right).
"""
if not 1 <= channel <= 2:
raise ValueError("channel value must be 1 (left) or 2 (right)")
if self.channels == 1:
data = self._data
elif channel == 1:
data = audioop.tomono(self._data, self.sample_width, 1, 0)
else:
data = audioop.tomono(self._data, self.sample_width, 0, 1)
return float(audioop.avg(data, self.sample_width)) / self.max_possible_amplitude
def remove_dc_offset(self, channel=None, offset=None):
"""
Removes DC offset of given channel. Calculates offset if it's not given.
Offset values must be in range -1.0 to 1.0. If channel is None, removes
DC offset from all available channels.
"""
if channel and not 1 <= channel <= 2:
raise ValueError("channel value must be None, 1 (left) or 2 (right)")
if offset and not -1.0 <= offset <= 1.0:
raise ValueError("offset value must be in range -1.0 to 1.0")
if offset:
offset = int(round(offset * self.max_possible_amplitude))
def remove_data_dc(data, off):
if not off:
off = audioop.avg(data, self.sample_width)
return audioop.bias(data, self.sample_width, -off)
if self.channels == 1:
return self._spawn(data=remove_data_dc(self._data, offset))
left_channel = audioop.tomono(self._data, self.sample_width, 1, 0)
right_channel = audioop.tomono(self._data, self.sample_width, 0, 1)
if not channel or channel == 1:
left_channel = remove_data_dc(left_channel, offset)
if not channel or channel == 2:
right_channel = remove_data_dc(right_channel, offset)
left_channel = audioop.tostereo(left_channel, self.sample_width, 1, 0)
right_channel = audioop.tostereo(right_channel, self.sample_width, 0, 1)
return self._spawn(data=audioop.add(left_channel, right_channel,
self.sample_width))
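    # Usage sketch: measure and then strip DC bias (offsets are fractions of the
    # maximum amplitude, so values near 0.0 mean the channel is already centred).
    #
    #     left_bias = stereo_seg.get_dc_offset(channel=1)
    #     cleaned = stereo_seg.remove_dc_offset()   # all channels, offset computed
    #     cleaned_left = stereo_seg.remove_dc_offset(channel=1, offset=left_bias)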
def apply_gain(self, volume_change):
return self._spawn(data=audioop.mul(self._data, self.sample_width,
db_to_float(float(volume_change))))
def overlay(self, seg, position=0, loop=False, times=None, gain_during_overlay=None):
"""
        Overlay the provided segment onto this segment, starting at the
        specified position and using the specified looping behavior.
seg (AudioSegment):
The audio segment to overlay on to this one.
position (optional int):
The position to start overlaying the provided segment in to this
one.
loop (optional bool):
Loop seg as many times as necessary to match this segment's length.
            Overrides the times param.
times (optional int):
Loop seg the specified number of times or until it matches this
segment's length. 1 means once, 2 means twice, ... 0 would make the
call a no-op
gain_during_overlay (optional int):
Changes this segment's volume by the specified amount during the
duration of time that seg is overlaid on top of it. When negative,
this has the effect of 'ducking' the audio under the overlay.
"""
if loop:
            # match loop=True's behavior with new times (count) mechanism.
times = -1
elif times is None:
# no times specified, just once through
times = 1
elif times == 0:
# it's a no-op, make a copy since we never mutate
return self._spawn(self._data)
output = StringIO()
seg1, seg2 = AudioSegment._sync(self, seg)
sample_width = seg1.sample_width
spawn = seg1._spawn
output.write(seg1[:position]._data)
# drop down to the raw data
seg1 = seg1[position:]._data
seg2 = seg2._data
pos = 0
seg1_len = len(seg1)
seg2_len = len(seg2)
while times:
remaining = max(0, seg1_len - pos)
if seg2_len >= remaining:
seg2 = seg2[:remaining]
seg2_len = remaining
# we've hit the end, we're done looping (if we were) and this
# is our last go-around
times = 1
if gain_during_overlay:
seg1_overlaid = seg1[pos:pos + seg2_len]
seg1_adjusted_gain = audioop.mul(seg1_overlaid, self.sample_width,
db_to_float(float(gain_during_overlay)))
output.write(audioop.add(seg1_adjusted_gain, seg2, sample_width))
else:
output.write(audioop.add(seg1[pos:pos + seg2_len], seg2,
sample_width))
pos += seg2_len
# dec times to break our while loop (eventually)
times -= 1
output.write(seg1[pos:])
return spawn(data=output)
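    # Usage sketch: duck background music by 6 dB while a voice-over plays,
    # starting two seconds in (segment names are hypothetical).
    #
    #     mixed = music.overlay(voice, position=2000, gain_during_overlay=-6)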
def append(self, seg, crossfade=100):
seg1, seg2 = AudioSegment._sync(self, seg)
if not crossfade:
return seg1._spawn(seg1._data + seg2._data)
elif crossfade > len(self):
raise ValueError("Crossfade is longer than the original AudioSegment ({}ms > {}ms)".format(
crossfade, len(self)
))
elif crossfade > len(seg):
raise ValueError("Crossfade is longer than the appended AudioSegment ({}ms > {}ms)".format(
crossfade, len(seg)
))
xf = seg1[-crossfade:].fade(to_gain=-120, start=0, end=float('inf'))
xf *= seg2[:crossfade].fade(from_gain=-120, start=0, end=float('inf'))
output = TemporaryFile()
output.write(seg1[:-crossfade]._data)
output.write(xf._data)
output.write(seg2[crossfade:]._data)
output.seek(0)
obj = seg1._spawn(data=output)
output.close()
return obj
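    # Usage sketch: join two clips with a 250 ms crossfade; crossfade=0 falls back
    # to plain concatenation as handled above.
    #
    #     combined = intro.append(body, crossfade=250)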
def fade(self, to_gain=0, from_gain=0, start=None, end=None,
duration=None):
"""
Fade the volume of this audio segment.
to_gain (float):
resulting volume_change in db
start (int):
default = beginning of the segment
when in this segment to start fading in milliseconds
end (int):
default = end of the segment
            when in this segment to stop fading in milliseconds
duration (int):
default = until the end of the audio segment
the duration of the fade
"""
if None not in [duration, end, start]:
raise TypeError('Only two of the three arguments, "start", '
'"end", and "duration" may be specified')
# no fade == the same audio
if to_gain == 0 and from_gain == 0:
return self
start = min(len(self), start) if start is not None else None
end = min(len(self), end) if end is not None else None
if start is not None and start < 0:
start += len(self)
if end is not None and end < 0:
end += len(self)
if duration is not None and duration < 0:
raise InvalidDuration("duration must be a positive integer")
if duration:
if start is not None:
end = start + duration
elif end is not None:
start = end - duration
else:
duration = end - start
from_power = db_to_float(from_gain)
output = []
# original data - up until the crossfade portion, as is
before_fade = self[:start]._data
if from_gain != 0:
before_fade = audioop.mul(before_fade,
self.sample_width,
from_power)
output.append(before_fade)
gain_delta = db_to_float(to_gain) - from_power
# fades longer than 100ms can use coarse fading (one gain step per ms),
# shorter fades will have audible clicks so they use precise fading
# (one gain step per sample)
if duration > 100:
scale_step = gain_delta / duration
for i in range(duration):
volume_change = from_power + (scale_step * i)
chunk = self[start + i]
chunk = audioop.mul(chunk._data,
self.sample_width,
volume_change)
output.append(chunk)
else:
start_frame = self.frame_count(ms=start)
end_frame = self.frame_count(ms=end)
fade_frames = end_frame - start_frame
scale_step = gain_delta / fade_frames
for i in range(int(fade_frames)):
volume_change = from_power + (scale_step * i)
sample = self.get_frame(int(start_frame + i))
sample = audioop.mul(sample, self.sample_width, volume_change)
output.append(sample)
# original data after the crossfade portion, at the new volume
after_fade = self[end:]._data
if to_gain != 0:
after_fade = audioop.mul(after_fade,
self.sample_width,
db_to_float(to_gain))
output.append(after_fade)
return self._spawn(data=output)
def fade_out(self, duration):
return self.fade(to_gain=-120, duration=duration, end=float('inf'))
def fade_in(self, duration):
return self.fade(from_gain=-120, duration=duration, start=0)
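    # Usage sketch: shape a clip with a 1.5 s fade-in and a 3 s fade-out; both
    # helpers delegate to fade() with a -120 dB endpoint as defined above.
    #
    #     shaped = clip.fade_in(1500).fade_out(3000)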
def reverse(self):
return self._spawn(
data=audioop.reverse(self._data, self.sample_width)
)
def _repr_html_(self):
src = """
<audio controls>
<source src="data:audio/mpeg;base64,{base64}" type="audio/mpeg"/>
Your browser does not support the audio element.
</audio>
"""
fh = self.export()
data = base64.b64encode(fh.read()).decode('ascii')
return src.format(base64=data)
from . import effects
|
the-stack_0_6646 | #! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.acquisition.acquisition import (
AcquisitionFunction,
OneShotAcquisitionFunction,
)
from botorch.utils.testing import BotorchTestCase
class TestAcquisitionFunction(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
AcquisitionFunction()
class TestOneShotAcquisitionFunction(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
OneShotAcquisitionFunction()
|
the-stack_0_6647 | load("@rules_pkg//:pkg.bzl", "pkg_zip")
def copy_file(name, src, out):
native.genrule(
name = name,
srcs = [src],
outs = [out],
cmd = "cp $< $@"
)
def pkg_asset(name, srcs = [], **kwargs):
"""Package MediaPipe assets
    This macro renames asset files so that they can be added to an AssetBundle (e.g. x.tflite -> x.bytes) and zips them.
Args:
name: the name of the output zip file
srcs: files to be packaged
"""
rename_target = "normalize_%s_exts" % name
_normalize_exts(name = rename_target, srcs = srcs)
pkg_zip(
name = name,
srcs = [":" + rename_target],
**kwargs,
)
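# Usage sketch (hypothetical BUILD file; the load label and asset names are made
# up for illustration):
#
#     load("//path/to:asset.bzl", "pkg_asset")
#
#     pkg_asset(
#         name = "example_assets",
#         srcs = ["hand_landmark.tflite", "detection_config.pbtxt"],
#     )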
def _normalize_exts_impl(ctx):
output_files = []
for src in ctx.files.srcs:
ext = "bytes" if src.extension in ctx.attr.bytes_exts else ("txt" if src.extension in ctx.attr.txt_exts else src.extension)
if ext == src.extension:
output_files.append(src)
else:
dest = ctx.actions.declare_file(src.path[:-1 * len(src.extension)] + ext)
ctx.actions.run_shell(
inputs = [src],
outputs = [dest],
arguments = [src.path, dest.path],
command = "test $1 != $2 && cp $1 $2",
progress_message = "Copying {} to {}...".format(src.path, dest.path),
)
output_files.append(dest)
return [
DefaultInfo(files = depset(output_files)),
]
_normalize_exts = rule(
implementation = _normalize_exts_impl,
attrs = {
"srcs": attr.label_list(allow_files = True),
"bytes_exts": attr.string_list(default = ["jpg", "png", "tflite", "uuu"]),
"txt_exts": attr.string_list(default = ["pbtxt"]),
},
)
|
the-stack_0_6648 | from celery import shared_task
from checkerapp.models import AlertPlugin
from checkerapp.models import AlertSent
from django.db import models
from django.db.models import Q
from .telegrambot import send_alert
class TelegramAlertPlugin(AlertPlugin):
url = "accounts:telegram_plugin:telegram_pluginview"
telegram_id = models.CharField(max_length=50)
@shared_task
def send_alert_task(task_obj):
check_obj = task_obj["base_check_obj"]
message = str(check_obj.content_object) + " is down"
users = list(check_obj.service_set.first().users.all())
for user in users:
telegram_user_obj = TelegramAlertPlugin.objects.filter(
Q(alert_receiver=user) & Q(active_status=True)
).first()
if not telegram_user_obj:
print("Inactive")
break
send_alert(message, telegram_user_obj)
AlertSent.objects.create(check_obj=check_obj)
return "Success !"
|
the-stack_0_6650 | # Copyright (C) 2013 by Ben Morris ([email protected])
# Based on Bio.Nexus, copyright 2005-2008 by Frank Kauff & Cymon J. Cox
# and Bio.Phylo.Newick, copyright 2009 by Eric Talevich.
# All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""I/O function wrappers for the RDF/CDAO file format.
This is an RDF format that conforms to the Comparative Data Analysis Ontology (CDAO).
See: http://evolutionaryontology.org/cdao
This module requires the librdf Python bindings (http://www.librdf.org)
The CDAOIO.Parser, in addition to parsing text files, can also parse directly
from a triple store that implements the Redland storage interface; similarly,
the CDAOIO.Writer can store triples in a triple store instead of serializing
them to a file.
"""
import os
from Bio._py3k import StringIO
from Bio import MissingPythonDependencyError
from Bio.Phylo import CDAO
from ._cdao_owl import cdao_namespaces, resolve_uri
# import of cdao_elements from ._cdao_owl removed in Biopython 1.74
try:
import rdflib
rdfver = rdflib.__version__
if rdfver[0] in ["1", "2"] or (rdfver in ["3.0.0", "3.1.0", "3.2.0"]):
raise MissingPythonDependencyError(
"Support for CDAO tree format requires RDFlib v3.2.1 or later."
)
except ImportError:
raise MissingPythonDependencyError("Support for CDAO tree format requires RDFlib.")
RDF_NAMESPACES = {
"owl": "http://www.w3.org/2002/07/owl#",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
}
RDF_NAMESPACES.update(cdao_namespaces)
# pad node ids with zeroes until they're at least this length
ZEROES = 8
def qUri(x):
"""Resolve URI for librdf."""
return resolve_uri(x, namespaces=RDF_NAMESPACES)
def format_label(x):
"""Format label for librdf."""
return x.replace("_", " ")
# ---------------------------------------------------------
# Public API
def parse(handle, **kwargs):
"""Iterate over the trees in a CDAO file handle.
:returns: generator of Bio.Phylo.CDAO.Tree objects.
"""
return Parser(handle).parse(**kwargs)
def write(trees, handle, plain=False, **kwargs):
"""Write a trees in CDAO format to the given file handle.
:returns: number of trees written.
"""
return Writer(trees).write(handle, plain=plain, **kwargs)
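# Usage sketch (illustrative; "example.cdao" is a made-up RDF/turtle file):
#
#     with open("example.cdao") as handle:
#         trees = list(parse(handle))
#     with open("copy.cdao", "w") as out_handle:
#         write(trees, out_handle)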
# ---------------------------------------------------------
# Input
class Parser(object):
"""Parse a CDAO tree given a file handle."""
def __init__(self, handle=None):
"""Initialize CDAO tree parser."""
self.handle = handle
self.graph = None
self.node_info = None
self.children = {}
self.rooted = False
@classmethod
def from_string(cls, treetext):
"""Instantiate the class from the given string."""
handle = StringIO(treetext)
return cls(handle)
def parse(self, **kwargs):
"""Parse the text stream this object was initialized with."""
self.parse_handle_to_graph(**kwargs)
return self.parse_graph()
def parse_handle_to_graph(
self, rooted=False, parse_format="turtle", context=None, **kwargs
):
"""Parse self.handle into RDF model self.model."""
if self.graph is None:
self.graph = rdflib.Graph()
graph = self.graph
for k, v in RDF_NAMESPACES.items():
graph.bind(k, v)
self.rooted = rooted
if "base_uri" in kwargs:
base_uri = kwargs["base_uri"]
else:
# Windows style slashes cannot be used in an RDF URI
base_uri = "file://" + os.path.abspath(self.handle.name).replace("\\", "/")
graph.parse(file=self.handle, publicID=base_uri, format=parse_format)
return self.parse_graph(graph, context=context)
def parse_graph(self, graph=None, context=None):
"""Iterate over RDF model yielding CDAO.Tree instances."""
if graph is None:
graph = self.graph
# look up branch lengths/TUs for all nodes
self.get_node_info(graph, context=context)
for root_node in self.tree_roots:
clade = self.parse_children(root_node)
yield CDAO.Tree(root=clade, rooted=self.rooted)
def new_clade(self, node):
"""Return a CDAO.Clade object for a given named node."""
result = self.node_info[node]
kwargs = {}
if "branch_length" in result:
kwargs["branch_length"] = result["branch_length"]
if "label" in result:
kwargs["name"] = result["label"].replace("_", " ")
if "confidence" in result:
kwargs["confidence"] = result["confidence"]
clade = CDAO.Clade(**kwargs)
return clade
def get_node_info(self, graph, context=None):
"""Create a dictionary containing information about all nodes in the tree."""
self.node_info = {}
self.obj_info = {}
self.children = {}
self.nodes = set()
self.tree_roots = set()
assignments = {
qUri("cdao:has_Parent"): "parent",
qUri("cdao:belongs_to_Edge_as_Child"): "edge",
qUri("cdao:has_Annotation"): "annotation",
qUri("cdao:has_Value"): "value",
qUri("cdao:represents_TU"): "tu",
qUri("rdfs:label"): "label",
qUri("cdao:has_Support_Value"): "confidence",
}
for s, v, o in graph:
# process each RDF triple in the graph sequentially
s, v, o = str(s), str(v), str(o)
if s not in self.obj_info:
self.obj_info[s] = {}
this = self.obj_info[s]
try:
# if the predicate is one we care about, store information for
# later
this[assignments[v]] = o
except KeyError:
pass
if v == qUri("rdf:type"):
if o in (qUri("cdao:AncestralNode"), qUri("cdao:TerminalNode")):
# this is a tree node; store it in set of all nodes
self.nodes.add(s)
if v == qUri("cdao:has_Root"):
# this is a tree; store its root in set of all tree roots
self.tree_roots.add(o)
for node in self.nodes:
# for each node, look up all information needed to create a
# CDAO.Clade
self.node_info[node] = {}
node_info = self.node_info[node]
obj = self.obj_info[node]
if "edge" in obj:
# if this object points to an edge, we need a branch length from
# the annotation on that edge
edge = self.obj_info[obj["edge"]]
if "annotation" in edge:
annotation = self.obj_info[edge["annotation"]]
if "value" in annotation:
node_info["branch_length"] = float(annotation["value"])
if "tu" in obj:
# if this object points to a TU, we need the label of that TU
tu = self.obj_info[obj["tu"]]
if "label" in tu:
node_info["label"] = tu["label"]
if "parent" in obj:
# store this node as a child of its parent, if it has one,
# so that the tree can be traversed from parent to children
parent = obj["parent"]
if parent not in self.children:
self.children[parent] = []
self.children[parent].append(node)
def parse_children(self, node):
"""Traverse the tree to create a nested clade structure.
        Return a CDAO.Clade, calling itself recursively for each child to
        traverse the entire tree and create a nested structure of CDAO.Clade
        objects.
"""
clade = self.new_clade(node)
children = self.children[node] if node in self.children else []
clade.clades = [self.parse_children(child_node) for child_node in children]
return clade
# ---------------------------------------------------------
# Output
class Writer(object):
"""Based on the writer in Bio.Nexus.Trees (str, to_string)."""
prefixes = RDF_NAMESPACES
def __init__(self, trees):
"""Initialize parameters for writing a CDAO tree."""
self.trees = trees
self.node_counter = 0
self.edge_counter = 0
self.tu_counter = 0
self.tree_counter = 0
def write(
self,
handle,
tree_uri="",
record_complete_ancestry=False,
rooted=False,
**kwargs
):
"""Write this instance's trees to a file handle."""
self.rooted = rooted
self.record_complete_ancestry = record_complete_ancestry
if tree_uri and not tree_uri.endswith("/"):
tree_uri += "/"
trees = self.trees
if tree_uri:
handle.write("@base <%s>\n" % tree_uri)
for k, v in self.prefixes.items():
handle.write("@prefix %s: <%s> .\n" % (k, v))
handle.write("<%s> a owl:Ontology .\n" % self.prefixes["cdao"])
for tree in trees:
self.tree_counter += 1
self.tree_uri = "tree%s"
first_clade = tree.clade
statements = self.process_clade(first_clade, root=tree)
for stmt in statements:
self.add_stmt_to_handle(handle, stmt)
def add_stmt_to_handle(self, handle, stmt):
"""Add URI prefix to handle."""
# apply URI prefixes
stmt_strings = []
for n, part in enumerate(stmt):
if isinstance(part, rdflib.URIRef):
node_uri = str(part)
changed = False
for prefix, uri in self.prefixes.items():
if node_uri.startswith(uri):
node_uri = node_uri.replace(uri, "%s:" % prefix, 1)
if node_uri == "rdf:type":
node_uri = "a"
changed = True
if changed or ":" in node_uri:
stmt_strings.append(node_uri)
else:
stmt_strings.append("<%s>" % node_uri)
elif isinstance(part, rdflib.Literal):
stmt_strings.append(part.n3())
else:
stmt_strings.append(str(part))
handle.write("%s .\n" % " ".join(stmt_strings))
def process_clade(self, clade, parent=None, root=False):
"""Recursively generate triples describing a tree of clades."""
self.node_counter += 1
clade.uri = "node%s" % str(self.node_counter).zfill(ZEROES)
if parent:
clade.ancestors = parent.ancestors + [parent.uri]
else:
clade.ancestors = []
def nUri(s):
# nUri = lambda s: rdflib.URIRef(s)
return rdflib.URIRef(s)
def pUri(s):
# pUri = lambda s: rdflib.URIRef(qUri(s))
return rdflib.URIRef(qUri(s))
tree_id = nUri("")
statements = []
if root is not False:
# create a cdao:RootedTree with reference to the tree root
tree_type = (
pUri("cdao:RootedTree") if self.rooted else pUri("cdao:UnrootedTree")
)
statements += [
(tree_id, pUri("rdf:type"), tree_type),
(tree_id, pUri("cdao:has_Root"), nUri(clade.uri)),
]
try:
tree_attributes = root.attributes
except AttributeError:
tree_attributes = []
for predicate, obj in tree_attributes:
statements.append((tree_id, predicate, obj))
if clade.name:
# create TU
self.tu_counter += 1
tu_uri = "tu%s" % str(self.tu_counter).zfill(ZEROES)
statements += [
(nUri(tu_uri), pUri("rdf:type"), pUri("cdao:TU")),
(nUri(clade.uri), pUri("cdao:represents_TU"), nUri(tu_uri)),
(
nUri(tu_uri),
pUri("rdfs:label"),
rdflib.Literal(format_label(clade.name)),
),
]
try:
tu_attributes = clade.tu_attributes
except AttributeError:
tu_attributes = []
for predicate, obj in tu_attributes:
yield (nUri(tu_uri), predicate, obj)
# create this node
node_type = "cdao:TerminalNode" if clade.is_terminal() else "cdao:AncestralNode"
statements += [
(nUri(clade.uri), pUri("rdf:type"), pUri(node_type)),
(nUri(clade.uri), pUri("cdao:belongs_to_Tree"), tree_id),
]
if parent is not None:
# create edge from the parent node to this node
self.edge_counter += 1
edge_uri = "edge%s" % str(self.edge_counter).zfill(ZEROES)
statements += [
(nUri(edge_uri), pUri("rdf:type"), pUri("cdao:DirectedEdge")),
(nUri(edge_uri), pUri("cdao:belongs_to_Tree"), tree_id),
(nUri(edge_uri), pUri("cdao:has_Parent_Node"), nUri(parent.uri)),
(nUri(edge_uri), pUri("cdao:has_Child_Node"), nUri(clade.uri)),
(
nUri(clade.uri),
pUri("cdao:belongs_to_Edge_as_Child"),
nUri(edge_uri),
),
(nUri(clade.uri), pUri("cdao:has_Parent"), nUri(parent.uri)),
(
nUri(parent.uri),
pUri("cdao:belongs_to_Edge_as_Parent"),
nUri(edge_uri),
),
]
if hasattr(clade, "confidence") and clade.confidence is not None:
confidence = rdflib.Literal(
clade.confidence,
datatype="http://www.w3.org/2001/XMLSchema#decimal",
)
statements += [
(nUri(clade.uri), pUri("cdao:has_Support_Value"), confidence)
]
if self.record_complete_ancestry and len(clade.ancestors) > 0:
statements += [
(nUri(clade.uri), pUri("cdao:has_Ancestor"), nUri(ancestor))
for ancestor in clade.ancestors
]
if clade.branch_length is not None:
# add branch length
edge_ann_uri = "edge_annotation%s" % str(self.edge_counter).zfill(
ZEROES
)
branch_length = rdflib.Literal(
clade.branch_length,
datatype=rdflib.URIRef("http://www.w3.org/2001/XMLSchema#decimal"),
)
statements += [
(nUri(edge_ann_uri), pUri("rdf:type"), pUri("cdao:EdgeLength")),
(nUri(edge_uri), pUri("cdao:has_Annotation"), nUri(edge_ann_uri)),
(nUri(edge_ann_uri), pUri("cdao:has_Value"), branch_length),
]
try:
edge_attributes = clade.edge_attributes
except AttributeError:
edge_attributes = []
for predicate, obj in edge_attributes:
yield (nUri(edge_uri), predicate, obj)
for stmt in statements:
yield stmt
try:
clade_attributes = clade.attributes
except AttributeError:
clade_attributes = []
for predicate, obj in clade_attributes:
yield (nUri(clade.uri), predicate, obj)
if not clade.is_terminal():
for new_clade in clade.clades:
for stmt in self.process_clade(new_clade, parent=clade, root=False):
yield stmt
|
the-stack_0_6651 | #!/bin/python
# Simple script that gathers information about the GPUs and writes it to .prom files so that node-exporter can expose it
from __future__ import print_function
from pynvml import *
import sys, traceback
try:
nvmlInit()
try:
driverVersion = nvmlSystemGetDriverVersion()
try:
gpuNum = nvmlDeviceGetCount()
for i in range(gpuNum):
try:
filename = "/etc/node-exporter/gpu_%d.prom" % (i,)
with open(filename,"w") as file:
handle = nvmlDeviceGetHandleByIndex(i)
info = nvmlDeviceGetMemoryInfo(handle)
device_name = nvmlDeviceGetName(handle)
try:
utilization = nvmlDeviceGetUtilizationRates(handle)
file.write ("node_gpu_%d_util{device_name=\"%s\", device_id=\"%d\"} %d\n" %(i,device_name,i,utilization.gpu))
except NVMLError as error:
traceback.print_exc(file=sys.stderr)
print("Failed nvmlDeviceGetUtilizationRates", file=sys.stderr)
file.write ("node_gpu_%d_total_memory{device_name=\"%s\", device_id=\"%d\"} %d\n" %(i,device_name,i,info.total))
file.write ("node_gpu_%d_used_memory{device_name=\"%s\", device_id=\"%d\"} %d\n" %(i,device_name,i,info.used))
file.write ("node_gpu_%d_core_temp{device_name=\"%s\", device_id=\"%d\"} %d\n" %(i,device_name,i,nvmlDeviceGetTemperature(handle, NVML_TEMPERATURE_GPU)))
except NVMLError as error:
traceback.print_exc(file=sys.stderr)
with open("/etc/node-exporter/gpu_issues.prom", "w") as file:
file.write("node_gpu_errors 1\n")
exit(1)
with open("/etc/node-exporter/gpu_issues.prom", "w") as file:
file.write("node_gpu_errors 0\n")
exit(0)
except NVMLError as error:
traceback.print_exc(file=sys.stderr)
with open("/etc/node-exporter/gpu_issues.prom","w") as file:
file.write("node_gpu_errors 1\n")
exit(1)
except NVMLError as error:
traceback.print_exc(file=sys.stderr)
with open("/etc/node-exporter/gpu_issues.prom","w") as file:
file.write("node_gpu_errors 1\n")
exit(1)
except NVMLError as error:
traceback.print_exc(file=sys.stderr)
with open("/etc/node-exporter/gpu_issues.prom","w") as file:
file.write("node_gpu_errors 1\n")
exit(1)
|
the-stack_0_6655 | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.core.base_task."""
import functools
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.utils.testing import mock_task
def all_strategy_combinations():
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
mode='eager',
)
class TaskKerasTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_task_with_step_override(self, distribution):
with distribution.scope():
task = mock_task.MockTask()
model = task.build_model()
model = task.compile_model(
model,
optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),
metrics=task.build_metrics(),
train_step=task.train_step,
validation_step=task.validation_step)
dataset = task.build_inputs(params=None)
logs = model.fit(dataset, epochs=1, steps_per_epoch=2)
self.assertIn('loss', logs.history)
self.assertIn('acc', logs.history)
# Without specifying metrics through compile.
with distribution.scope():
train_metrics = task.build_metrics(training=True)
val_metrics = task.build_metrics(training=False)
model = task.build_model()
model = task.compile_model(
model,
optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),
train_step=functools.partial(task.train_step, metrics=train_metrics),
validation_step=functools.partial(
task.validation_step, metrics=val_metrics))
logs = model.fit(dataset, epochs=1, steps_per_epoch=2)
self.assertIn('loss', logs.history)
self.assertIn('acc', logs.history)
def test_task_with_fit(self):
task = mock_task.MockTask()
model = task.build_model()
model = task.compile_model(
model,
optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=task.build_metrics())
dataset = task.build_inputs(params=None)
logs = model.fit(dataset, epochs=1, steps_per_epoch=2)
self.assertIn('loss', logs.history)
self.assertIn('acc', logs.history)
self.assertLen(model.evaluate(dataset, steps=1), 2)
def test_task_invalid_compile(self):
task = mock_task.MockTask()
model = task.build_model()
with self.assertRaises(ValueError):
_ = task.compile_model(
model,
optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=task.build_metrics(),
train_step=task.train_step)
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_6658 | '''
Takes all pickle files from 1. pre-preprocessing, 2. post-preprocessing,
3. kernelizing, 4. decomposing
and creates single csv file with all data.
'''
import pandas as pd
import networkx as nx
import os, sys, pickle, pprint, shutil, datetime, time
from os.path import dirname,realpath
sys.path.insert(0,dirname(realpath(__file__))[:-10])
#print(sys.path)
import algs.utils_misc as utils_misc
def check_for_missing_data(final_output, kernel_output, kdistinct, wscale, witer, fname):
'''
true_total input:
TF: wecp, ipart, lp - witer=0, wscale=sml,med, kdistinct <= k_distinct_max
LV: wecp, ipart, lp - wscale=sml,med, kdistinct <= k_distinct_max
true_dist input:
TF: ipart, lp - witer <= 4 [0-4], kdistinct <= k_distinct_max
LV: ipart, lp - kdistinct <= k_distinct_max
guess input:
TF: ipart, lp - witer=0, 5 <= kdistinct <= k_distinct_max
LV: ipart, lp - 5 <= kdistinct <= k_distinct_max
'''
k_distinct_max = 11 ######### NOTE
if kdistinct==0:
return
if witer is None:
witer=0
# first check true_total input
if wscale=='sml' or wscale=='med':
if kdistinct <= k_distinct_max:
if witer==0:
wecp_dat = final_output['decomp_data']['bsd_dw']['true_total']
ip_dat = final_output['decomp_data']['bswd_dw_ip']['true_total']
lp_dat = final_output['decomp_data']['bswd_dw_lp']['true_total']
if wecp_dat is None:
print('ERROR: {} (true_total input) missing wecp k={}'.format(fname,
kdistinct))
# second check for true_dist input for weight permutations
if witer <= 5 and kdistinct <= 7:
ip_dat = final_output['decomp_data']['bswd_dw_ip']['true_distinct']
lp_dat = final_output['decomp_data']['bswd_dw_lp']['true_distinct']
if ip_dat is None:
print('ERROR: {} (true_distinct input) missing ip_dat k={}'.format(fname,
kdistinct))
if lp_dat is None:
print('ERROR: {} (true_distinct input) missing lp_dat k={}'.format(fname,
kdistinct))
# third check for true_dist input for weight permutations
if witer==0 and kdistinct <= k_distinct_max:
ip_dat = final_output['decomp_data']['bswd_dw_ip']['true_distinct']
lp_dat = final_output['decomp_data']['bswd_dw_lp']['true_distinct']
if ip_dat is None:
print('ERROR: {} (true_distinct input) missing ip_dat k={}'.format(fname,
kdistinct))
if lp_dat is None:
print('ERROR: {} (true_distinct input) missing lp_dat k={}'.format(fname,
kdistinct))
# fourth check for guess input
if witer==0 and kdistinct >= 5 and kdistinct <= 7:
vals = [-0.6, -0.4, -0.2, 0.2, 0.4, 0.6]
for val in vals:
ip_dat = final_output['decomp_data']['bswd_dw_ip']['guesses']
lp_dat = final_output['decomp_data']['bswd_dw_lp']['guesses']
kern_dat = kernel_output['guess_kernels'][val]
if kern_dat['passed_kernel']:
if ip_dat[val] is None:
print('ERROR: {} (guess {} input) missing ip_dat k={}'.format(fname,
val,
kdistinct))
if lp_dat[val] is None:
print('ERROR: {} (guess {} input) missing lp_dat k={}'.format(fname,
val,
kdistinct))
def print_keys(final_output):
pre_preprocessing = final_output['pre_preprocessing']
post_preprocessing = final_output['post_preprocessing']
kernel_data = final_output['kernel_data']
true_total_kernel = kernel_data['true_total_kernel']
true_distinct_kernel = kernel_data['true_distinct_kernel']
guess_kernels = kernel_data['guess_kernels']
decomp_data = final_output['decomp_data']
bsd_dw = decomp_data['bsd_dw']
bswd_dw_lp = decomp_data['bswd_dw_lp']
bswd_dw_ip = decomp_data['bswd_dw_ip']
print('\nMain keys: ', list(final_output.keys()))
print('pre_preprocessing keys: ', list(pre_preprocessing.keys()))
print('\npost_preprocessing keys: ', list(post_preprocessing.keys()))
print('\nkernel_data keys: ', list(kernel_data.keys()))
if true_total_kernel is not None:
print('\ntrue_total_kernel keys: ', list(true_total_kernel.keys()))
else:
print('\ntrue_total_kernel keys: ', true_total_kernel)
if true_distinct_kernel is not None:
print('\ntrue_distinct_kernel keys: ',
list(true_distinct_kernel.keys()))
else:
print('\ntrue_distinct_kernel keys: ', true_distinct_kernel)
if guess_kernels is not None:
print('\nguess_kernels keys: ', list(guess_kernels.keys()))
else:
print('\nguess_kernels keys: ', guess_kernels.keys())
print('\ndecomp_data keys: ', list(decomp_data.keys()))
print('bsd_dw keys: ', list(bsd_dw.keys()))
print('bswd_dw_lp keys: ', list(bswd_dw_lp.keys()))
print('bswd_dw_ip keys: ', list(bswd_dw_ip.keys()))
def get_data(witer, wscale, fname, datatype, final_output, kernel_output, postproc_output, preproc_output, get_colnames=False):
'''
filename
pre_preprocessing: n-init, m-init, kdistinct-init, ktotal-init,
witer, wscale(sml, med, lrg)
post_preprocessing/pre_kernel: n-postproc, m-postproc,
kdistinct-postproc, ktotal-postproc, time_preproc
- true_total input:
post_kernel/pre_bsd: kinput-1, n-1, m-1, passed_kernel-1, time_kernel-1
wecp: passed_bsd-1, reconstructs-1, found_cliq_frac-1, time_bsd-1
- true_dist input:
post_kernel/pre_bsd: kinput-2, n-2, m-2, passed_kernel-2, time_kernel-2
lp: passed_bsd-2-1, reconstructs-2-1, found_cliq_frac-2-1, time_bsd-2-1
ipart: passed_bsd-2-2, reconstructs-2-2, found_cliq_frac-2-2, time_bsd-2-2
- guess0 input:
post_kernel/pre_bsd: kinput-3, n-3, m-3, passed_kernel-3, time_kernel-3
lp: passed_bsd-3-1, reconstructs-3-1, found_cliq_frac-3-1, time_bsd-3-1
ipart: passed_bsd-3-2, reconstructs-3-2, found_cliq_frac-3-2, time_bsd-3-2
....
- guess5 input:
post_kernel/pre_bsd: kinput-8, n-8, m-8, passed_kernel-8, time_kernel-8
lp: passed_bsd-8-1, reconstructs-8-1, found_cliq_frac-8-1, time_bsd-8-1
ipart: passed_bsd-8-2, reconstructs-8-2, found_cliq_frac-8-2, time_bsd-8-2
'''
colnames = {'filename' : None,
'datatype' : None,
#pre-preprocessing
'n-init' : None,
'm-init' : None,
'kdistinct-init' : None,
'ktotal-init' : None,
'witer' : None,
'wscale' : None,
# post-preprocessing
'n-postproc' : None,
'm-postproc' : None,
'kdistinct-postproc' : None,
'ktotal-postproc' : None,
'time_preproc' : None,
#----- true_total input
'kinput-1' : None,
'n-1' : None,
'm-1' : None,
'max_edgeweight-1' : None,
'passed_kernel-1' : None,
'time_kernel-1' : None,
# wecp
'passed_bsd-1' : None,
'reconstructs-1' : None,
'found_cliq_frac-1' : None,
'time_bsd-1' : None}
# kdistinct + guess input info
for i in range(2, 8+1):
# post kernel info
colnames['kinput-'+str(i)]=None
colnames['n-'+str(i)]=None
colnames['m-'+str(i)]=None
colnames['max_edgeweight-'+str(i)]=None
colnames['passed_kernel-'+str(i)]=None
colnames['time_kernel-'+str(i)]=None
# lp
colnames['passed_bsd-'+str(i)+'-1']=None
colnames['reconstructs-'+str(i)+'-1']=None
colnames['found_cliq_frac-'+str(i)+'-1']=None
colnames['time_bsd-'+str(i)+'-1']=None
colnames['mem_usage-'+str(i)+'-1']=None
# ipart
colnames['passed_bsd-'+str(i)+'-2']=None
colnames['reconstructs-'+str(i)+'-2']=None
colnames['found_cliq_frac-'+str(i)+'-2']=None
colnames['time_bsd-'+str(i)+'-2']=None
colnames['mem_usage-'+str(i)+'-2']=None
if get_colnames:
return list(colnames.keys())
else:
pre_preprocessing = preproc_output
post_preprocessing = postproc_output['post_preprocessing']
kernel_data = kernel_output
true_total_kernel = kernel_data['true_total_kernel']
true_distinct_kernel = kernel_data['true_distinct_kernel']
guess_kernels = kernel_data['guess_kernels']
decomp_data = final_output['decomp_data']
bsd_dw = decomp_data['bsd_dw']
bswd_dw_lp = decomp_data['bswd_dw_lp']
bswd_dw_ip = decomp_data['bswd_dw_ip']
upd_fname = fname.split('/')[-1]
if datatype=='tf':
# remove 'witer' from fname
newfnm = ''
comps = upd_fname.split('_')
for u in comps:
if 'witer' not in u:
newfnm+='_'+u
upd_fname=newfnm[1:-1]
#### fill in the data
colnames['filename'] = upd_fname
colnames['datatype'] = datatype
colnames['n-init'] = pre_preprocessing['n']
colnames['m-init'] = pre_preprocessing['m']
colnames['kdistinct-init'] = pre_preprocessing['kdistinct']
colnames['ktotal-init'] = pre_preprocessing['ktotal']
colnames['witer'] = witer
colnames['wscale'] = wscale
colnames['n-postproc'] = post_preprocessing['n']
colnames['m-postproc'] = post_preprocessing['m']
colnames['kdistinct-postproc'] = post_preprocessing['kdistinct']
colnames['ktotal-postproc'] = post_preprocessing['ktotal']
colnames['time_preproc'] = post_preprocessing['preprocess_time']
##----- true_total input
if true_total_kernel is not None:
colnames['kinput-1'] = true_total_kernel['kinput']
colnames['n-1'] = true_total_kernel['n']
colnames['m-1'] = true_total_kernel['m']
colnames['max_edgeweight-1'] = true_total_kernel['max_eweight']
colnames['passed_kernel-1'] = true_total_kernel['passed_kernel']
colnames['time_kernel-1'] = true_total_kernel['kernel_time']
# wecp data
truetot = bsd_dw['true_total']
colnames['passed_bsd-1'] = truetot['passed_bsd']
colnames['reconstructs-1'] = truetot['reconstructs']
colnames['found_cliq_frac-1'] = truetot['found_cliq_fraction']
colnames['time_bsd-1'] = truetot['time_bsd']
##----- true distinct input
if true_distinct_kernel is not None:
# post kernel info
colnames['kinput-2'] = true_distinct_kernel['kinput']
colnames['n-2'] = true_distinct_kernel['n']
colnames['m-2'] = true_distinct_kernel['m']
colnames['max_edgeweight-2'] = true_distinct_kernel['max_eweight']
colnames['passed_kernel-2'] = true_distinct_kernel['passed_kernel']
colnames['time_kernel-2'] = true_distinct_kernel['kernel_time']
# lp
truedist_lp = bswd_dw_lp['true_distinct']
colnames['passed_bsd-2-1'] = truedist_lp['passed_bsd']
colnames['reconstructs-2-1'] = truedist_lp['reconstructs']
colnames['found_cliq_frac-2-1'] = truedist_lp['found_cliq_fraction']
colnames['time_bsd-2-1'] = truedist_lp['time_bsd']
if 'mem_usage' in truedist_lp.keys():
colnames['mem_usage-2-1']=truedist_lp['mem_usage']
# ipart
truedist_ip = bswd_dw_ip['true_distinct']
colnames['passed_bsd-2-2'] = truedist_ip['passed_bsd']
colnames['reconstructs-2-2'] = truedist_ip['reconstructs']
colnames['found_cliq_frac-2-2'] = truedist_ip['found_cliq_fraction']
colnames['time_bsd-2-2'] = truedist_ip['time_bsd']
if 'mem_usage' in truedist_ip.keys():
colnames['mem_usage-2-2']=truedist_ip['mem_usage']
i=3
vals = [-0.6, -0.4, -0.2, 0.2, 0.4, 0.6]
for val in vals:
if guess_kernels is not None:
guess_kern = guess_kernels[val]
if guess_kern is not None:
guess_ip = bswd_dw_ip['guesses'][val]
guess_lp = bswd_dw_lp['guesses'][val]
# post kernel info
colnames['kinput-'+str(i)] = guess_kern['kinput']
colnames['n-'+str(i)] = guess_kern['n']
colnames['m-'+str(i)] = guess_kern['m']
colnames['max_edgeweight-'+str(i)] = guess_kern['max_eweight']
colnames['passed_kernel-'+str(i)] = guess_kern['passed_kernel']
colnames['time_kernel-'+str(i)] = guess_kern['kernel_time']
if guess_lp is not None:
# lp
colnames['passed_bsd-'+str(i)+'-1'] = guess_lp['passed_bsd']
colnames['reconstructs-'+str(i)+'-1'] = guess_lp['reconstructs']
colnames['found_cliq_frac-'+str(i)+'-1'] = guess_lp['found_cliq_fraction']
colnames['time_bsd-'+str(i)+'-1'] = guess_lp['time_bsd']
if guess_ip is not None:
# ipart
colnames['passed_bsd-'+str(i)+'-2'] = guess_ip['passed_bsd']
colnames['reconstructs-'+str(i)+'-2'] = guess_ip['reconstructs']
colnames['found_cliq_frac-'+str(i)+'-2'] = guess_ip['found_cliq_fraction']
colnames['time_bsd-'+str(i)+'-2'] = guess_ip['time_bsd']
i+=1
return colnames
def get_overlapping_nodes(fname, dat, final_output, kernel_output, postproc_output, preproc_output):
'''
    For each clique, compute and store:
    1. the fraction of its nodes that also appear in other cliques
    2. the fraction of cliques that overlap with it
    3. its size (number of nodes)
'''
#print('\n\n')
#print(final_output['pre_preprocessing'])
#clique_vertices = final_output['post_preprocessing']['clique_vertices']
#clique_vertices = final_output['pre_preprocessing']['clique_vertices']
#clique_vertices = postproc_output['post_preprocessing']['clique_vertices']
clique_vertices = preproc_output['clique_vertices']
total_cliques = len(clique_vertices)
clique_i=0
for cliquei in clique_vertices:
cliquen = len(cliquei)
dat['clique_n-'+str(clique_i)]=cliquen # clique size
num_node_ovl=set()
num_cliq_ovl=0
clique_j=0
for cliquej in clique_vertices:
if clique_i!=clique_j:
#nno = len([x for x in cliquei if x in cliquej])
#num_node_ovl+=nno
nno=0
for x in cliquei:
if x in cliquej:
nno+=1
num_node_ovl.add(x)
if nno>0: # # of cliques overlapping
num_cliq_ovl+=1
clique_j+=1
dat['clique_nodeovl-'+str(clique_i)]=len(num_node_ovl)/cliquen
dat['clique_clqovl-'+str(clique_i)]=num_cliq_ovl/total_cliques
clique_i+=1
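# Tiny worked example (illustrative, not executed): with cliques A = {1, 2, 3}
# and B = {3, 4}, clique A is stored as clique_n-0 = 3, clique_nodeovl-0 = 1/3
# (only node 3 is shared) and clique_clqovl-0 = 1/2 (one of the two cliques
# overlaps it).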
def run(final_datadir, out_datadir, kernel_dir, postproc_dir, preproc_dir, typ):
print('preproc dirname: ', preproc_dir)
print('postproc dirname: ', postproc_dir)
print('kern dirname: ', kernel_dir)
print('final data dirname: ', final_datadir)
print('out_dirname: ', out_datadir)
print()
colnames = get_data(None, None, None, None, None, None, None, None, get_colnames=True)
if not os.path.exists(out_datadir):
os.makedirs(out_datadir)
final_files = utils_misc.get_files(final_datadir, '.pkl')
data = []
for fname in final_files:
preproc_file = preproc_dir+fname.split('/')[-1]
postproc_file = postproc_dir+fname.split('/')[-1]
kern_file = kernel_dir+fname.split('/')[-1]
witer=None
wscale=None # sml, med, lrg
if typ=='tf':
witer = int(utils_misc.get_fname_value(fname, 'witer'))
ws = int(utils_misc.get_fname_value(fname, 'scalefac'))
if ws==1:
wscale='sml'
elif ws==4:
wscale='med'
elif ws==16:
wscale='lrg'
elif typ=='lv':
ws = int(utils_misc.get_fname_value(fname, 'scalefac'))
if ws==1:
wscale='sml'
elif ws==2:
wscale='med'
elif ws==4:
wscale='lrg'
# get pkl file info
with open(fname, 'rb') as infile:
final_output = pickle.load(infile)
with open(kern_file, 'rb') as infile:
kernel_output = pickle.load(infile)
with open(postproc_file, 'rb') as infile:
postproc_output = pickle.load(infile)
with open(preproc_file, 'rb') as infile:
preproc_output = pickle.load(infile)
if final_output is None:
print('Warning: final_output is None')
if kernel_output is None:
print('Warning: kernel_output is None')
if postproc_output is None:
print('Warning: postproc_output is None')
if preproc_output is None:
print('Warning: preproc_output is None')
k_total = postproc_output['post_preprocessing']['ktotal']
k_distinct = postproc_output['post_preprocessing']['kdistinct']
dat=None
try: #NOTE
dat = get_data(witer, wscale, fname, typ, final_output,
kernel_output, postproc_output, preproc_output)
except:
print("ERROR: couldnt get complete data for ", fname)
check_for_missing_data(final_output, kernel_output, k_distinct,
wscale, witer, fname)
if dat is not None:
get_overlapping_nodes(fname, dat, final_output,
kernel_output, postproc_output, preproc_output)
data.append(dat)
df = pd.DataFrame(data)
return df
def main():
preproc_dir_tf = 'data/pre_preprocessing/tf/'
preproc_dir_lv = 'data/pre_preprocessing/lv/'
postproc_dir_tf = 'data/post_preprocessing/tf/'
postproc_dir_lv = 'data/post_preprocessing/lv/'
kernel_dir_tf = 'data/kernels/tf/'
kernel_dir_lv = 'data/kernels/lv/'
in_datadir_tf = 'data/finaldata/tf/'
in_datadir_lv = 'data/finaldata/lv/'
out_datadir_tf = 'data/csvfiles/'
out_datadir_lv = 'data/csvfiles/'
start = time.time()
print('Creating tf csv')
tf_df = run(in_datadir_tf, out_datadir_tf, kernel_dir_tf, postproc_dir_tf, preproc_dir_tf, 'tf')
#print(tf_df['num_overlap'])
print('\nCreating lv csv')
lv_df = run(in_datadir_lv, out_datadir_lv, kernel_dir_lv, postproc_dir_lv, preproc_dir_lv, 'lv')
#print(lv_df)
end = time.time()
print(tf_df.shape, lv_df.shape)
    df = pd.concat([tf_df, lv_df])  # DataFrame.append is deprecated in recent pandas
print(df)
now = datetime.datetime.now()
date_time = now.strftime("%m-%d-%Y_%H-%M-%S")
name = 'fulldata_'+date_time+'.csv'
print(name, 'time: ', end-start)
source_dir = 'data/csvfiles/mostrecent/'
target_dir = 'data/csvfiles/'
if not os.path.exists(source_dir):
os.makedirs(source_dir)
file_names = os.listdir(source_dir)
#move files from mostrecent dir
for file_name in file_names:
shutil.move(os.path.join(source_dir, file_name), target_dir)
#save csv to mostrecent dir
df.to_csv(source_dir+name)
if __name__=="__main__":
main()
|
the-stack_0_6660 | import os
import re
import sys
import json
import time
import signal
import logging
import traceback
import boto3
import subprocess
from moto import core as moto_core
from requests.models import Response
from localstack import constants, config
from localstack.constants import (
ENV_DEV, LOCALSTACK_VENV_FOLDER, LOCALSTACK_INFRA_PROCESS, DEFAULT_SERVICE_PORTS)
from localstack.utils import common, persistence
from localstack.utils.common import (TMP_THREADS, run, get_free_tcp_port, is_linux, start_thread,
ShellCommandThread, in_docker, is_port_open, sleep_forever, print_debug, edge_ports_info)
from localstack.utils.server import multiserver
from localstack.utils.testutil import is_local_test_mode
from localstack.utils.bootstrap import (
setup_logging, is_debug, canonicalize_api_names, load_plugins, in_ci)
from localstack.utils.analytics import event_publisher
from localstack.services import generic_proxy, install
from localstack.services.es import es_api
from localstack.services.plugins import SERVICE_PLUGINS, record_service_health, check_infra
from localstack.services.firehose import firehose_api
from localstack.services.awslambda import lambda_api
from localstack.services.generic_proxy import GenericProxyHandler, ProxyListener, start_proxy_server
from localstack.services.dynamodbstreams import dynamodbstreams_api
from localstack.utils.analytics.profiler import log_duration
# flag to indicate whether signal handlers have been set up already
SIGNAL_HANDLERS_SETUP = False
# output string that indicates that the stack is ready
READY_MARKER_OUTPUT = 'Ready.'
# default backend host address
DEFAULT_BACKEND_HOST = '127.0.0.1'
# maps ports to proxy listener details
PROXY_LISTENERS = {}
# set up logger
LOG = logging.getLogger(__name__)
# fix moto account ID - note: keep this at the top level here
moto_core.ACCOUNT_ID = constants.TEST_AWS_ACCOUNT_ID
# -----------------------
# CONFIG UPDATE BACKDOOR
# -----------------------
def update_config_variable(variable, new_value):
if new_value is not None:
LOG.info('Updating value of config variable "%s": %s' % (variable, new_value))
setattr(config, variable, new_value)
class ConfigUpdateProxyListener(ProxyListener):
""" Default proxy listener that intercepts requests to retrieve or update config variables. """
def forward_request(self, method, path, data, headers):
if path != constants.CONFIG_UPDATE_PATH or method != 'POST':
return True
response = Response()
data = json.loads(data)
variable = data.get('variable', '')
response._content = '{}'
response.status_code = 200
if not re.match(r'^[_a-zA-Z0-9]+$', variable):
response.status_code = 400
return response
new_value = data.get('value')
update_config_variable(variable, new_value)
value = getattr(config, variable, None)
result = {'variable': variable, 'value': value}
response._content = json.dumps(result)
return response
GenericProxyHandler.DEFAULT_LISTENERS.append(ConfigUpdateProxyListener())
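# Illustrative usage sketch (not part of LocalStack): the listener above accepts a
# POST to constants.CONFIG_UPDATE_PATH with a JSON body {"variable": ..., "value": ...}
# and applies it via update_config_variable(). The localhost:4566 edge URL below is
# an assumption made for this sketch only.
def _example_update_config_via_backdoor():
    import requests
    payload = json.dumps({'variable': 'DEFAULT_REGION', 'value': 'us-west-2'})
    url = 'http://localhost:4566' + constants.CONFIG_UPDATE_PATH
    return requests.post(url, data=payload).json()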
# -----------------
# API ENTRY POINTS
# -----------------
def start_sns(port=None, asynchronous=False, update_listener=None):
port = port or config.PORT_SNS
return start_moto_server('sns', port, name='SNS', asynchronous=asynchronous,
update_listener=update_listener)
def start_sts(port=None, asynchronous=False):
port = port or config.PORT_STS
return start_moto_server('sts', port, name='STS', asynchronous=asynchronous)
def start_redshift(port=None, asynchronous=False):
port = port or config.PORT_REDSHIFT
return start_moto_server('redshift', port, name='Redshift', asynchronous=asynchronous)
def start_acm(port=None, asynchronous=False):
port = port or config.PORT_ACM
return start_moto_server('acm', port, name='ACM', asynchronous=asynchronous)
# TODO still needed?
def start_ses(port=None, asynchronous=False):
port = port or config.PORT_SES
return start_moto_server('ses', port, name='SES', asynchronous=asynchronous)
# TODO move to es_starter.py?
def start_elasticsearch_service(port=None, asynchronous=False):
port = port or config.PORT_ES
return start_local_api('ES', port, api='es', method=es_api.serve, asynchronous=asynchronous)
def start_firehose(port=None, asynchronous=False):
port = port or config.PORT_FIREHOSE
return start_local_api('Firehose', port, api='firehose', method=firehose_api.serve, asynchronous=asynchronous)
def start_dynamodbstreams(port=None, asynchronous=False):
port = port or config.PORT_DYNAMODBSTREAMS
return start_local_api('DynamoDB Streams', port, api='dynamodbstreams',
method=dynamodbstreams_api.serve, asynchronous=asynchronous)
def start_lambda(port=None, asynchronous=False):
port = port or config.PORT_LAMBDA
return start_local_api('Lambda', port, api='lambda', method=lambda_api.serve, asynchronous=asynchronous)
def start_ssm(port=None, asynchronous=False, update_listener=None):
port = port or config.PORT_SSM
return start_moto_server('ssm', port, name='SSM', asynchronous=asynchronous,
update_listener=update_listener)
# ---------------
# HELPER METHODS
# ---------------
def patch_urllib3_connection_pool(**constructor_kwargs):
"""
Override the default parameters of HTTPConnectionPool, e.g., set the pool size via maxsize=16
"""
try:
from urllib3 import connectionpool, poolmanager
class MyHTTPSConnectionPool(connectionpool.HTTPSConnectionPool):
def __init__(self, *args, **kwargs):
kwargs.update(constructor_kwargs)
super(MyHTTPSConnectionPool, self).__init__(*args, **kwargs)
poolmanager.pool_classes_by_scheme['https'] = MyHTTPSConnectionPool
class MyHTTPConnectionPool(connectionpool.HTTPConnectionPool):
def __init__(self, *args, **kwargs):
kwargs.update(constructor_kwargs)
super(MyHTTPConnectionPool, self).__init__(*args, **kwargs)
poolmanager.pool_classes_by_scheme['http'] = MyHTTPConnectionPool
except Exception:
pass
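# Usage note: the patch must run before any urllib3 connection pools are created,
# e.g. patch_urllib3_connection_pool(maxsize=16). start_infra() below applies it
# with maxsize=128 for exactly that reason.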
def patch_instance_tracker_meta():
"""
Avoid instance collection for moto dashboard
"""
    def new_instance(meta, name, bases, dct):
        cls = super(moto_core.models.InstanceTrackerMeta, meta).__new__(meta, name, bases, dct)
        if name == 'BaseModel':
            return cls
        cls.instances = []
        return cls
    moto_core.models.InstanceTrackerMeta.__new__ = new_instance
def new_basemodel(cls, *args, **kwargs):
instance = super(moto_core.models.BaseModel, cls).__new__(cls)
return instance
moto_core.models.BaseModel.__new__ = new_basemodel
def set_service_status(data):
command = data.get('command')
service = data.get('service')
service_ports = config.parse_service_ports()
if command == 'start':
existing = service_ports.get(service)
port = DEFAULT_SERVICE_PORTS.get(service)
if existing:
status = get_service_status(service, port)
if status == 'running':
return
key_upper = service.upper().replace('-', '_')
port_variable = 'PORT_%s' % key_upper
service_list = os.environ.get('SERVICES', '').strip()
services = [e for e in re.split(r'[\s,]+', service_list) if e]
contained = [s for s in services if s.startswith(service)]
if not contained:
services.append(service)
update_config_variable(port_variable, port)
new_service_list = ','.join(services)
os.environ['SERVICES'] = new_service_list
config.populate_configs()
LOG.info('Starting service %s on port %s' % (service, port))
SERVICE_PLUGINS[service].start(asynchronous=True)
return {}
def get_services_status():
result = {}
for service, port in config.parse_service_ports().items():
status = get_service_status(service, port)
result[service] = {
'port': port,
'status': status
}
return result
def get_service_status(service, port=None):
port = port or config.parse_service_ports().get(service)
status = 'disabled' if (port or 0) <= 0 else 'running' if is_port_open(port) else 'stopped'
return status
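# Example shape of get_services_status() output (values are illustrative only):
#   {'s3': {'port': 4566, 'status': 'running'},
#    'sqs': {'port': 4566, 'status': 'stopped'}}
# 'disabled' is reported for services whose configured port is 0 or negative.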
def get_multiserver_or_free_service_port():
if config.FORWARD_EDGE_INMEM:
return multiserver.get_moto_server_port()
return get_free_tcp_port()
def register_signal_handlers():
global SIGNAL_HANDLERS_SETUP
if SIGNAL_HANDLERS_SETUP:
return
# register signal handlers
def signal_handler(signal, frame):
stop_infra()
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
SIGNAL_HANDLERS_SETUP = True
def do_run(cmd, asynchronous, print_output=None, env_vars={}):
sys.stdout.flush()
if asynchronous:
if is_debug() and print_output is None:
print_output = True
outfile = subprocess.PIPE if print_output else None
t = ShellCommandThread(cmd, outfile=outfile, env_vars=env_vars)
t.start()
TMP_THREADS.append(t)
return t
return run(cmd, env_vars=env_vars)
def start_proxy_for_service(service_name, port, backend_port, update_listener, quiet=False, params={}):
# TODO: remove special switch for Elasticsearch (see also note in service_port(...) in config.py)
if config.FORWARD_EDGE_INMEM and service_name != 'elasticsearch':
if backend_port:
PROXY_LISTENERS[service_name] = (service_name, backend_port, update_listener)
return
# check if we have a custom backend configured
custom_backend_url = os.environ.get('%s_BACKEND' % service_name.upper())
backend_url = custom_backend_url or ('http://%s:%s' % (DEFAULT_BACKEND_HOST, backend_port))
return start_proxy(port, backend_url=backend_url, update_listener=update_listener, quiet=quiet, params=params)
def start_proxy(port, backend_url, update_listener=None, quiet=False, params={}, use_ssl=None):
use_ssl = config.USE_SSL if use_ssl is None else use_ssl
proxy_thread = start_proxy_server(port=port, forward_url=backend_url,
use_ssl=use_ssl, update_listener=update_listener, quiet=quiet, params=params)
return proxy_thread
def start_moto_server(key, port, name=None, backend_port=None, asynchronous=False, update_listener=None):
if not name:
name = key
print('Starting mock %s service on %s ...' % (name, edge_ports_info()))
if not backend_port:
if config.FORWARD_EDGE_INMEM:
backend_port = multiserver.get_moto_server_port()
elif config.USE_SSL or update_listener:
backend_port = get_free_tcp_port()
if backend_port or config.FORWARD_EDGE_INMEM:
start_proxy_for_service(key, port, backend_port, update_listener)
if config.BUNDLE_API_PROCESSES:
return multiserver.start_api_server(key, backend_port or port)
return start_moto_server_separate(key, port, name=name, backend_port=backend_port, asynchronous=asynchronous)
def start_moto_server_separate(key, port, name=None, backend_port=None, asynchronous=False):
moto_server_cmd = '%s/bin/moto_server' % LOCALSTACK_VENV_FOLDER
if not os.path.exists(moto_server_cmd):
moto_server_cmd = run('which moto_server').strip()
cmd = 'VALIDATE_LAMBDA_S3=0 %s %s -p %s -H %s' % (moto_server_cmd, key, backend_port or port, constants.BIND_HOST)
return do_run(cmd, asynchronous)
def start_local_api(name, port, api, method, asynchronous=False):
print('Starting mock %s service on %s ...' % (name, edge_ports_info()))
if config.FORWARD_EDGE_INMEM:
port = get_free_tcp_port()
PROXY_LISTENERS[api] = (api, port, None)
if asynchronous:
thread = start_thread(method, port, quiet=True)
return thread
else:
method(port)
def stop_infra(debug=False):
if common.INFRA_STOPPED:
return
common.INFRA_STOPPED = True
event_publisher.fire_event(event_publisher.EVENT_STOP_INFRA)
generic_proxy.QUIET = True
print_debug('[shutdown] Cleaning up files ...', debug)
common.cleanup(files=True, quiet=True)
print_debug('[shutdown] Cleaning up resources ...', debug)
common.cleanup_resources(debug=debug)
print_debug('[shutdown] Cleaning up Lambda resources ...', debug)
lambda_api.cleanup()
time.sleep(2)
# TODO: optimize this (takes too long currently)
# check_infra(retries=2, expect_shutdown=True)
def check_aws_credentials():
session = boto3.Session()
credentials = None
# hardcode credentials here, to allow us to determine internal API calls made via boto3
os.environ['AWS_ACCESS_KEY_ID'] = constants.INTERNAL_AWS_ACCESS_KEY_ID
os.environ['AWS_SECRET_ACCESS_KEY'] = constants.INTERNAL_AWS_ACCESS_KEY_ID
try:
credentials = session.get_credentials()
except Exception:
pass
session = boto3.Session()
credentials = session.get_credentials()
assert credentials
# -------------
# MAIN STARTUP
# -------------
def start_infra(asynchronous=False, apis=None):
try:
os.environ[LOCALSTACK_INFRA_PROCESS] = '1'
is_in_docker = in_docker()
# print a warning if we're not running in Docker but using Docker based LAMBDA_EXECUTOR
if not is_in_docker and 'docker' in config.LAMBDA_EXECUTOR and not is_linux():
print(('!WARNING! - Running outside of Docker with $LAMBDA_EXECUTOR=%s can lead to '
'problems on your OS. The environment variable $LOCALSTACK_HOSTNAME may not '
'be properly set in your Lambdas.') % config.LAMBDA_EXECUTOR)
if is_in_docker and not config.LAMBDA_REMOTE_DOCKER and not os.environ.get('HOST_TMP_FOLDER'):
print('!WARNING! - Looks like you have configured $LAMBDA_REMOTE_DOCKER=0 - '
"please make sure to configure $HOST_TMP_FOLDER to point to your host's $TMPDIR")
# apply patches
patch_urllib3_connection_pool(maxsize=128)
patch_instance_tracker_meta()
# load plugins
load_plugins()
# with plugins loaded, now start the infrastructure
thread = do_start_infra(asynchronous, apis, is_in_docker)
if not asynchronous and thread:
# this is a bit of an ugly hack, but we need to make sure that we
# stay in the execution context of the main thread, otherwise our
# signal handlers don't work
sleep_forever()
return thread
except KeyboardInterrupt:
print('Shutdown')
except Exception as e:
print('Error starting infrastructure: %s %s' % (e, traceback.format_exc()))
sys.stdout.flush()
raise e
finally:
if not asynchronous:
stop_infra()
def do_start_infra(asynchronous, apis, is_in_docker):
event_publisher.fire_event(event_publisher.EVENT_START_INFRA,
{'d': is_in_docker and 1 or 0, 'c': in_ci() and 1 or 0})
# set up logging
setup_logging()
# prepare APIs
apis = canonicalize_api_names(apis)
@log_duration()
def prepare_environment():
# set environment
os.environ['AWS_REGION'] = config.DEFAULT_REGION
os.environ['ENV'] = ENV_DEV
# register signal handlers
if not is_local_test_mode():
register_signal_handlers()
# make sure AWS credentials are configured, otherwise boto3 bails on us
check_aws_credentials()
@log_duration()
def prepare_installation():
# install libs if not present
install.install_components(apis)
@log_duration()
def start_api_services():
# Some services take a bit to come up
sleep_time = 5
# start services
thread = None
# loop through plugins and start each service
for name, plugin in SERVICE_PLUGINS.items():
if plugin.is_enabled(api_names=apis):
record_service_health(name, 'starting')
t1 = plugin.start(asynchronous=True)
thread = thread or t1
time.sleep(sleep_time)
# ensure that all infra components are up and running
check_infra(apis=apis)
# restore persisted data
persistence.restore_persisted_data(apis=apis)
return thread
prepare_environment()
prepare_installation()
thread = start_api_services()
print(READY_MARKER_OUTPUT)
sys.stdout.flush()
return thread
|
the-stack_0_6661 | from django.urls import path
from . import views
urlpatterns = [
path('<int:id_number>/<str:auth_status>/', views.leaks, name='leaks'),
path('login/', views.login_view, name='login'),
path('logout/', views.logout_view, name='logout'),
path('test/', views.test_login, name='test'),
path('ver/', views.get_ver, name='ver'),
path('save_dict/', views.save_dict, name='save_dict'),
path('info_url/<int:url_id>/', views.get_info, name='info_url'),
path('set_cookies/', views.set_cookies, name='set_cookies'),
path('check_cookies/', views.check_cookies, name='check_cookies'),
]
|
the-stack_0_6662 | from django.core.exceptions import ObjectDoesNotExist
from qfieldcloud.core import permissions_utils, serializers
from qfieldcloud.core.models import Job, Project
from rest_framework import generics, permissions, viewsets
from rest_framework.response import Response
from rest_framework.status import HTTP_201_CREATED
class JobPermissions(permissions.BasePermission):
def has_permission(self, request, view):
project_id = permissions_utils.get_param_from_request(request, "project_id")
try:
project = Project.objects.get(id=project_id)
except ObjectDoesNotExist:
return False
return permissions_utils.can_read_files(request.user, project)
class JobViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.JobSerializer
lookup_url_kwarg = "job_id"
permission_classes = [permissions.IsAuthenticated]
def get_serializer_by_job_type(self, job_type, *args, **kwargs):
if job_type == Job.Type.DELTA_APPLY:
return serializers.ApplyJobSerializer(*args, **kwargs)
elif job_type == Job.Type.PACKAGE:
return serializers.PackageJobSerializer(*args, **kwargs)
elif job_type == Job.Type.PROCESS_PROJECTFILE:
return serializers.ProcessProjectfileJobSerializer(*args, **kwargs)
else:
raise NotImplementedError(f'Unknown job type "{job_type}"')
def get_serializer(self, *args, **kwargs):
kwargs.setdefault("context", self.get_serializer_context())
if self.action in ("create"):
if "data" in kwargs:
job_type = kwargs["data"]["type"]
else:
job_type = args[0].type
return self.get_serializer_by_job_type(job_type, *args, **kwargs)
if self.action in ("retrieve",):
job_type = args[0].type
return self.get_serializer_by_job_type(job_type, *args, **kwargs)
return serializers.JobSerializer(*args, **kwargs)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if bool(int(request.data.get("force", 0))):
serializer.is_valid(raise_exception=True)
serializer.save()
else:
serializer.is_valid(raise_exception=True)
if not serializer.Meta.allow_parallel_jobs:
job = serializer.get_lastest_not_finished_job()
if job:
return Response(self.get_serializer(job).data)
serializer.save()
return Response(serializer.data, status=HTTP_201_CREATED)
def get_queryset(self):
qs = Job.objects.select_subclasses()
if self.action == "list":
project_id = self.request.data.get("project_id")
project = generics.get_object_or_404(Project, pk=project_id)
qs = qs.filter(project=project)
return qs
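# Note (illustrative, not part of the original module): get_serializer_by_job_type
# is what lets this single viewset handle heterogeneous jobs -- a POSTed job whose
# "type" resolves to Job.Type.PACKAGE is validated by PackageJobSerializer, while
# Job.Type.DELTA_APPLY jobs go through ApplyJobSerializer, and so on.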
|
the-stack_0_6665 | """Plugin system for strax
A 'plugin' is something that outputs an array and gets arrays
from one or more other plugins.
"""
from concurrent.futures import wait
from enum import IntEnum
import inspect
import itertools
import logging
import time
import typing
from immutabledict import immutabledict
import numpy as np
import strax
export, __all__ = strax.exporter()
@export
class SaveWhen(IntEnum):
"""Plugin's preference for having it's data saved"""
NEVER = 0 # Throw an error if the user lists it
EXPLICIT = 1 # Save ONLY if the user lists it explicitly
TARGET = 2 # Save if the user asks for it as a final target
ALWAYS = 3 # Save even if the user does not list it
@export
class InputTimeoutExceeded(Exception):
pass
@export
class PluginGaveWrongOutput(Exception):
pass
@export
class Plugin:
"""Plugin containing strax computation
You should NOT instantiate plugins directly.
Do NOT add unpickleable things (e.g. loggers) as attributes.
"""
__version__ = '0.0.0'
# For multi-output plugins these should be (immutable)dicts
data_kind: typing.Union[str, immutabledict, dict]
dtype: typing.Union[tuple, np.dtype, immutabledict, dict]
depends_on: tuple
provides: tuple
input_buffer: typing.Dict[str, strax.Chunk]
compressor = 'blosc'
rechunk_on_save = True # Saver is allowed to rechunk
# For a source with online input (e.g. DAQ readers), crash if no new input
# has appeared for this many seconds
# This should be smaller than the mailbox timeout (which is intended as
# a deep fallback)
input_timeout = 80
save_when = SaveWhen.ALWAYS
# Instructions how to parallelize
# False: never parallellize;
# 'process': use processpool;
# 'thread' (or just True): use threadpool.
parallel = False # For the computation itself
# Maximum number of output messages
max_messages = None # use default
# Do not specify attributes below
# Set using the takes_config decorator
takes_config = immutabledict()
# These are set on plugin initialization, which is done in the core
run_id: str
run_i: int
config: typing.Dict
deps: typing.Dict # Dictionary of dependency plugin instances
compute_takes_chunk_i = False # Autoinferred, no need to set yourself
compute_takes_start_end = False
def __init__(self):
if not hasattr(self, 'depends_on'):
raise ValueError('depends_on not provided for '
f'{self.__class__.__name__}')
self.depends_on = strax.to_str_tuple(self.depends_on)
# Store compute parameter names, see if we take chunk_i too
compute_pars = list(
inspect.signature(self.compute).parameters.keys())
if 'chunk_i' in compute_pars:
self.compute_takes_chunk_i = True
del compute_pars[compute_pars.index('chunk_i')]
if 'start' in compute_pars:
if 'end' not in compute_pars:
raise ValueError(f"Compute of {self} takes start, "
f"so it should also take end.")
self.compute_takes_start_end = True
del compute_pars[compute_pars.index('start')]
del compute_pars[compute_pars.index('end')]
self.compute_pars = compute_pars
self.input_buffer = dict()
def fix_dtype(self):
if not hasattr(self, 'dtype'):
self.dtype = self.infer_dtype()
if self.multi_output:
# Convert to a dict of numpy dtypes
if (not hasattr(self, 'data_kind')
or not isinstance(self.data_kind, (dict, immutabledict))):
raise ValueError(
f"{self.__class__.__name__} has multiple outputs and "
"must declare its data kind as a dict: "
"{dtypename: data kind}.")
if not isinstance(self.dtype, dict):
raise ValueError(
f"{self.__class__.__name__} has multiple outputs, so its "
"dtype must be specified as a dict: {output: dtype}.")
self.dtype = {k: strax.to_numpy_dtype(dt)
for k, dt in self.dtype.items()}
else:
# Convert to a numpy dtype
self.dtype = strax.to_numpy_dtype(self.dtype)
# Check required time information is present
for d in self.provides:
fieldnames = self.dtype_for(d).names
ok = 'time' in fieldnames and (
('dt' in fieldnames and 'length' in fieldnames)
or 'endtime' in fieldnames)
if not ok:
raise ValueError(
f"Missing time and endtime information for {d}")
@property
def multi_output(self):
return len(self.provides) > 1
def setup(self):
"""Hook if plugin wants to do something on initialization
"""
pass
def infer_dtype(self):
"""Return dtype of computed data;
used only if no dtype attribute defined"""
# Don't raise NotImplementedError, IDE will complain you're not
# implementing all abstract methods...
raise RuntimeError("No infer dtype method defined")
def version(self, run_id=None):
"""Return version number applicable to the run_id.
Most plugins just have a single version (in .__version__)
but some may be at different versions for different runs
(e.g. time-dependent corrections).
"""
return self.__version__
def __repr__(self):
return self.__class__.__name__
def dtype_for(self, data_type):
if self.multi_output:
return self.dtype[data_type]
return self.dtype
def empty_result(self):
if self.multi_output:
return {d: np.empty(0, self.dtype_for(d))
for d in self.provides}
return np.empty(0, self.dtype)
def data_kind_for(self, data_type):
if self.multi_output:
return self.data_kind[data_type]
return self.data_kind
def metadata(self, run_id, data_type):
"""Metadata to save along with produced data"""
        if data_type not in self.provides:
            raise RuntimeError(f"{data_type} not in {self.provides}?")
return dict(
run_id=run_id,
data_type=data_type,
data_kind=self.data_kind_for(data_type),
dtype=self.dtype_for(data_type),
lineage_hash=strax.DataKey(
run_id, data_type, self.lineage).lineage_hash,
compressor=self.compressor,
lineage=self.lineage)
def dependencies_by_kind(self):
"""Return dependencies grouped by data kind
i.e. {kind1: [dep0, dep1], kind2: [dep, dep]}
:param require_time: If True, one dependency of each kind
must provide time information. It will be put first in the list.
If require_time is omitted, we will require time only if there is
more than one data kind in the dependencies.
"""
return strax.group_by_kind(
self.depends_on,
plugins=self.deps)
def is_ready(self, chunk_i):
"""Return whether the chunk chunk_i is ready for reading.
Returns True by default; override if you make an online input plugin.
"""
return True
def source_finished(self):
"""Return whether all chunks the plugin wants to read have been written.
Only called for online input plugins.
"""
# Don't raise NotImplementedError, IDE complains
raise RuntimeError("source_finished called on a regular plugin")
def _fetch_chunk(self, d, iters, check_end_not_before=None):
"""Add a chunk of the datatype d to the input buffer.
Return True if this succeeded, False if the source is exhausted.
:param d: data type to fetch
:param iters: iterators that produce data
:param check_end_not_before: Raise a runtimeError if the source
is exhausted, but the input buffer ends before this time.
"""
try:
# print(f"Fetching {d} in {self}, hope to see {hope_to_see}")
self.input_buffer[d] = strax.Chunk.concatenate(
[self.input_buffer[d], next(iters[d])])
# print(f"Fetched {d} in {self}, "
# f"now have {self.input_buffer[d]}")
return True
except StopIteration:
# print(f"Got StopIteration while fetching for {d} in {self}")
if (check_end_not_before is not None
and self.input_buffer[d].end < check_end_not_before):
raise RuntimeError(
f"Tried to get data until {check_end_not_before}, but {d} "
f"ended prematurely at {self.input_buffer[d].end}")
return False
def iter(self, iters, executor=None):
"""Iterate over dependencies and yield results
:param iters: dict with iterators over dependencies
:param executor: Executor to punt computation tasks to. If None,
will compute inside the plugin's thread.
"""
pending_futures = []
last_input_received = time.time()
self.input_buffer = {d: None
for d in self.depends_on}
# Fetch chunks from all inputs. Whoever is the slowest becomes the
# pacemaker
pacemaker = None
_end = float('inf')
for d in self.depends_on:
self._fetch_chunk(d, iters)
if self.input_buffer[d].end < _end:
pacemaker = d
_end = self.input_buffer[d].end
for chunk_i in itertools.count():
# Online input support
while not self.is_ready(chunk_i):
if self.source_finished():
# Chunk_i does not exist. We are done.
print("Source finished!")
self.cleanup(iters, wait_for=pending_futures)
return
if time.time() > last_input_received + self.input_timeout:
raise InputTimeoutExceeded(
f"{self.__class__.__name__}:{id(self)} waited for "
f"more than {self.input_timeout} sec for arrival of "
f"input chunk {chunk_i}, and has given up.")
print(f"{self.__class__.__name__}:{id(self)} "
f"waiting for chunk {chunk_i}")
time.sleep(2)
last_input_received = time.time()
if pacemaker is None:
inputs_merged = dict()
else:
if chunk_i != 0:
# Fetch the pacemaker, to figure out when this chunk ends
# (don't do it for chunk 0, for which we already fetched)
if not self._fetch_chunk(pacemaker, iters):
# Source exhausted. Cleanup will do final checks.
self.cleanup(iters, wait_for=pending_futures)
return
this_chunk_end = self.input_buffer[pacemaker].end
inputs = dict()
# Fetch other inputs (when needed)
for d in self.depends_on:
if d != pacemaker:
while (self.input_buffer[d] is None
or self.input_buffer[d].end < this_chunk_end):
self._fetch_chunk(
d, iters,
check_end_not_before=this_chunk_end)
inputs[d], self.input_buffer[d] = \
self.input_buffer[d].split(
t=this_chunk_end,
allow_early_split=True)
# If any of the inputs were trimmed due to early splits,
# trim the others too.
# In very hairy cases this can take multiple passes.
# TODO: can we optimize this, or code it more elegantly?
max_passes_left = 10
while max_passes_left > 0:
this_chunk_end = min([x.end for x in inputs.values()]
+ [this_chunk_end])
if len(set([x.end for x in inputs.values()])) <= 1:
break
for d in self.depends_on:
inputs[d], back_to_buffer = \
inputs[d].split(
t=this_chunk_end,
allow_early_split=True)
self.input_buffer[d] = strax.Chunk.concatenate(
[back_to_buffer, self.input_buffer[d]])
max_passes_left -= 1
else:
raise RuntimeError(
f"{self} was unable to get time-consistent "
f"inputs after ten passess. Inputs: \n{inputs}\n"
f"Input buffer:\n{self.input_buffer}")
# Merge inputs of the same kind
inputs_merged = {
kind: strax.Chunk.merge([inputs[d] for d in deps_of_kind])
for kind, deps_of_kind in self.dependencies_by_kind().items()}
# Submit the computation
# print(f"{self} calling with {inputs_merged}")
if self.parallel and executor is not None:
new_future = executor.submit(
self.do_compute,
chunk_i=chunk_i,
**inputs_merged)
pending_futures.append(new_future)
pending_futures = [f for f in pending_futures if not f.done()]
yield new_future
else:
yield self.do_compute(chunk_i=chunk_i, **inputs_merged)
raise RuntimeError("This cannot happen.")
def cleanup(self,
iters: typing.Dict[str, typing.Iterable],
wait_for):
# The wait_for option is only used in child classes;
# A standard plugin doesn't need to do anything with the computation
# future results.
# Check all sources are exhausted.
        # This is more than a check though -- it ensures the content of
        # all sources is requested all the way through (including the final
        # StopIteration), as lazy-mode processing requires
for d in iters.keys():
if self._fetch_chunk(d, iters):
raise RuntimeError(
f"Plugin {d} terminated without fetching last {d}!")
# Check the input buffer is empty
for d, buffer in self.input_buffer.items():
if buffer is not None and len(buffer):
raise RuntimeError(
f"Plugin {d} terminated with leftover {d}: {buffer}")
def _check_dtype(self, x, d=None):
# There is an additional 'last resort' data type check
# in the chunk initialization.
# This one is broader and gives a more context-aware message.
if d is None:
assert not self.multi_output
d = self.provides[0]
pname = self.__class__.__name__
if not isinstance(x, np.ndarray):
raise strax.PluginGaveWrongOutput(
f"Plugin {pname} did not deliver "
f"data type {d} as promised.\n"
f"Delivered a {type(x)}")
expect = strax.remove_titles_from_dtype(self.dtype_for(d))
if not isinstance(expect, np.dtype):
raise ValueError(f"Plugin {pname} expects {expect} as dtype??")
got = strax.remove_titles_from_dtype(x.dtype)
if got != expect:
raise strax.PluginGaveWrongOutput(
f"Plugin {pname} did not deliver "
f"data type {d} as promised.\n"
f"Promised: {expect}\n"
f"Delivered: {got}.")
def do_compute(self, chunk_i=None, **kwargs):
"""Wrapper for the user-defined compute method
This is the 'job' that gets executed in different processes/threads
during multiprocessing
"""
for k, v in kwargs.items():
if not isinstance(v, strax.Chunk):
raise RuntimeError(
f"do_compute of {self.__class__.__name__} got a {type(v)} "
f"instead of a strax Chunk for {k}")
if len(kwargs):
# Check inputs describe the same time range
tranges = {k: (v.start, v.end) for k, v in kwargs.items()}
if len(set(tranges.values())) != 1:
raise ValueError(f"{self.__class__.__name__} got inconsistent "
f"time ranges of inputs: {tranges}")
start, end = list(tranges.values())[0]
else:
# This plugin starts from scratch
start, end = None, None
kwargs = {k: v.data for k, v in kwargs.items()}
if self.compute_takes_chunk_i:
kwargs['chunk_i'] = chunk_i
if self.compute_takes_start_end:
kwargs['start'] = start
kwargs['end'] = end
result = self.compute(**kwargs)
return self._fix_output(result, start, end)
def _fix_output(self, result, start, end, _dtype=None):
if self.multi_output and _dtype is None:
if not isinstance(result, dict):
raise ValueError(
f"{self.__class__.__name__} is multi-output and should "
"provide a dict output {dtypename: result}")
return {d: self._fix_output(result[d], start, end, _dtype=d)
for d in self.provides}
if _dtype is None:
assert not self.multi_output
_dtype = self.provides[0]
if not isinstance(result, strax.Chunk):
if start is None:
assert len(self.depends_on) == 0
raise ValueError(
"Plugins without dependencies must return full strax "
f"Chunks, but {self.__class__.__name__} produced a "
f"{type(result)}!")
result = strax.dict_to_rec(result, dtype=self.dtype_for(_dtype))
self._check_dtype(result, _dtype)
result = self.chunk(
start=start,
end=end,
data_type=_dtype,
data=result)
return result
def chunk(self, *, start, end, data, data_type=None, run_id=None):
if data_type is None:
if self.multi_output:
raise ValueError("Must give data_type when making chunks from "
"a multi-output plugin")
data_type = self.provides[0]
if run_id is None:
run_id = self.run_id
return strax.Chunk(
start=start,
end=end,
run_id=run_id,
data_kind=self.data_kind_for(data_type),
data_type=data_type,
dtype=self.dtype_for(data_type),
data=data)
def compute(self, **kwargs):
raise NotImplementedError
##
# Special plugins
##
@export
class OverlapWindowPlugin(Plugin):
"""Plugin whose computation depends on having its inputs extend
a certain window on both sides.
Current implementation assumes:
- All inputs are sorted by *endtime*. Since everything in strax is sorted
by time, this only works for disjoint intervals such as peaks or events,
but NOT records!
- You must read time info for your data kind, or create a new data kind.
"""
parallel = False
def __init__(self):
super().__init__()
self.cached_input = {}
self.cached_results = None
self.sent_until = 0
# This guy can have a logger, it's not parallelized anyway
self.log = logging.getLogger(self.__class__.__name__)
def get_window_size(self):
"""Return the required window size in nanoseconds"""
raise NotImplementedError
def iter(self, iters, executor=None):
yield from super().iter(iters, executor=executor)
# Yield final results, kept at bay in fear of a new chunk
if self.cached_results is not None:
yield self.cached_results
def do_compute(self, chunk_i=None, **kwargs):
if not len(kwargs):
raise RuntimeError("OverlapWindowPlugin must have a dependency")
# Add cached inputs to compute arguments
for k, v in kwargs.items():
if len(self.cached_input):
kwargs[k] = strax.Chunk.concatenate(
[self.cached_input[k], v])
# Compute new results
result = super().do_compute(chunk_i=chunk_i, **kwargs)
# Throw away results we already sent out
_, result = result.split(t=self.sent_until,
allow_early_split=False)
# When does this batch of inputs end?
ends = [v.end for v in kwargs.values()]
if not len(set(ends)) == 1:
raise RuntimeError(
f"OverlapWindowPlugin got incongruent inputs: {kwargs}")
end = ends[0]
# When can we no longer trust our results?
# Take slightly larger windows for safety: it is very easy for me
# (or the user) to have made an off-by-one error
invalid_beyond = int(end - self.get_window_size() - 1)
# Prepare to send out valid results, cache the rest
# Do not modify result anymore after this
# Note result.end <= invalid_beyond, with equality if there are
# no overlaps
result, self.cached_results = result.split(t=invalid_beyond,
allow_early_split=True)
self.sent_until = result.end
# Cache a necessary amount of input for next time
# Again, take a bit of overkill for good measure
cache_inputs_beyond = int(self.sent_until
- 2 * self.get_window_size() - 1)
for k, v in kwargs.items():
_, self.cached_input[k] = v.split(t=cache_inputs_beyond,
allow_early_split=True)
return result
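# Illustrative sketch (not part of strax itself): a minimal OverlapWindowPlugin
# subclass only declares its inputs/outputs and reports, via get_window_size(),
# how much extra time it needs around each chunk boundary. The data type names
# and the 1 ms window below are hypothetical.
class _ExampleCoincidenceWindowPlugin(OverlapWindowPlugin):
    depends_on = ('peaks',)
    provides = ('example_coincidences',)
    dtype = [(('Start time since unix epoch [ns]', 'time'), np.int64),
             (('Exclusive end time since unix epoch [ns]', 'endtime'), np.int64)]
    def get_window_size(self):
        return int(1e6)  # 1 ms of slack on either side, in ns
    def compute(self, peaks):
        # Toy computation: pass through the time range of each peak.
        return dict(time=peaks['time'], endtime=strax.endtime(peaks))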
@export
class LoopPlugin(Plugin):
"""Plugin that disguises multi-kind data-iteration by an event loop
"""
def compute(self, **kwargs):
# If not otherwise specified, data kind to loop over
# is that of the first dependency (e.g. events)
# Can't be in __init__: deps not initialized then
if hasattr(self, 'loop_over'):
loop_over = self.loop_over
else:
loop_over = self.deps[self.depends_on[0]].data_kind
# Group into lists of things (e.g. peaks)
# contained in the base things (e.g. events)
base = kwargs[loop_over]
if len(base) > 1:
assert np.all(base[1:]['time'] >= strax.endtime(base[:-1])), \
f'{base}s overlap'
for k, things in kwargs.items():
# Check for sorting
difs = np.diff(things['time'])
if difs.min(initial=0) < 0:
i_bad = np.argmin(difs)
examples = things[i_bad-1:i_bad+3]
t0 = examples['time'].min()
raise ValueError(
f'Expected {k} to be sorted, but found ' +
str([(x['time'] - t0, strax.endtime(x) - t0)
for x in examples]))
if k != loop_over:
r = strax.split_by_containment(things, base)
if len(r) != len(base):
raise RuntimeError(f"Split {k} into {len(r)}, "
f"should be {len(base)}!")
kwargs[k] = r
results = np.zeros(len(base), dtype=self.dtype)
deps_by_kind = self.dependencies_by_kind()
for i in range(len(base)):
r = self.compute_loop(base[i],
**{k: kwargs[k][i]
for k in deps_by_kind
if k != loop_over})
# Convert from dict to array row:
for k, v in r.items():
results[i][k] = v
return results
def compute_loop(self, *args, **kwargs):
raise NotImplementedError
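# Illustrative sketch (not part of strax itself): a LoopPlugin subclass gets one
# base thing (e.g. an event) per compute_loop call, together with the lower-level
# things contained in it (e.g. its peaks), and returns one result row as a dict.
# The field names below, including the assumed 'area' field of peaks, are hypothetical.
class _ExampleEventArea(LoopPlugin):
    depends_on = ('events', 'peaks')
    provides = ('example_event_area',)
    dtype = [(('Start time since unix epoch [ns]', 'time'), np.int64),
             (('Exclusive end time since unix epoch [ns]', 'endtime'), np.int64),
             (('Total peak area inside the event', 'area_sum'), np.float64)]
    def compute_loop(self, event, peaks):
        return dict(time=event['time'],
                    endtime=strax.endtime(event),
                    area_sum=float(np.sum(peaks['area'])) if len(peaks) else 0.0)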
##
# "Plugins" for internal use
# These do not actually do computations, but do other tasks
# for which posing as a plugin is helpful.
# Do not subclass unless you know what you are doing..
##
@export
class MergeOnlyPlugin(Plugin):
"""Plugin that merges data from its dependencies
"""
save_when = SaveWhen.NEVER
def infer_dtype(self):
deps_by_kind = self.dependencies_by_kind()
if len(deps_by_kind) != 1:
raise ValueError("MergeOnlyPlugins can only merge data "
"of the same kind, but got multiple kinds: "
+ str(deps_by_kind))
return strax.merged_dtype([
self.deps[d].dtype_for(d)
# Sorting is needed here to match what strax.Chunk does in merging
for d in sorted(self.depends_on)])
def compute(self, **kwargs):
return kwargs[list(kwargs.keys())[0]]
@export
class ParallelSourcePlugin(Plugin):
"""An plugin that inlines the computations of other plugins
and the saving of their results.
This evades data transfer (pickling and/or memory copy) penalties
while multiprocessing.
"""
parallel = 'process'
@classmethod
def inline_plugins(cls, components, start_from, log):
plugins = components.plugins.copy()
sub_plugins = {start_from: plugins[start_from]}
del plugins[start_from]
# Gather all plugins that do not rechunk and which branch out as a
# simple tree from the input plugin.
# We'll run these all together in one process.
while True:
# Scan for plugins we can inline
for p in plugins.values():
if (p.parallel
and all([d in sub_plugins for d in p.depends_on])):
for d in p.provides:
sub_plugins[d] = p
if d in plugins:
del plugins[d]
# Rescan
break
else:
# No more plugins we can inline
break
if len(set(list(sub_plugins.values()))) == 1:
# Just one plugin to inline: no use
log.debug("Just one plugin to inline: skipping")
return components
# Which data types should we output? Three cases follow.
outputs_to_send = set()
# Case 1. Requested as a final target
for p in sub_plugins.values():
outputs_to_send.update(set(components.targets)
.intersection(set(p.provides)))
# Case 2. Requested by a plugin we did not inline
for d, p in plugins.items():
outputs_to_send.update(set(p.depends_on))
outputs_to_send &= sub_plugins.keys()
# Inline savers that do not require rechunking
savers = components.savers
sub_savers = dict()
for p in sub_plugins.values():
for d in p.provides:
if d not in savers:
continue
if p.rechunk_on_save:
# Case 3. has a saver we can't inline
outputs_to_send.add(d)
continue
remaining_savers = []
for s_i, s in enumerate(savers[d]):
if not s.allow_fork:
# Case 3 again, cannot inline saver
outputs_to_send.add(d)
remaining_savers.append(s)
continue
if d not in sub_savers:
sub_savers[d] = []
s.is_forked = True
sub_savers[d].append(s)
savers[d] = remaining_savers
if not len(savers[d]):
del savers[d]
p = cls(depends_on=sub_plugins[start_from].depends_on)
p.sub_plugins = sub_plugins
assert len(outputs_to_send)
p.provides = tuple(outputs_to_send)
p.sub_savers = sub_savers
p.start_from = start_from
if p.multi_output:
p.dtype = {d: p.sub_plugins[d].dtype_for(d)
for d in outputs_to_send}
else:
to_send = list(outputs_to_send)[0]
p.dtype = p.sub_plugins[to_send].dtype_for(to_send)
for d in p.provides:
plugins[d] = p
p.deps = {d: plugins[d] for d in p.depends_on}
log.debug(f"Inlined plugins: {p.sub_plugins}."
f"Inlined savers: {p.sub_savers}")
return strax.ProcessorComponents(
plugins, components.loaders, savers, components.targets)
def __init__(self, depends_on):
self.depends_on = depends_on
super().__init__()
def source_finished(self):
return self.sub_plugins[self.start_from].source_finished()
def is_ready(self, chunk_i):
return self.sub_plugins[self.start_from].is_ready(chunk_i)
def do_compute(self, chunk_i=None, **kwargs):
results = kwargs
# Run the different plugin computations
while True:
for output_name, p in self.sub_plugins.items():
if output_name in results:
continue
if any([d not in results for d in p.depends_on]):
continue
compute_kwargs = dict(chunk_i=chunk_i)
for kind, d_of_kind in p.dependencies_by_kind().items():
compute_kwargs[kind] = strax.Chunk.merge(
[results[d] for d in d_of_kind])
# Store compute result(s)
r = p.do_compute(**compute_kwargs)
if p.multi_output:
for d in r:
results[d] = r[d]
else:
results[output_name] = r
# Rescan plugins to see if we can compute anything more
break
else:
# Nothing further to compute
break
for d in self.provides:
assert d in results, f"Output {d} missing!"
# Save anything we can through the inlined savers
for d, savers in self.sub_savers.items():
for s in savers:
s.save(chunk=results[d], chunk_i=chunk_i)
# Remove results we do not need to send
for d in list(results.keys()):
if d not in self.provides:
del results[d]
if self.multi_output:
for k in self.provides:
assert k in results
assert isinstance(results[k], strax.Chunk)
r0 = results[k]
else:
results = r0 = results[self.provides[0]]
assert isinstance(r0, strax.Chunk)
return self._fix_output(results, start=r0.start, end=r0.end)
def cleanup(self, iters, wait_for):
print(f"{self.__class__.__name__} exhausted. "
f"Waiting for {len(wait_for)} pending futures.")
for savers in self.sub_savers.values():
for s in savers:
s.close(wait_for=wait_for)
super().cleanup(iters, wait_for)
|
the-stack_0_6666 | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import io
import os
from ruamel.yaml import YAML
from armi.tests.test_plugins import TestPlugin
from armi.physics import neutronics
from armi.settings import caseSettings
from armi.physics.neutronics.const import CONF_CROSS_SECTION
XS_EXAMPLE = """AA:
geometry: 0D
criticalBuckling: true
blockRepresentation: Median
BA:
geometry: 1D slab
criticalBuckling: false
blockRepresentation: Median
"""
class Test_NeutronicsPlugin(TestPlugin):
plugin = neutronics.NeutronicsPlugin
def test_customSettingObjectIO(self):
"""Check specialized settings can build objects as values and write."""
cs = caseSettings.Settings()
yaml = YAML()
inp = yaml.load(io.StringIO(XS_EXAMPLE))
cs[CONF_CROSS_SECTION] = inp
self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].geometry, "0D")
fname = "test_setting_obj_io_.yaml"
cs.writeToYamlFile(fname)
os.remove(fname)
def test_customSettingRoundTrip(self):
"""Check specialized settings can go back and forth."""
cs = caseSettings.Settings()
yaml = YAML()
inp = yaml.load(io.StringIO(XS_EXAMPLE))
cs[CONF_CROSS_SECTION] = inp
cs[CONF_CROSS_SECTION] = cs[CONF_CROSS_SECTION]
fname = "test_setting_obj_io_round.yaml"
cs.writeToYamlFile(fname)
os.remove(fname)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_6668 | import os
import re
import numpy as np
import csv
def write2csv(path):
# path='Planetoid_node_classification/results/result_GAT_pyg_Citeseer_GPU0_23h12m32s_on_Oct_28_2020.txt'
csv_file=open('results.csv','w',encoding='gbk',newline='')
csv_writer=csv.writer(csv_file)
csv_writer.writerow(['data','model','L','params','train','val','test','epoch'])
totals = []
for path in findAllFile(path):
print(path)
file=open(path)
iterf=iter(file)
for line in iterf:
a = line.find('Dataset:')
b = line.find('net_params={\'L\':')
c=line.find('Model:')
d=line.find('Total Parameters:')
e=line.find('TEST ACCURACY')
h = line.find('val ACCURACY')
f=line.find('TRAIN ACCURACY')
g=line.find(' Convergence Time (Epochs):')
# h=line.find('params={\'seed\':')
# print(g)
if a == 0:
dataset = line[line.index(':') + 2:line.index(',')]
if b == 0:
net = line[line.index(':') + 2:line.index(',')]
if c == 0:
model = line[line.index(':')+2:line.index('_')]
if d == 0:
Parameters = line[line.index(':')+2:line.index('\n')]
if e == 0:
TEST = line[line.index(':')+2:line.index('\n')]
if h == 0:
val = line[line.index(':')+2:line.index('\n')]
if f == 0:
TRAIN = line[line.index(':') + 2:line.index('\n')]
# if h == 0:
# seed = line[line.index(':') + 2:line.index(',')]
if g == 0:
Epochs = line[line.index(':') + 2:line.index('\n')]
totals.append([dataset, model, net, Parameters, TRAIN, val,TEST, Epochs])
# csv_writer.writerow([dataset, model, net, Parameters, TRAIN, TEST, Epochs])
break
totals.sort(key=lambda x: ((x[0]), (x[1]), int(x[2])), reverse=False)
out = []
calculate = []
for i in range(totals.__len__()):
out.append(totals[i])
csv_writer.writerow(out[i])
        if (i+1) % 4 == 0:  # aggregate every block of 4 consecutive runs: mean ± std
avg_train_acc = np.array(totals[i-3:i+1])[:,4]
avg_val_acc = np.array(totals[i-3:i+1])[:,5]
avg_test_acc = np.array(totals[i-3:i+1])[:,6]
# avg_test_acc [totals[i-4:i][0][4], totals[:4][1][4], totals[:4][2][4], totals[:4][3][4]]
avg_epoch = np.array(totals[i-3:i+1])[:,7]
train_acc=str(np.around(np.mean(np.array(avg_train_acc, dtype=np.float32)),decimals=4))+'±'+str(np.around(np.std(np.array(avg_train_acc, dtype=np.float32),ddof = 1),decimals=4))
val_acc = str(np.around(np.mean(np.array(avg_val_acc, dtype=np.float32)),decimals=4)) + '±' + str(np.around(np.std(np.array(avg_val_acc, dtype=np.float32), ddof=1),decimals=4))
test_acc= str(np.around(np.mean(np.array(avg_test_acc, dtype=np.float32)),decimals=4))+'±'+str(np.around(np.std(np.array(avg_test_acc, dtype=np.float32),ddof = 1),decimals=4))
Epochs_acc = str(np.around(np.mean(np.array(avg_epoch, dtype=np.float32)),decimals=4))+'±'+str(np.around(np.std(np.array(avg_epoch, dtype=np.float32),ddof = 1),decimals=4))
calculate.append([out[i-1][0], out[i-1][1], out[i-1][2], out[i-1][3], train_acc, val_acc ,test_acc, Epochs_acc])
csv_writer.writerow(calculate[int((i+1)/4-1)])
csv_file.close()
file.close()
def findAllFile(base):
for root, ds, fs in os.walk(base):
for f in fs:
if f.endswith('.txt'):
fullname = os.path.join(root, f)
yield fullname
def main():
    # base = 'Planetoid_node_classification/results/'
base = 'SBMs_node_classification/results/'
# for path in findAllFile(base):
# print(path)
np.set_printoptions(precision=4)
write2csv(base)
if __name__ == '__main__':
main() |
the-stack_0_6669 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,invalid-name
"""Bitserial conv2d schedule on arm cpu"""
from __future__ import absolute_import as _abs
import tvm
from tvm import autotvm
from .. import tag
from ..nn.pad import pad
from ..nn.bitserial_conv2d import bitpack, bitserial_conv2d_nhwc
from ..nn.util import get_pad_tuple
from ..util import get_const_int, get_const_tuple
from .. import generic
def _kernel_vec_spatial_pack_nhwc(kernel, kernel_bits, VC, use_bitpack=True):
if use_bitpack:
kernel_q = bitpack(kernel, kernel_bits, pack_axis=2, bit_axis=2, pack_type='uint8')
else:
kernel_q = kernel
KH, KW, KB, CI, CO = kernel_q.shape
kvshape = (CO//VC, KH, KW, KB, VC, CI)
return tvm.compute(kvshape, lambda co, dh, dw, b, vc, ci: \
kernel_q[dh][dw][b][ci][co*VC+vc], name='kernel_vec')
@autotvm.register_topi_compute(bitserial_conv2d_nhwc, 'arm_cpu', 'direct')
def spatial_pack_nhwc(cfg, data, kernel, stride, padding, activation_bits, weight_bits,
pack_dtype, out_dtype, unipolar):
""" Compute convolution with pack on spatial axes. """
assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1"
assert pack_dtype == 'uint8', "only support packing into uint8 bits"
assert out_dtype == 'int16', "only support output type of int16"
N, H, W, CI = get_const_tuple(data.shape)
if len(kernel.shape) == 4:
KH, KW, _, CO = get_const_tuple(kernel.shape)
CI_packed = CI // 8
else:
KH, KW, KB, CI_packed, CO = get_const_tuple(kernel.shape)
if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2):
TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel)
else:
TPAD, LPAD, DPAD, RPAD = padding
if isinstance(stride, (tuple, list)):
HSTR, WSTR = stride
else:
HSTR, WSTR = stride, stride
HCAT, WCAT = KH-1, KW-1
PAD_H = H + (TPAD + DPAD)
PAD_W = W + (LPAD + RPAD)
OH = (PAD_H - KH) // HSTR + 1
OW = (PAD_W - KW) // WSTR + 1
oshape = (1, OH, OW, CO)
# Pad input channels of weights and data when it is not a multiple of 8
if CI_packed % 8 != 0:
CI_PAD = CI_packed % 8
CI_packed += CI_PAD
else:
CI_PAD = 0
# ==================== define configuration space ====================
n, oh, ow, co = cfg.axis(N), cfg.axis(OH), cfg.axis(OW), cfg.axis(CO)
ci, kh, kw = cfg.reduce_axis(CI_packed), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
ib, kb = cfg.reduce_axis(activation_bits), cfg.reduce_axis(weight_bits)
co, vc = cfg.define_split('tile_co', co, policy='all', num_outputs=2,
filter=lambda x: x.size[-1] == 8)
oh, vh = cfg.define_split('tile_oh', oh, policy='all', num_outputs=2,
filter=lambda x: x.size[-1] >= 2)
ow, vw = cfg.define_split('tile_ow', ow, policy='all', num_outputs=2,
filter=lambda x: x.size[-1] >= 2)
ci_o, ci_i = cfg.define_split("tile_ci", ci, num_outputs=2,
filter=lambda x: x.size[-1] == 8 or x.size[-1] == 16)
re_axes = cfg.define_reorder("reorder_0",
[n, oh, ow, co, vh, vw, kh, kw, ci_o, kb, ib, vc, ci_i],
policy='candidate', candidate=[
[n, oh, ow, co, vh, vw, kh, kw, ci_o, kb, ib, vc, ci_i],
[n, oh, ow, co, vh, vw, kw, kh, ci_o, kb, ib, vc, ci_i],])
cfg.add_flop(2 * N * OH * OW * CO * CI * 8 * KH * KW) # these are actually binary ops
# ====================
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
data_q = bitpack(data, activation_bits, pack_axis=3, bit_axis=3, pack_type='uint8')
kernel_vec = _kernel_vec_spatial_pack_nhwc(kernel, weight_bits, VC, len(kernel.shape) == 4)
if kernel_vec.shape[-1] % 8 != 0 and CI_PAD != 0:
kernel_vec = pad(kernel_vec, [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, CI_PAD])
N, H, W, IB, CI = data_q.shape
OCO, KH, KW, KB, VC, CI = kernel_vec.shape
dvshape = (N, PAD_H//(VH*HSTR), PAD_W//(VW*WSTR), VH*HSTR+HCAT, VW*WSTR+WCAT, IB, CI)
ovshape = (1, OH // VH, OW // VW, CO // VC, VH, VW, VC)
if (TPAD != 0 and RPAD != 0):
data_pad = pad(data_q, (0, TPAD, LPAD, 0, 0), (0, DPAD, RPAD, 0, CI_PAD), name="data_pad")
elif CI_PAD != 0:
data_pad = pad(data_q, (0, 0, 0, 0, 0), (0, 0, 0, 0, CI_PAD), name="data_pad")
else:
data_pad = data_q
data_vec = tvm.compute(dvshape, lambda n, h, w, vh, vw, b, ci: \
data_pad[n][h*VH*HSTR+vh][w*VW*WSTR+vw][b][ci], name='data_vec')
ci = tvm.reduce_axis((0, CI), name='ci')
dh = tvm.reduce_axis((0, KH), name='dh')
dw = tvm.reduce_axis((0, KW), name='dw')
ib = tvm.reduce_axis((0, IB), name='ib')
kb = tvm.reduce_axis((0, KB), name='kb')
def _bipolar_conv(n, h, w, co, vh, vw, vc):
return tvm.sum((tvm.popcount(
kernel_vec[co, dh, dw, kb, vc, ci].astype('uint16') &
data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ib, ci].astype('uint16'))
<< (kb + ib).astype('uint16')), axis=[dh, dw, kb, ib, ci])
def _unipolar_conv(n, h, w, co, vh, vw, vc):
return tvm.sum(
((tvm.popcount(kernel_vec[co, dh, dw, kb, vc, ci].astype('int16') &
data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ib, ci].astype('int16')) -
tvm.popcount(~kernel_vec[co, dh, dw, kb, vc, ci].astype('int16') &
data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ib, ci]).astype('int16'))
<< (kb + ib).astype('int16')), axis=[dh, dw, kb, ib, ci])
if unipolar:
conv_vec = tvm.compute(ovshape, _unipolar_conv, name='conv_vec', tag='unipolar')
else:
conv_vec = tvm.compute(ovshape, _bipolar_conv, name='conv_vec', tag='bipolar')
conv = tvm.compute(oshape, lambda n, h, w, co:
conv_vec[n][h//VH][w//VW][co//VC][h%VH][w%VW][co%VC].astype(out_dtype),
name='conv', tag='spatial_bitserial_conv_nhwc')
return conv
def _intrin_popcount(m, k_i, w_b, x_b, unipolar):
pack_dtype = 'uint8'
w = tvm.placeholder((w_b, m, k_i), dtype=pack_dtype, name='w')
x = tvm.placeholder((x_b, k_i,), dtype=pack_dtype, name='x')
k = tvm.reduce_axis((0, k_i), name='k')
bw = tvm.reduce_axis((0, w_b), name='bw')
bx = tvm.reduce_axis((0, x_b), name='bx')
if unipolar:
dtype = 'int16'
z = tvm.compute((m,), lambda i:
tvm.sum((tvm.popcount(w[bw, i, k].astype(dtype) & x[bx, k].astype(dtype)) -
tvm.popcount(~w[bw, i, k].astype(dtype) & x[bx, k].astype(dtype)))
<< (bw+bx).astype(dtype), axis=[bw, bx, k]), name='z')
else:
dtype = 'uint16'
z = tvm.compute((m,), lambda i:
tvm.sum(tvm.popcount(w[bw, i, k].astype(dtype) & x[bx, k].astype(dtype))
<< (bw+bx).astype(dtype), axis=[bw, bx, k]), name='z')
Wb = tvm.decl_buffer(w.shape, w.dtype,
name="W",
offset_factor=k_i,
strides=[tvm.var('ldw'), tvm.var('ldw'), 1]) # stride can be inferred
Xb = tvm.decl_buffer(x.shape, x.dtype,
name="X",
offset_factor=k_i,
strides=[tvm.var('ldw'), 1])
Zb = tvm.decl_buffer(z.shape, z.dtype,
name="Z",
offset_factor=1,
strides=[1])
def _intrin_func(ins, outs):
ww, xx = ins
zz = outs[0]
args_1 = tvm.const(1, 'uint32')
args_2 = tvm.const(2, 'uint32')
if unipolar:
vpadd = "llvm.arm.neon.vpadd.v8i8"
vpadalu = "llvm.arm.neon.vpadals.v16i8.v8i16"
full_dtype = 'int8x16'
half_dtype = 'int8x8'
return_dtype = 'int16x8'
else:
vpadd = "llvm.arm.neon.vpadd.v8u8"
vpadalu = "llvm.arm.neon.vpadalu.v16u8.v8u16"
full_dtype = 'uint8x16'
half_dtype = 'uint8x8'
return_dtype = 'uint16x8'
def _instr(index):
irb = tvm.ir_builder.create()
if index == 1: # reduce reset
irb.emit(zz.vstore(0, tvm.const(0, return_dtype)))
return irb.get()
# body and reduce update
cnts8 = [None] * 8
cnts4 = [None] * 4
cnts2 = [None] * 2
for bw in range(w_b):
for bx in range(x_b):
if k_i == 16:
for i in range(m):
w_ = ww.vload([bw, i, 0], 'uint8x16').astype(full_dtype)
x_ = xx.vload([bx, 0], 'uint8x16').astype(full_dtype)
if unipolar:
cnts = tvm.popcount(w_ & x_) - tvm.popcount(~w_ & x_)
else:
cnts = tvm.popcount(w_ & x_)
upper_half = tvm.call_pure_intrin(half_dtype, 'vectorhigh', cnts)
lower_half = tvm.call_pure_intrin(half_dtype, 'vectorlow', cnts)
cnts8[i] = upper_half + lower_half
for i in range(m//2):
cnts4[i] = tvm.call_llvm_intrin(half_dtype, vpadd,
args_1, cnts8[i*2], cnts8[i*2+1])
for i in range(m//4):
cnts2[i] = tvm.call_llvm_intrin(half_dtype, vpadd,
args_1, cnts4[i*2], cnts4[i*2+1])
cnts = tvm.call_pure_intrin(full_dtype, 'vectorcombine', cnts2[0], cnts2[1])
shifted_cnts = cnts << tvm.const(bw+bx, pack_dtype)
out = tvm.call_llvm_intrin(return_dtype, vpadalu,
args_2, zz.vload(0, return_dtype), shifted_cnts)
else: # ki == 8
for i in range(m):
w_ = ww.vload([bw, i, 0], 'uint8x8').astype(half_dtype)
x_ = xx.vload([bx, 0], 'uint8x8').astype(half_dtype)
if unipolar:
cnts8[i] = tvm.popcount(w_ & x_) - tvm.popcount(~w_ & x_)
else:
cnts8[i] = tvm.popcount(w_ & x_)
for i in range(m//2):
cnts4[i] = tvm.call_llvm_intrin(half_dtype, vpadd,
args_1, cnts8[i*2], cnts8[i*2+1])
for i in range(m//4):
cnts2[i] = tvm.call_llvm_intrin(half_dtype, vpadd,
args_1, cnts4[i*2], cnts4[i*2+1])
cnts = tvm.call_pure_intrin(full_dtype, 'vectorcombine', cnts2[0], cnts2[1])
shifted_cnts = cnts << tvm.const(bw+bx, pack_dtype)
out = tvm.call_llvm_intrin(return_dtype, vpadalu,
args_2, zz.vload(0, return_dtype), shifted_cnts)
irb.emit(zz.vstore(0, out))
return irb.get()
# body, reset, update
return _instr(0), _instr(1), _instr(2)
with tvm.build_config(offset_factor=1, partition_const_loop=True):
return tvm.decl_tensor_intrin(z.op, _intrin_func, binds={w: Wb, x:Xb, z:Zb})
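# Worked example (illustrative): for weight_bits = activation_bits = 2, the
# reduction over (bw, bx) shifts each 1-bit popcount dot product left by bw + bx,
# i.e. weights it by 2**(bw+bx), so the full 2-bit x 2-bit product is rebuilt from
# four binary dot products -- the standard bitserial decomposition that the NEON
# vpadd/vpadal sequence above accumulates into 16-bit lanes.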
# ARM-specific schedule that uses a custom microkernel
def _schedule_spatial_conv2d_nhwc(cfg, s, data_pad, data_vec, kernel_vec,
conv_out, output, last, unipolar):
_, _, _, _, _, IB, CI = data_vec.shape
_, KH, KW, KB, _, _ = kernel_vec.shape
KB = get_const_int(KB)
IB = get_const_int(IB)
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
##### Schedule data padding and packing
if data_pad is not None:
s[data_pad].compute_inline()
_, h, _, _, _, _, _ = s[data_vec].op.axis
cfg.define_split("tile_ah", cfg.axis(h), policy="all", num_outputs=2, max_factor=32)
oh, ih = cfg["tile_ah"].apply(s, data_vec, h)
s[data_vec].parallel(oh)
#### Schedule kernel packing
co, _, _, _, _, _ = s[kernel_vec].op.axis
cfg.define_split("tile_bco", cfg.axis(co), policy="all", num_outputs=2, max_factor=32)
oco, ico = cfg["tile_bco"].apply(s, kernel_vec, co)
s[kernel_vec].parallel(oco)
##### Schedule Convolution
n, oh, ow, co, vh, vw, vc = s[conv_out].op.axis
kh, kw, kb, ib, ci = s[conv_out].op.reduce_axis
ci_o, ci_i = cfg['tile_ci'].apply(s, conv_out, ci)
re_axes = cfg["reorder_0"].apply(s, conv_out,
[n, oh, ow, co, vh, vw, kh, kw, ci_o, kb, ib, vc, ci_i])
# Use microkernel
kfactor = cfg['tile_ci'].size[1]
if kfactor % 8 == 0:
pc = _intrin_popcount(VC, kfactor, KB, IB, unipolar)
s[conv_out].tensorize(kb, pc)
n, h, w, co = s[last].op.axis
co, vc = cfg['tile_co'].apply(s, last, co)
oh, vh = cfg['tile_oh'].apply(s, last, h)
ow, vw = cfg['tile_ow'].apply(s, last, w)
s[last].reorder(n, oh, ow, co, vh, vw, vc)
s[last].vectorize(vc)
if last != output:
s[last].compute_inline()
s[conv_out].compute_at(s[last], co)
s[last].parallel(oh)
s = s.normalize()
return s
@autotvm.register_topi_schedule(generic.nn.schedule_bitserial_conv2d_nhwc, 'arm_cpu', 'direct')
def schedule_bitserial_conv2d_nhwc(cfg, outs):
"""Arm cpu schedule for bitserial conv2d"""
s = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
if 'spatial_bitserial_conv_nhwc' in op.tag:
output = op.output(0)
conv_out = op.input_tensors[0]
kernel_vec = conv_out.op.input_tensors[0]
kernel_q = kernel_vec.op.input_tensors[0]
data_vec = conv_out.op.input_tensors[1]
data_q = data_vec.op.input_tensors[0]
data = data_q.op.input_tensors[0]
data_pad = None
if isinstance(data_q.op, tvm.tensor.ComputeOp) and "pad" in data_q.op.tag:
data_pad = data_q
data_q = data
data = data.op.input_tensors[0]
unipolar = "unipolar" in conv_out.op.tag
_schedule_spatial_conv2d_nhwc(cfg, s, data_pad, data_vec, kernel_vec,
conv_out, output, outs[0], unipolar)
scheduled_ops.append(op)
traverse(outs[0].op)
return s
|
the-stack_0_6670 | import copy
from math import floor
from Objects.Object import Object
class Repeater(Object):
def __init__(self, isVisible, position, content, pixellength, numRepeats=-1, spacing=0):
super().__init__(isVisible, position, content)
self.numRepeats = numRepeats
self.spacing = spacing
self.pixellength = pixellength
def getContent(self):
max_reps = floor(self.pixellength / (len(self.content) + self.spacing))
reps = max_reps if self.numRepeats == -1 else min(self.numRepeats, max_reps)
full = copy.deepcopy(self.content)
full.extend([[-1,-1,-1]]*self.spacing)
return full * reps
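# Minimal usage sketch (illustrative only): the constructor arguments below are made
# up, and it is assumed that Object.__init__ stores `content` on self.content, as
# getContent() implies.
if __name__ == "__main__":
    demo = Repeater(True, 0, [[255, 0, 0], [0, 255, 0]], pixellength=30, spacing=1)
    # (2 colour pixels + 1 blank spacer) repeated floor(30 / 3) = 10 times -> 30 entries
    print(len(demo.getContent()))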
|
the-stack_0_6671 | import adv_test
import adv
from slot.d import *
def module():
return Curran
class Curran(adv.Adv):
comment = "no fs"
a1 = ('od',0.13)
a3 = ('lo',0.5)
if __name__ == '__main__':
conf = {}
conf['acl'] = """
`s1
`s2, seq=2
`s3
"""
conf['slot.d'] = Shinobi()
adv_test.test(module(), conf, verbose=-2)
|
the-stack_0_6672 | def _apply_entities(text, entities, escape_map, format_map):
def inside_entities(i):
return any(map(lambda e:
e['offset'] <= i < e['offset']+e['length'],
entities))
# Split string into char sequence and escape in-place to
# preserve index positions.
seq = list(map(lambda c,i:
escape_map[c] # escape special characters
if c in escape_map and not inside_entities(i)
else c,
list(text), # split string to char sequence
range(0,len(text)))) # along with each char's index
# Ensure smaller offsets come first
sorted_entities = sorted(entities, key=lambda e: e['offset'])
offset = 0
result = ''
for e in sorted_entities:
f,n,t = e['offset'], e['length'], e['type']
result += ''.join(seq[offset:f])
if t in format_map:
# apply format
result += format_map[t](''.join(seq[f:f+n]), e)
else:
result += ''.join(seq[f:f+n])
offset = f + n
result += ''.join(seq[offset:])
return result
def apply_entities_as_markdown(text, entities):
"""
Format text as Markdown. Also take care of escaping special characters.
Returned value can be passed to :meth:`.Bot.sendMessage` with appropriate
``parse_mode``.
:param text:
plain text
:param entities:
a list of `MessageEntity <https://core.telegram.org/bots/api#messageentity>`_ objects
"""
escapes = {'*': '\\*',
'_': '\\_',
'[': '\\[',
'`': '\\`',}
formatters = {'bold': lambda s,e: '*'+s+'*',
'italic': lambda s,e: '_'+s+'_',
'text_link': lambda s,e: '['+s+']('+e['url']+')',
'text_mention': lambda s,e: '['+s+'](tg://user?id='+str(e['user']['id'])+')',
'code': lambda s,e: '`'+s+'`',
'pre': lambda s,e: '```text\n'+s+'```'}
return _apply_entities(text, entities, escapes, formatters)
def apply_entities_as_html(text, entities):
"""
Format text as HTML. Also take care of escaping special characters.
Returned value can be passed to :meth:`.Bot.sendMessage` with appropriate
``parse_mode``.
:param text:
plain text
:param entities:
a list of `MessageEntity <https://core.telegram.org/bots/api#messageentity>`_ objects
"""
escapes = {'<': '<',
'>': '>',
'&': '&',}
formatters = {'bold': lambda s,e: '<b>'+s+'</b>',
'italic': lambda s,e: '<i>'+s+'</i>',
'text_link': lambda s,e: '<a href="'+e['url']+'">'+s+'</a>',
'text_mention': lambda s,e: '<a href="tg://user?id='+str(e['user']['id'])+'">'+s+'</a>',
'code': lambda s,e: '<code>'+s+'</code>',
'pre': lambda s,e: '<pre>'+s+'</pre>'}
return _apply_entities(text, entities, escapes, formatters)
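# Small illustrative demo (not part of the original module): the sample text and
# entity offsets below are made up, but they follow the Bot API MessageEntity shape
# ('offset', 'length', 'type', plus 'url' for text_link) that _apply_entities expects.
if __name__ == "__main__":
    sample_text = "Hello *world*, see docs"
    sample_entities = [
        {"offset": 0, "length": 5, "type": "bold"},
        {"offset": 19, "length": 4, "type": "text_link", "url": "https://example.com"},
    ]
    # -> "*Hello* \*world\*, see [docs](https://example.com)"
    print(apply_entities_as_markdown(sample_text, sample_entities))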
|
the-stack_0_6673 | """Support for (EMEA/EU-based) Honeywell TCC climate systems.
Such systems include evohome, Round Thermostat, and others.
"""
from datetime import datetime as dt, timedelta
import logging
import re
from typing import Any, Dict, Optional, Tuple
import aiohttp.client_exceptions
import evohomeasync
import evohomeasync2
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
HTTP_SERVICE_UNAVAILABLE,
HTTP_TOO_MANY_REQUESTS,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.service import verify_domain_control
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
import homeassistant.util.dt as dt_util
from .const import DOMAIN, EVO_FOLLOW, GWS, STORAGE_KEY, STORAGE_VERSION, TCS
_LOGGER = logging.getLogger(__name__)
ACCESS_TOKEN = "access_token"
ACCESS_TOKEN_EXPIRES = "access_token_expires"
REFRESH_TOKEN = "refresh_token"
USER_DATA = "user_data"
CONF_LOCATION_IDX = "location_idx"
SCAN_INTERVAL_DEFAULT = timedelta(seconds=300)
SCAN_INTERVAL_MINIMUM = timedelta(seconds=60)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_LOCATION_IDX, default=0): cv.positive_int,
vol.Optional(
CONF_SCAN_INTERVAL, default=SCAN_INTERVAL_DEFAULT
): vol.All(cv.time_period, vol.Range(min=SCAN_INTERVAL_MINIMUM)),
}
)
},
extra=vol.ALLOW_EXTRA,
)
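# An illustrative configuration.yaml entry accepted by CONFIG_SCHEMA above
# (DOMAIN is assumed to be "evohome"; credentials and interval are placeholders):
#
#   evohome:
#     username: user@example.com
#     password: !secret evohome_password
#     scan_interval: 300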
ATTR_SYSTEM_MODE = "mode"
ATTR_DURATION_DAYS = "period"
ATTR_DURATION_HOURS = "duration"
ATTR_ZONE_TEMP = "setpoint"
ATTR_DURATION_UNTIL = "duration"
SVC_REFRESH_SYSTEM = "refresh_system"
SVC_SET_SYSTEM_MODE = "set_system_mode"
SVC_RESET_SYSTEM = "reset_system"
SVC_SET_ZONE_OVERRIDE = "set_zone_override"
SVC_RESET_ZONE_OVERRIDE = "clear_zone_override"
RESET_ZONE_OVERRIDE_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.entity_id})
SET_ZONE_OVERRIDE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_ZONE_TEMP): vol.All(
vol.Coerce(float), vol.Range(min=4.0, max=35.0)
),
vol.Optional(ATTR_DURATION_UNTIL): vol.All(
cv.time_period, vol.Range(min=timedelta(days=0), max=timedelta(days=1)),
),
}
)
# system mode schemas are built dynamically, below
def _local_dt_to_aware(dt_naive: dt) -> dt:
dt_aware = dt_util.now() + (dt_naive - dt.now())
if dt_aware.microsecond >= 500000:
dt_aware += timedelta(seconds=1)
return dt_aware.replace(microsecond=0)
def _dt_to_local_naive(dt_aware: dt) -> dt:
dt_naive = dt.now() + (dt_aware - dt_util.now())
if dt_naive.microsecond >= 500000:
dt_naive += timedelta(seconds=1)
return dt_naive.replace(microsecond=0)
def convert_until(status_dict, until_key) -> str:
"""Convert datetime string from "%Y-%m-%dT%H:%M:%SZ" to local/aware/isoformat."""
if until_key in status_dict: # only present for certain modes
dt_utc_naive = dt_util.parse_datetime(status_dict[until_key])
status_dict[until_key] = dt_util.as_local(dt_utc_naive).isoformat()
def convert_dict(dictionary: Dict[str, Any]) -> Dict[str, Any]:
"""Recursively convert a dict's keys to snake_case."""
def convert_key(key: str) -> str:
"""Convert a string to snake_case."""
string = re.sub(r"[\-\.\s]", "_", str(key))
return (string[0]).lower() + re.sub(
r"[A-Z]", lambda matched: "_" + matched.group(0).lower(), string[1:]
)
return {
(convert_key(k) if isinstance(k, str) else k): (
convert_dict(v) if isinstance(v, dict) else v
)
for k, v in dictionary.items()
}
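# For example (illustrative input): convert_dict({"systemModeStatus": {"timeUntil": 1}})
# returns {"system_mode_status": {"time_until": 1}}.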
def _handle_exception(err) -> bool:
"""Return False if the exception can't be ignored."""
try:
raise err
except evohomeasync2.AuthenticationError:
_LOGGER.error(
"Failed to authenticate with the vendor's server. "
"Check your network and the vendor's service status page. "
"Also check that your username and password are correct. "
"Message is: %s",
err,
)
return False
except aiohttp.ClientConnectionError:
# this appears to be a common occurrence with the vendor's servers
_LOGGER.warning(
"Unable to connect with the vendor's server. "
"Check your network and the vendor's service status page. "
"Message is: %s",
err,
)
return False
except aiohttp.ClientResponseError:
if err.status == HTTP_SERVICE_UNAVAILABLE:
_LOGGER.warning(
"The vendor says their server is currently unavailable. "
"Check the vendor's service status page."
)
return False
if err.status == HTTP_TOO_MANY_REQUESTS:
_LOGGER.warning(
"The vendor's API rate limit has been exceeded. "
"If this message persists, consider increasing the %s.",
CONF_SCAN_INTERVAL,
)
return False
raise # we don't expect/handle any other Exceptions
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Create a (EMEA/EU-based) Honeywell TCC system."""
async def load_auth_tokens(store) -> Tuple[Dict, Optional[Dict]]:
app_storage = await store.async_load()
tokens = dict(app_storage if app_storage else {})
if tokens.pop(CONF_USERNAME, None) != config[DOMAIN][CONF_USERNAME]:
            # any tokens won't be valid, and the store might be corrupt
await store.async_save({})
return ({}, None)
# evohomeasync2 requires naive/local datetimes as strings
if tokens.get(ACCESS_TOKEN_EXPIRES) is not None:
tokens[ACCESS_TOKEN_EXPIRES] = _dt_to_local_naive(
dt_util.parse_datetime(tokens[ACCESS_TOKEN_EXPIRES])
)
user_data = tokens.pop(USER_DATA, None)
return (tokens, user_data)
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
tokens, user_data = await load_auth_tokens(store)
client_v2 = evohomeasync2.EvohomeClient(
config[DOMAIN][CONF_USERNAME],
config[DOMAIN][CONF_PASSWORD],
**tokens,
session=async_get_clientsession(hass),
)
try:
await client_v2.login()
except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:
_handle_exception(err)
return False
finally:
config[DOMAIN][CONF_PASSWORD] = "REDACTED"
loc_idx = config[DOMAIN][CONF_LOCATION_IDX]
try:
loc_config = client_v2.installation_info[loc_idx][GWS][0][TCS][0]
except IndexError:
_LOGGER.error(
"Config error: '%s' = %s, but the valid range is 0-%s. "
"Unable to continue. Fix any configuration errors and restart HA.",
CONF_LOCATION_IDX,
loc_idx,
len(client_v2.installation_info) - 1,
)
return False
_LOGGER.debug("Config = %s", loc_config)
client_v1 = evohomeasync.EvohomeClient(
client_v2.username,
client_v2.password,
user_data=user_data,
session=async_get_clientsession(hass),
)
hass.data[DOMAIN] = {}
hass.data[DOMAIN]["broker"] = broker = EvoBroker(
hass, client_v2, client_v1, store, config[DOMAIN]
)
await broker.save_auth_tokens()
await broker.async_update() # get initial state
hass.async_create_task(async_load_platform(hass, "climate", DOMAIN, {}, config))
if broker.tcs.hotwater:
hass.async_create_task(
async_load_platform(hass, "water_heater", DOMAIN, {}, config)
)
hass.helpers.event.async_track_time_interval(
broker.async_update, config[DOMAIN][CONF_SCAN_INTERVAL]
)
setup_service_functions(hass, broker)
return True
@callback
def setup_service_functions(hass: HomeAssistantType, broker):
"""Set up the service handlers for the system/zone operating modes.
Not all Honeywell TCC-compatible systems support all operating modes. In addition,
each mode will require any of four distinct service schemas. This has to be
enumerated before registering the appropriate handlers.
It appears that all TCC-compatible systems support the same three zones modes.
"""
@verify_domain_control(hass, DOMAIN)
async def force_refresh(call) -> None:
"""Obtain the latest state data via the vendor's RESTful API."""
await broker.async_update()
@verify_domain_control(hass, DOMAIN)
async def set_system_mode(call) -> None:
"""Set the system mode."""
payload = {
"unique_id": broker.tcs.systemId,
"service": call.service,
"data": call.data,
}
async_dispatcher_send(hass, DOMAIN, payload)
@verify_domain_control(hass, DOMAIN)
async def set_zone_override(call) -> None:
"""Set the zone override (setpoint)."""
entity_id = call.data[ATTR_ENTITY_ID]
registry = await hass.helpers.entity_registry.async_get_registry()
registry_entry = registry.async_get(entity_id)
if registry_entry is None or registry_entry.platform != DOMAIN:
raise ValueError(f"'{entity_id}' is not a known {DOMAIN} entity")
if registry_entry.domain != "climate":
raise ValueError(f"'{entity_id}' is not an {DOMAIN} controller/zone")
payload = {
"unique_id": registry_entry.unique_id,
"service": call.service,
"data": call.data,
}
async_dispatcher_send(hass, DOMAIN, payload)
hass.services.async_register(DOMAIN, SVC_REFRESH_SYSTEM, force_refresh)
# Enumerate which operating modes are supported by this system
modes = broker.config["allowedSystemModes"]
# Not all systems support "AutoWithReset": register this handler only if required
if [m["systemMode"] for m in modes if m["systemMode"] == "AutoWithReset"]:
hass.services.async_register(DOMAIN, SVC_RESET_SYSTEM, set_system_mode)
system_mode_schemas = []
modes = [m for m in modes if m["systemMode"] != "AutoWithReset"]
# Permanent-only modes will use this schema
perm_modes = [m["systemMode"] for m in modes if not m["canBeTemporary"]]
if perm_modes: # any of: "Auto", "HeatingOff": permanent only
schema = vol.Schema({vol.Required(ATTR_SYSTEM_MODE): vol.In(perm_modes)})
system_mode_schemas.append(schema)
modes = [m for m in modes if m["canBeTemporary"]]
# These modes are set for a number of hours (or indefinitely): use this schema
temp_modes = [m["systemMode"] for m in modes if m["timingMode"] == "Duration"]
if temp_modes: # any of: "AutoWithEco", permanent or for 0-24 hours
schema = vol.Schema(
{
vol.Required(ATTR_SYSTEM_MODE): vol.In(temp_modes),
vol.Optional(ATTR_DURATION_HOURS): vol.All(
cv.time_period,
vol.Range(min=timedelta(hours=0), max=timedelta(hours=24)),
),
}
)
system_mode_schemas.append(schema)
# These modes are set for a number of days (or indefinitely): use this schema
temp_modes = [m["systemMode"] for m in modes if m["timingMode"] == "Period"]
if temp_modes: # any of: "Away", "Custom", "DayOff", permanent or for 1-99 days
schema = vol.Schema(
{
vol.Required(ATTR_SYSTEM_MODE): vol.In(temp_modes),
vol.Optional(ATTR_DURATION_DAYS): vol.All(
cv.time_period,
vol.Range(min=timedelta(days=1), max=timedelta(days=99)),
),
}
)
system_mode_schemas.append(schema)
if system_mode_schemas:
hass.services.async_register(
DOMAIN,
SVC_SET_SYSTEM_MODE,
set_system_mode,
schema=vol.Any(*system_mode_schemas),
)
# The zone modes are consistent across all systems and use the same schema
hass.services.async_register(
DOMAIN,
SVC_RESET_ZONE_OVERRIDE,
set_zone_override,
schema=RESET_ZONE_OVERRIDE_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SVC_SET_ZONE_OVERRIDE,
set_zone_override,
schema=SET_ZONE_OVERRIDE_SCHEMA,
)
class EvoBroker:
"""Container for evohome client and data."""
def __init__(self, hass, client, client_v1, store, params) -> None:
"""Initialize the evohome client and its data structure."""
self.hass = hass
self.client = client
self.client_v1 = client_v1
self._store = store
self.params = params
loc_idx = params[CONF_LOCATION_IDX]
self.config = client.installation_info[loc_idx][GWS][0][TCS][0]
self.tcs = client.locations[loc_idx]._gateways[0]._control_systems[0]
self.temps = {}
async def save_auth_tokens(self) -> None:
"""Save access tokens and session IDs to the store for later use."""
# evohomeasync2 uses naive/local datetimes
access_token_expires = _local_dt_to_aware(self.client.access_token_expires)
app_storage = {CONF_USERNAME: self.client.username}
app_storage[REFRESH_TOKEN] = self.client.refresh_token
app_storage[ACCESS_TOKEN] = self.client.access_token
app_storage[ACCESS_TOKEN_EXPIRES] = access_token_expires.isoformat()
if self.client_v1 and self.client_v1.user_data:
app_storage[USER_DATA] = {
"userInfo": {"userID": self.client_v1.user_data["userInfo"]["userID"]},
"sessionId": self.client_v1.user_data["sessionId"],
}
else:
app_storage[USER_DATA] = None
await self._store.async_save(app_storage)
async def call_client_api(self, api_function, refresh=True) -> Any:
"""Call a client API."""
try:
result = await api_function
except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:
if not _handle_exception(err):
return
if refresh:
self.hass.helpers.event.async_call_later(1, self.async_update())
return result
async def _update_v1(self, *args, **kwargs) -> None:
"""Get the latest high-precision temperatures of the default Location."""
def get_session_id(client_v1) -> Optional[str]:
user_data = client_v1.user_data if client_v1 else None
return user_data.get("sessionId") if user_data else None
session_id = get_session_id(self.client_v1)
try:
temps = list(await self.client_v1.temperatures(force_refresh=True))
except aiohttp.ClientError as err:
_LOGGER.warning(
"Unable to obtain the latest high-precision temperatures. "
"Check your network and the vendor's service status page. "
"Proceeding with low-precision temperatures. "
"Message is: %s",
err,
)
self.temps = None # these are now stale, will fall back to v2 temps
else:
if (
str(self.client_v1.location_id)
!= self.client.locations[self.params[CONF_LOCATION_IDX]].locationId
):
_LOGGER.warning(
"The v2 API's configured location doesn't match "
"the v1 API's default location (there is more than one location), "
"so the high-precision feature will be disabled"
)
self.client_v1 = self.temps = None
else:
self.temps = {str(i["id"]): i["temp"] for i in temps}
_LOGGER.debug("Temperatures = %s", self.temps)
if session_id != get_session_id(self.client_v1):
await self.save_auth_tokens()
async def _update_v2(self, *args, **kwargs) -> None:
"""Get the latest modes, temperatures, setpoints of a Location."""
access_token = self.client.access_token
loc_idx = self.params[CONF_LOCATION_IDX]
try:
status = await self.client.locations[loc_idx].status()
except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:
_handle_exception(err)
else:
async_dispatcher_send(self.hass, DOMAIN)
_LOGGER.debug("Status = %s", status[GWS][0][TCS][0])
if access_token != self.client.access_token:
await self.save_auth_tokens()
async def async_update(self, *args, **kwargs) -> None:
"""Get the latest state data of an entire Honeywell TCC Location.
This includes state data for a Controller and all its child devices, such as the
operating mode of the Controller and the current temp of its children (e.g.
Zones, DHW controller).
"""
await self._update_v2()
if self.client_v1:
await self._update_v1()
# inform the evohome devices that state data has been updated
async_dispatcher_send(self.hass, DOMAIN)
class EvoDevice(Entity):
"""Base for any evohome device.
This includes the Controller, (up to 12) Heating Zones and (optionally) a
DHW controller.
"""
def __init__(self, evo_broker, evo_device) -> None:
"""Initialize the evohome entity."""
self._evo_device = evo_device
self._evo_broker = evo_broker
self._evo_tcs = evo_broker.tcs
self._unique_id = self._name = self._icon = self._precision = None
self._supported_features = None
self._device_state_attrs = {}
async def async_refresh(self, payload: Optional[dict] = None) -> None:
"""Process any signals."""
if payload is None:
self.async_schedule_update_ha_state(force_refresh=True)
return
if payload["unique_id"] != self._unique_id:
return
if payload["service"] in [SVC_SET_ZONE_OVERRIDE, SVC_RESET_ZONE_OVERRIDE]:
await self.async_zone_svc_request(payload["service"], payload["data"])
return
await self.async_tcs_svc_request(payload["service"], payload["data"])
async def async_tcs_svc_request(self, service: dict, data: dict) -> None:
"""Process a service request (system mode) for a controller."""
raise NotImplementedError
async def async_zone_svc_request(self, service: dict, data: dict) -> None:
"""Process a service request (setpoint override) for a zone."""
raise NotImplementedError
@property
def should_poll(self) -> bool:
"""Evohome entities should not be polled."""
return False
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return self._unique_id
@property
def name(self) -> str:
"""Return the name of the evohome entity."""
return self._name
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the evohome-specific state attributes."""
status = self._device_state_attrs
if "systemModeStatus" in status:
convert_until(status["systemModeStatus"], "timeUntil")
if "setpointStatus" in status:
convert_until(status["setpointStatus"], "until")
if "stateStatus" in status:
convert_until(status["stateStatus"], "until")
return {"status": convert_dict(status)}
@property
def icon(self) -> str:
"""Return the icon to use in the frontend UI."""
return self._icon
@property
def supported_features(self) -> int:
"""Get the flag of supported features of the device."""
return self._supported_features
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass."""
async_dispatcher_connect(self.hass, DOMAIN, self.async_refresh)
@property
def precision(self) -> float:
"""Return the temperature precision to use in the frontend UI."""
return self._precision
@property
def temperature_unit(self) -> str:
"""Return the temperature unit to use in the frontend UI."""
return TEMP_CELSIUS
class EvoChild(EvoDevice):
"""Base for any evohome child.
This includes (up to 12) Heating Zones and (optionally) a DHW controller.
"""
def __init__(self, evo_broker, evo_device) -> None:
"""Initialize a evohome Controller (hub)."""
super().__init__(evo_broker, evo_device)
self._schedule = {}
self._setpoints = {}
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature of a Zone."""
if not self._evo_device.temperatureStatus["isAvailable"]:
return None
if self._evo_broker.temps:
return self._evo_broker.temps[self._evo_device.zoneId]
return self._evo_device.temperatureStatus["temperature"]
@property
def setpoints(self) -> Dict[str, Any]:
"""Return the current/next setpoints from the schedule.
Only Zones & DHW controllers (but not the TCS) can have schedules.
"""
if not self._schedule["DailySchedules"]:
return {} # no schedule {'DailySchedules': []}, so no scheduled setpoints
day_time = dt_util.now()
day_of_week = int(day_time.strftime("%w")) # 0 is Sunday
time_of_day = day_time.strftime("%H:%M:%S")
try:
# Iterate today's switchpoints until past the current time of day...
day = self._schedule["DailySchedules"][day_of_week]
sp_idx = -1 # last switchpoint of the day before
for i, tmp in enumerate(day["Switchpoints"]):
if time_of_day > tmp["TimeOfDay"]:
sp_idx = i # current setpoint
else:
break
            # Did the current SP start yesterday? Does the next SP start tomorrow?
this_sp_day = -1 if sp_idx == -1 else 0
next_sp_day = 1 if sp_idx + 1 == len(day["Switchpoints"]) else 0
for key, offset, idx in [
("this", this_sp_day, sp_idx),
("next", next_sp_day, (sp_idx + 1) * (1 - next_sp_day)),
]:
sp_date = (day_time + timedelta(days=offset)).strftime("%Y-%m-%d")
day = self._schedule["DailySchedules"][(day_of_week + offset) % 7]
switchpoint = day["Switchpoints"][idx]
dt_local_aware = _local_dt_to_aware(
dt_util.parse_datetime(f"{sp_date}T{switchpoint['TimeOfDay']}")
)
self._setpoints[f"{key}_sp_from"] = dt_local_aware.isoformat()
try:
self._setpoints[f"{key}_sp_temp"] = switchpoint["heatSetpoint"]
except KeyError:
self._setpoints[f"{key}_sp_state"] = switchpoint["DhwState"]
except IndexError:
self._setpoints = {}
_LOGGER.warning(
"Failed to get setpoints, report as an issue if this error persists",
exc_info=True,
)
return self._setpoints
async def _update_schedule(self) -> None:
"""Get the latest schedule, if any."""
if "DailySchedules" in self._schedule and not self._schedule["DailySchedules"]:
if not self._evo_device.setpointStatus["setpointMode"] == EVO_FOLLOW:
return # avoid unnecessary I/O - there's nothing to update
self._schedule = await self._evo_broker.call_client_api(
self._evo_device.schedule(), refresh=False
)
_LOGGER.debug("Schedule['%s'] = %s", self.name, self._schedule)
async def async_update(self) -> None:
"""Get the latest state data."""
next_sp_from = self._setpoints.get("next_sp_from", "2000-01-01T00:00:00+00:00")
if dt_util.now() >= dt_util.parse_datetime(next_sp_from):
await self._update_schedule() # no schedule, or it's out-of-date
self._device_state_attrs = {"setpoints": self.setpoints}
|
the-stack_0_6674 | import os
import genapi
import numpy_api
from genapi import \
TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
h_template = r"""
#ifdef _UMATHMODULE
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
#else
NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
#endif
%s
#else
#if defined(PY_UFUNC_UNIQUE_SYMBOL)
#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL
#endif
#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC)
extern void **PyUFunc_API;
#else
#if defined(PY_UFUNC_UNIQUE_SYMBOL)
void **PyUFunc_API;
#else
static void **PyUFunc_API=NULL;
#endif
#endif
%s
static int
_import_umath(void)
{
PyObject *numpy = PyImport_ImportModule("numpy.core.umath");
PyObject *c_api = NULL;
if (numpy == NULL) return -1;
c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
if (c_api == NULL) {Py_DECREF(numpy); return -1;}
if (PyCObject_Check(c_api)) {
PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api);
}
Py_DECREF(c_api);
Py_DECREF(numpy);
if (PyUFunc_API == NULL) return -1;
return 0;
}
#define import_umath() { UFUNC_NOFPE if (_import_umath() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); return; }}
#define import_umath1(ret) { UFUNC_NOFPE if (_import_umath() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); return ret; }}
#define import_umath2(msg, ret) { UFUNC_NOFPE if (_import_umath() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; }}
#define import_ufunc() { UFUNC_NOFPE if (_import_umath() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); }}
#endif
"""
c_template = r"""
/* These pointers will be stored in the C-object for use in other
extension modules
*/
void *PyUFunc_API[] = {
%s
};
"""
def generate_api(output_dir, force=False):
basename = 'ufunc_api'
h_file = os.path.join(output_dir, '__%s.h' % basename)
c_file = os.path.join(output_dir, '__%s.c' % basename)
d_file = os.path.join(output_dir, '%s.txt' % basename)
targets = (h_file, c_file, d_file)
sources = ['ufunc_api_order.txt']
if (not force and not genapi.should_rebuild(targets, sources + [__file__])):
return targets
else:
do_generate_api(targets, sources)
return targets
def do_generate_api(targets, sources):
header_file = targets[0]
c_file = targets[1]
doc_file = targets[2]
ufunc_api_index = genapi.merge_api_dicts((
numpy_api.ufunc_funcs_api,
numpy_api.ufunc_types_api))
genapi.check_api_dict(ufunc_api_index)
ufunc_api_list = genapi.get_api_functions('UFUNC_API', numpy_api.ufunc_funcs_api)
# Create dict name -> *Api instance
ufunc_api_dict = {}
api_name = 'PyUFunc_API'
for f in ufunc_api_list:
name = f.name
index = ufunc_api_index[name]
ufunc_api_dict[name] = FunctionApi(f.name, index, f.return_type,
f.args, api_name)
for name, index in numpy_api.ufunc_types_api.items():
ufunc_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
# set up object API
module_list = []
extension_list = []
init_list = []
for name, index in genapi.order_dict(ufunc_api_index):
api_item = ufunc_api_dict[name]
extension_list.append(api_item.define_from_array_api_string())
init_list.append(api_item.array_api_define())
module_list.append(api_item.internal_define())
# Write to header
fid = open(header_file, 'w')
s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
fid.write(s)
fid.close()
# Write to c-code
fid = open(c_file, 'w')
s = c_template % ',\n'.join(init_list)
fid.write(s)
fid.close()
# Write to documentation
fid = open(doc_file, 'w')
fid.write('''
=================
Numpy Ufunc C-API
=================
''')
for func in ufunc_api_list:
fid.write(func.to_ReST())
fid.write('\n\n')
fid.close()
return targets
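# Typical invocation during the numpy build (output directory is illustrative):
#   generate_api('build/src/numpy/core')
#   -> ('build/src/numpy/core/__ufunc_api.h',
#       'build/src/numpy/core/__ufunc_api.c',
#       'build/src/numpy/core/ufunc_api.txt')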
|
the-stack_0_6677 | #!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 Abram Hindle, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
# Wonbin Jeong
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib.parse as parseu
def help():
print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
class HTTPClient(object):
#def get_host_port(self,url):
def connect(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
return None
def get_code(self, data):
scheme = data.split("\r\n")[0]
code = scheme.split()[1]
return int(code)
def get_headers(self,data):
return None
def get_body(self, data):
body = data.split("\r\n\r\n")[1]
return body
def sendall(self, data):
self.socket.sendall(data.encode('utf-8'))
def close(self):
self.socket.close()
# read everything from the socket
def recvall(self, sock):
buffer = bytearray()
done = False
while not done:
part = sock.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
return buffer.decode('utf-8')
def GET(self, url, args=None):
code = 500
body = ""
parse_result = parseu.urlparse(url)
# print(parse_result)
host = parse_result.netloc.split(":")[0]
path = parse_result.path
if path == "":
path = "/"
port = parse_result.port
if port == None and parse_result.scheme == "https":
port = 443
if port == None and parse_result.scheme == "http":
port = 80
# print("Port {}\n Path {}\n".format(port, path))
self.connect(host, port)
req_header = "GET {} HTTP/1.1\r\n".format(path)
req_header += "Host: {}\r\n".format(host)
req_header += "Accept: */*\r\n"
req_header += "Connection: close\r\n\r\n"
self.sendall(req_header)
response = self.recvall(self.socket)
# print(response)
self.close()
code = self.get_code(response)
body = self.get_body(response)
print(code)
print(body)
return HTTPResponse(code, body)
def POST(self, url, args=None):
code = 500
body = ""
parse_result = parseu.urlparse(url)
# print(parse_result)
host = parse_result.netloc.split(":")[0]
        path = parse_result.path
        if path == "":
            path = "/"
port = parse_result.port
if port == None and parse_result.scheme == "https":
port = 443
if port == None and parse_result.scheme == "http":
port = 80
# print("Port {}\n Path {}\n".format(port, path))
self.connect(host, port)
if args == None:
args = parseu.urlencode("")
else:
args = parseu.urlencode(args)
req_header = "POST {} HTTP/1.1\r\n".format(path)
req_header += "Host: {}\r\n".format(host)
req_header += "Content-Type: application/x-www-form-urlencoded\r\n"
req_header += "Content-Length: {}\r\n".format(len(args))
req_header += "Connection: close\r\n\r\n"
req_header += args
self.sendall(req_header)
response = self.recvall(self.socket)
self.close()
code = self.get_code(response)
body = self.get_body(response)
print(code)
print(body)
return HTTPResponse(code, body)
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
print(client.command( sys.argv[2], sys.argv[1] ))
else:
print(client.command( sys.argv[1] ))
|
the-stack_0_6679 | from django.urls import path
from django.contrib.auth import views as views_auth
from . import views_profile
app_name = 'profiles'
urlpatterns = [
path('ورود/',
views_auth.LoginView.as_view(template_name='profiles/login.html'),
name='ورود'),
path('خروج/', views_auth.LogoutView.as_view(), name='خروج'),
path('ثبت/', views_profile.SignUp.as_view(), name='ثبت'),
]
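# The Persian route segments above map to: 'ورود' = login, 'خروج' = logout, 'ثبت' = sign up.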
|
the-stack_0_6682 | # This is basically a copy-paste from https://github.com/facebookresearch/detr/blob/master/models/matcher.py
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import sys
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
sys.path.append('./util')
from seg_ops import seg_bxw_to_cxw, seg_cxw_to_x0x1, generalized_seg_iou
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_bsegment: float = 1, cost_giou: float = 1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
            cost_bsegment: This is the relative weight of the L1 error of the bounding segment coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding segment in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bsegment = cost_bsegment
self.cost_giou = cost_giou
        assert cost_class != 0 or cost_bsegment != 0 or cost_giou != 0, "all costs can't be 0"
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_segments": Tensor of dim [batch_size, num_queries, 2] with the predicted segment coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_segments] (where num_target_segments is the number of ground-truth
pulses in the target) containing the class labels
"segments": Tensor of dim [num_target_segments, 2] containing the target segments coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_segments)
"""
batch_size, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
out_bsegment = outputs["pred_segments"].flatten(0, 1) # [batch_size * num_queries, 2]
# Also concat the target labels and segments
tgt_ids = torch.cat([v["labels"] for v in targets]).long()
# [num_target_segments_(1) + num_target_segments_(2) + ... + num_target_segments_(batch_size)]
tgt_bsegment = torch.cat([v["segments"] for v in targets])
# [num_target_segments_(1) + num_target_segments_(2) + ... + num_target_segments_(batch_size), 2]
# Compute the classification cost. Contrary to the loss, we don't use the Negative Log-Likelihood (NLL),
        # but approximate it by 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, so it can be omitted.
cost_class = -out_prob[:, tgt_ids]
# Compute the L1 cost between segments
cost_bsegment = torch.cdist(out_bsegment, tgt_bsegment, p=1)
# Compute the giou cost betwen segments
cost_giou = -generalized_seg_iou(seg_cxw_to_x0x1(seg_bxw_to_cxw(out_bsegment)), seg_cxw_to_x0x1(seg_bxw_to_cxw(tgt_bsegment)))
# Final cost matrix
C = self.cost_bsegment * cost_bsegment + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(batch_size, num_queries, -1).cpu()
sizes = [len(v['segments']) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
def build_matcher(args):
return HungarianMatcher(cost_class=args.set_cost_class, cost_bsegment=args.set_cost_bsegment, cost_giou=args.set_cost_giou)
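# Illustrative smoke test (not from the original repo): random logits/segments with
# 1 batch, 2 queries, 3 classes and a single target. It assumes segments are
# normalized (begin, width) pairs, which is what the seg_bxw_to_cxw call above suggests.
if __name__ == "__main__":
    matcher = HungarianMatcher(cost_class=1, cost_bsegment=5, cost_giou=2)
    outputs = {"pred_logits": torch.randn(1, 2, 3),
               "pred_segments": torch.rand(1, 2, 2)}
    targets = [{"labels": torch.tensor([1]), "segments": torch.rand(1, 2)}]
    # One (pred_idx, tgt_idx) pair per batch element, each of length min(num_queries, num_targets)
    print(matcher(outputs, targets))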
|
the-stack_0_6683 | import os
import sys
import click
from zipfile import ZipFile, ZIP_DEFLATED
import pathlib
import hashlib
import re
from loguetools import og, xd, common
from loguetools import version
XD_PATCH_LENGTH = 1024
def explode(filename, match_name, match_ident, prepend_id, append_md5_4, append_version, unskip_init):
"""Explode a minilogue og or xd or prologue program bank or extract a program.
\b
Examples
--------
explode xd_program_bank.mnlgxdlib
explode -n XDProgName xd_program_bank.mnlgxdlib
"""
zipobj = ZipFile(filename, "r", compression=ZIP_DEFLATED, compresslevel=9)
proglist = common.zipread_progbins(zipobj)
proginfo_dict = common.zipread_all_prog_info(zipobj)
if match_name is not None:
match_ident = common.id_from_name(zipobj, match_name)
if match_ident is not None:
proglist = [proglist[match_ident - 1]]
# Create directory based on the filename stem
input_file = pathlib.Path(filename)
dir_path = input_file.with_suffix("")
dir_path.mkdir(exist_ok=True)
if input_file.suffix in {".mnlgxdpreset", ".mnlgxdlib"}:
suffix = ".mnlgxdprog"
flavour = "xd"
elif input_file.suffix in {".mnlgpreset", ".mnlglib"}:
suffix = ".mnlgprog"
flavour = "og"
elif input_file.suffix in {".prlgpreset", ".prlglib"}:
suffix = ".prlgprog"
flavour = "prologue"
elif input_file.suffix in {".molgpreset", ".molglib"}:
suffix = ".molgprog"
flavour = "monologue"
elif input_file.suffix in {".kklib"}:
suffix = ".kkprog"
flavour = "kk"
fileinfo_xml = common.fileinfo_xml(flavour, [0], False)
# Read any copyright and author information if available
copyright = None
author = None
comment = None
if input_file.suffix in {".mnlgxdpreset", ".mnlgpreset", ".prlgpreset", ".molgpreset"}:
author, copyright = common.author_copyright_from_presetinformation_xml(zipobj)
sanitise = common.sanitise_patchname()
for i, p in enumerate(proglist):
patchdata = zipobj.read(p)
hash = hashlib.md5(patchdata).hexdigest()
flavour = common.patch_type(patchdata)
if common.is_init_patch(flavour, hash):
# Init Program identified based on hash; i.e. a "True" Init Program
continue
prgname = common.program_name(patchdata, flavour)
if common.is_init_program_name(prgname) and not unskip_init:
# Init Program found and option not to skip is unchecked
continue
if prepend_id:
prgname = f"{i+1:03d}_{prgname}"
if append_md5_4:
hash = hashlib.md5(patchdata).hexdigest()
prgname = f"{prgname}-{hash[:4]}"
if append_version:
ver = version.__version__.replace(".", "")
prgname = f"{prgname}-v{ver}"
output_path = (dir_path / (sanitise(prgname) + suffix))
with ZipFile(output_path, "w") as zip:
binary = zipobj.read(p)
# .prog_bin record/file
zip.writestr(f"Prog_000.prog_bin", binary)
# .prog_info record/file
# Use any available presetinformation_xml author and copyright fields
if author is not None:
comment = f"Author: {author}"
proginfo_comment = (proginfo_dict[p])['Comment']
if proginfo_comment is not None:
comment = f"{comment}, " + proginfo_comment
prog_info_template = common.prog_info_template_xml(flavour, comment=comment, copyright=copyright)
zip.writestr(f"Prog_000.prog_info", prog_info_template)
# FileInformation.xml record/file
zip.writestr(f"FileInformation.xml", fileinfo_xml, False)
print(f"{int(p[5:8])+1:03d}: {prgname:<12s} -> {output_path}")
@click.command()
@click.argument("filename", type=click.Path(exists=True))
@click.option("--match_name", "-n", help="Dump the patch with name NAME")
@click.option("--match_ident", "-i", type=int, help="Dump the patch with ident ID")
@click.option("--prepend_id", "-p", is_flag=True, help="Prepend patch ID to the filename")
@click.option("--append_md5_4", "-m", is_flag=True, help="Append 4 digits of an md5 checksum to the filename")
@click.option("--append_version", "-v", is_flag=True, help="Append loguetools version to the filename")
@click.option("--unskip_init", "-u", is_flag=True, help="Don't skip patches named Init Program")
def click_explode(filename, match_name, match_ident, prepend_id, append_md5_4, append_version, unskip_init):
explode(filename, match_name, match_ident, prepend_id, append_md5_4, append_version, unskip_init)
if __name__ == "__main__":
click_explode()
|
the-stack_0_6684 | from flask import request, render_template, session
class Version_HTML():
endpoints = ["/version", "/version.html"]
endpoint_name = "page_version_html"
endpoint_access_level = 1
endpoint_category = "tool_pages"
pretty_name = "Version"
def __init__(self, fhdhr):
self.fhdhr = fhdhr
def __call__(self, *args):
return self.get(*args)
def get(self, *args):
version_dict = {}
for key in list(self.fhdhr.config.internal["versions"].keys()):
version_dict[key] = self.fhdhr.config.internal["versions"][key]
# Sort the Version Info
sorted_version_list = sorted(version_dict, key=lambda i: (version_dict[i]['type'], version_dict[i]['name']))
sorted_version_dict = {
"fHDHR": version_dict["fHDHR"],
"fHDHR_web": version_dict["fHDHR_web"]
}
for version_item in sorted_version_list:
if version_item not in ["fHDHR", "fHDHR_web"]:
sorted_version_dict[version_item] = version_dict[version_item]
return render_template('version.html', request=request, session=session, fhdhr=self.fhdhr, version_dict=sorted_version_dict, list=list)
|
the-stack_0_6685 | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from .test_modeling_common import ModelTesterMixin
if is_torch_available():
import torch
from transformers import (
AutoModelForSeq2SeqLM,
AutoTokenizer,
BatchEncoding,
MBartConfig,
MBartForConditionalGeneration,
)
EN_CODE = 250004
RO_CODE = 250020
@require_torch
class ModelTester:
def __init__(self, parent):
self.config = MBartConfig(
vocab_size=99,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
add_final_layer_norm=True,
)
def prepare_config_and_inputs_for_common(self):
return self.config, {}
@require_torch
class SelectiveCommonTest(unittest.TestCase):
all_model_classes = (MBartForConditionalGeneration,) if is_torch_available() else ()
test_save_load__keys_to_ignore_on_save = ModelTesterMixin.test_save_load__keys_to_ignore_on_save
def setUp(self):
self.model_tester = ModelTester(self)
@require_torch
@require_sentencepiece
@require_tokenizers
class AbstractSeq2SeqIntegrationTest(unittest.TestCase):
maxDiff = 1000 # longer string compare tracebacks
checkpoint_name = None
@classmethod
def setUpClass(cls):
cls.tokenizer = AutoTokenizer.from_pretrained(cls.checkpoint_name, use_fast=False)
return cls
@cached_property
def model(self):
"""Only load the model if needed."""
model = AutoModelForSeq2SeqLM.from_pretrained(self.checkpoint_name).to(torch_device)
if "cuda" in torch_device:
model = model.half()
return model
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(AbstractSeq2SeqIntegrationTest):
checkpoint_name = "facebook/mbart-large-en-ro"
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor face decât să înrăutăţească violenţa şi mizeria pentru milioane de oameni.',
]
expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@slow
def test_enro_generate_one(self):
batch: BatchEncoding = self.tokenizer.prepare_seq2seq_batch(
["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt"
).to(torch_device)
translated_tokens = self.model.generate(**batch)
decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
self.assertEqual(self.tgt_text[0], decoded[0])
# self.assertEqual(self.tgt_text[1], decoded[1])
@slow
def test_enro_generate_batch(self):
batch: BatchEncoding = self.tokenizer.prepare_seq2seq_batch(self.src_text, return_tensors="pt").to(
torch_device
)
translated_tokens = self.model.generate(**batch)
decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert self.tgt_text == decoded
def test_mbart_enro_config(self):
mbart_models = ["facebook/mbart-large-en-ro"]
expected = {"scale_embedding": True, "output_past": True}
for name in mbart_models:
config = MBartConfig.from_pretrained(name)
self.assertTrue(config.is_valid_mbart())
for k, v in expected.items():
try:
self.assertEqual(v, getattr(config, k))
except AssertionError as e:
e.args += (name, k)
raise
def test_mbart_fast_forward(self):
config = MBartConfig(
vocab_size=99,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
add_final_layer_norm=True,
)
lm_model = MBartForConditionalGeneration(config).to(torch_device)
context = torch.Tensor([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]]).long().to(torch_device)
summary = torch.Tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]]).long().to(torch_device)
result = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(result.logits.shape, expected_shape)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartCC25IntegrationTest(AbstractSeq2SeqIntegrationTest):
checkpoint_name = "facebook/mbart-large-cc25"
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" I ate lunch twice yesterday",
]
tgt_text = ["Şeful ONU declară că nu există o soluţie militară în Siria", "to be padded"]
@unittest.skip("This test is broken, still generates english")
def test_cc25_generate(self):
inputs = self.tokenizer.prepare_seq2seq_batch([self.src_text[0]], return_tensors="pt").to(torch_device)
translated_tokens = self.model.generate(
input_ids=inputs["input_ids"].to(torch_device),
decoder_start_token_id=self.tokenizer.lang_code_to_id["ro_RO"],
)
decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
self.assertEqual(self.tgt_text[0], decoded[0])
@slow
def test_fill_mask(self):
inputs = self.tokenizer.prepare_seq2seq_batch(["One of the best <mask> I ever read!"], return_tensors="pt").to(
torch_device
)
outputs = self.model.generate(
inputs["input_ids"], decoder_start_token_id=self.tokenizer.lang_code_to_id["en_XX"], num_beams=1
)
prediction: str = self.tokenizer.batch_decode(
outputs, clean_up_tokenization_spaces=True, skip_special_tokens=True
)[0]
self.assertEqual(prediction, "of the best books I ever read!")
|
the-stack_0_6687 | # coding=utf-8
from common import errcode
from dao.ask.ask_answer_reply_dao import AskAnswerReplyDao
from dao.ask.ask_answer_reply_like_dao import AskAnswerReplyLikeDao
from handlers.base.base_handler import BaseHandler
class LikeAnswerReplyHandler(BaseHandler):
methods = ['POST']
def __init__(self):
expect_request_para = {
"ask_id": None,
"answer_id": None,
"reply_id": None,
"common_param": {},
}
need_para = (
"ask_id",
"answer_id",
"reply_id",
"common_param",
)
super(LikeAnswerReplyHandler, self).__init__(expect_request_para, need_para)
def _process_imp(self):
        # User xxx likes reply xxx under answer xxx of question xxx
ret = AskAnswerReplyLikeDao.insert(self.para_map["ask_id"], self.para_map["answer_id"],
self.para_map["reply_id"], self.uid)
        # Reply like count + 1
if ret:
AskAnswerReplyDao.update({
"id": self.para_map["reply_id"],
"like_num": (1, True),
})
self.ret_code = errcode.NO_ERROR
self.ret_msg = 'ok'
return
|
the-stack_0_6688 | """Date based tools"""
import calendar
import datetime
from dateutil import parser as date_parser
import datetime as dt
MONTHS = {
"jan": 1,
"jan.": 1,
"january": 1,
"feb": 2,
"feb.": 2,
"february": 2,
"mar": 3,
"mar.": 3,
"march": 3,
"apr": 4,
"apr.": 4,
"april": 4,
"may": 5,
"may.": 5,
"jun": 6,
"jun.": 6,
"june": 6,
"jul": 7,
"jul.": 7,
"july": 7,
"aug": 8,
"aug.": 8,
"august": 8,
"sep": 9,
"sep.": 9,
"sept": 9,
"sept.": 9,
"september": 9,
"oct": 10,
"oct.": 10,
"october": 10,
"nov": 11,
"nov.": 11,
"november": 11,
"dec": 12,
"dec.": 12,
"december": 12,
"": 1,
"tbd": 1
}
def month_to_int(m):
"""Converts a month to an integer."""
try:
m = int(m)
except ValueError:
m = MONTHS[m.lower()]
return m
def month_to_str_int(m):
"""Converts a month to an int form, str type, with a leading zero"""
mi = month_to_int(m)
if mi < 10:
ms = "0{}".format(mi)
else:
ms = str(mi)
return ms
def day_to_str_int(d):
"""Converts a day to an int form, str type, with a leading zero"""
if d < 10:
ds = "0{}".format(d)
else:
ds = str(d)
return ds
def date_to_float(y, m, d=0):
"""Converts years / months / days to a float, eg 2015.0818 is August
18th 2015. """
y = int(y)
m = month_to_int(m)
d = int(d)
return y + (m / 100.0) + (d / 10000.0)
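# Quick worked examples (values chosen for illustration):
#   month_to_int("Sept.")          -> 9
#   month_to_str_int("feb")        -> "02"
#   date_to_float(2015, "aug", 18) -> 2015.0818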
def find_gaps_overlaps(dateslist, overlaps_ok=False):
'''
Find whether there is a gap or an overlap in a list of date-ranges
Parameters
----------
dateslist: list of tuples of datetime.date objects
The list of date-ranges.
overlaps_ok: bool
      If True, overlapping ranges are allowed and only gaps cause a False
      return; if False, both gaps and overlaps cause a False return
Returns
-------
True if there are no gaps or overlaps else False
'''
status = True
dateslist.sort(key=lambda x: x[0])
for i in range(len(dateslist) - 1):
if dateslist[i + 1][0] <= dateslist[i][1] and not overlaps_ok:
status = False
elif (dateslist[i + 1][0] - dateslist[i][1]).days > 1:
status = False
return status
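# For example (hypothetical ranges): back-to-back month ranges are contiguous,
# while a one-day hole is reported as a gap.
#   find_gaps_overlaps([(datetime.date(2020, 1, 1), datetime.date(2020, 1, 31)),
#                       (datetime.date(2020, 2, 1), datetime.date(2020, 2, 29))])  -> True
#   find_gaps_overlaps([(datetime.date(2020, 1, 1), datetime.date(2020, 1, 30)),
#                       (datetime.date(2020, 2, 1), datetime.date(2020, 2, 29))])  -> False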
def last_day(year, month):
"""
Returns the last day of the month for the month given
Parameters
----------
year: integer
the year that the month is in
month: integer or string
the month. if a string should be resolvable using regolith month_to_int
Returns
-------
The last day of that month
"""
return calendar.monthrange(year, month_to_int(month))[1]
def get_dates(thing):
'''
given a dict like thing, return the items
Parameters
----------
thing: dict
the dict that contains the dates
Returns
-------
dict containing datetime.date objects for begin_date end_date and date
Description
-----------
If "begin_date", "end_date" or "date" values are found, if these are are in
an ISO format string they will be converted to datetime.date objects and
returned in the dictionary under keys of the same name. A specified date
will override any date built from year/month/day data.
If they are not found the function will look for begin_year, end_year and
year.
If "year", "month" and "day" are found the function will return these in the
"date" field and begin_date and end_date will be None
If year is found but no month or day are found the function will return
begin_date and end_date with the beginning and the end of the given year/month.
The returned date will be None.
    If end_year is found but the end month and end day are missing, they are set to
    12 and 31, respectively.
    If begin_year is found but the begin month and begin day are missing, they are set to
    1 and 1, respectively.
'''
if thing.get("end_year") and not thing.get("begin_year"):
print('WARNING: end_year specified without begin_year')
begin_date, end_date, date = None, None, None
if thing.get('begin_year'):
if not thing.get('begin_month'):
thing['begin_month'] = 1
if not thing.get('begin_day'):
thing['begin_day'] = 1
begin_date = datetime.date(thing['begin_year'],month_to_int(thing['begin_month']),
thing['begin_day'])
if thing.get('end_year'):
if not thing.get('end_month'):
thing['end_month'] = 12
if not thing.get('end_day'):
thing['end_day'] = last_day(thing['end_year'], thing['end_month'])
end_date = datetime.date(thing['end_year'],month_to_int(thing['end_month']),
thing['end_day'])
if thing.get('year'):
if not thing.get('month'):
if thing.get('begin_year'):
print("WARNING: both year and begin_year specified. Year info will be used")
begin_date = datetime.date(thing['year'],1,1)
end_date = datetime.date(thing['year'],12,31)
elif not thing.get('day'):
if thing.get('begin_year'):
print("WARNING: both year and begin_year specified. Year info will be used")
begin_date = datetime.date(thing['year'],month_to_int(thing['month']),
1)
end_date = datetime.date(thing['year'],
month_to_int(thing['month']),
last_day(thing['year'], thing['month']))
else:
date = datetime.date(thing['year'],
month_to_int(thing['month']),
thing['day'])
begin_date = datetime.date(thing['year'],
month_to_int(thing['month']),
thing['day'])
end_date = datetime.date(thing['year'],
month_to_int(thing['month']),
thing['day'])
if thing.get('begin_date'):
if isinstance(thing.get('begin_date'), str):
begin_date = date_parser.parse(thing.get('begin_date')).date()
else:
begin_date = thing.get('begin_date')
if thing.get('end_date'):
if isinstance(thing.get('end_date'), str):
end_date = date_parser.parse(thing.get('end_date')).date()
else:
end_date = thing.get('end_date')
if thing.get('date'):
if isinstance(thing.get('date'), str):
date = date_parser.parse(thing.get('date')).date()
else:
date = thing.get('date')
dates = {'begin_date': begin_date, 'end_date': end_date, 'date': date}
return dates
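# For example (hypothetical document fragment): a year/month pair with no day yields
# a begin/end range spanning that month and no single 'date'.
#   get_dates({"year": 2020, "month": "feb"})
#   -> {'begin_date': datetime.date(2020, 2, 1),
#       'end_date': datetime.date(2020, 2, 29),
#       'date': None}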
def get_due_date(thing):
"""
Parameters
----------
thing: dict
gets the field named 'due_date' from the document and ensures it is a
datetime.date object
Returns
-------
The due date as a datetime.date object
"""
due_date = thing.get('due_date')
if isinstance(due_date, str):
due_date = date_parser.parse(due_date).date()
elif isinstance(due_date, datetime.date):
pass
else:
raise RuntimeError(f"due_date {due_date!r} is not a known type")
return due_date
def is_current(thing, now=None):
"""
given a thing with dates, returns true if the thing is current
looks for begin_ and end_ date-like things (date, year, month, day), or just
the date-like things themselves, e.g., begin_date, end_month, month, and so on.
Parameters
----------
thing: dict
the thing that we want to know whether or not it is current
now: datetime.date object
a date for now. If it is None it uses the current date. Default is None
Returns
-------
True if the thing is current and false otherwise
"""
if not now:
now = datetime.date.today()
dates = get_dates(thing)
current = False
if not dates.get("end_date"):
dates["end_date"] = datetime.date(5000, 12, 31)
try:
if dates.get("begin_date") <= now <= dates.get("end_date"):
current = True
except TypeError:
raise RuntimeError(f"Cannot find begin_date in document:\n {thing}")
return current
def has_started(thing, now=None):
"""
given a thing with dates, returns true if the thing has started
Parameters
----------
thing: dict
the thing that we want to know whether or not it has started
now: datetime.date object
a date for now. If it is None it uses the current date. Default is None
Returns
-------
True if the thing has started and false otherwise
"""
if not now:
now = datetime.date.today()
dates = get_dates(thing)
started = False
try:
if dates.get("begin_date") <= now:
started = True
except TypeError:
raise RuntimeError(f"Cannot find begin_date in document:\n {thing}")
return started
def has_finished(thing, now=None):
"""
given a thing with dates, returns true if the thing has finished
Parameters
----------
thing: dict
the thing that we want to know whether or not it has finished
now: datetime.date object
a date for now. If it is None it uses the current date. Default is None
Returns
-------
True if the thing has finished and false otherwise
"""
if not now:
now = datetime.date.today()
dates = get_dates(thing)
finished = False
if not dates.get("end_date"):
dates["end_date"] = datetime.date(5000, 12, 31)
if dates.get("end_date") < now:
finished = True
return finished
def is_before(thing, now=None):
"""
given a thing with a date, returns true if the thing is before the input date
Parameters
----------
thing: dict
the thing that we want to know whether or not is before a date
now: datetime.date object
a date for now. If it is None it uses the current date. Default is None
Returns
-------
True if the thing is before the date
"""
if not now:
now = datetime.date.today()
dates = get_dates(thing)
before = False
try:
if dates.get("date") < now:
before = True
except TypeError:
raise RuntimeError(f"Cannot find date in document:\n {thing}")
return before
def is_after(thing, now=None):
"""
given a thing with a date, returns true if the thing is after the input date
Parameters
----------
thing: dict
the thing that we want to know whether or not is after a date
now: datetime.date object
a date for now. If it is None it uses the current date. Default is None
Returns
-------
True if the thing is after the date
"""
if not now:
now = datetime.date.today()
dates = get_dates(thing)
after = False
try:
if now < dates.get('date'):
after = True
except TypeError:
raise RuntimeError(f"Cannot find date in document:\n {thing}")
return after
def is_between(thing, start=None, end=None):
"""
given a thing with a date, returns true if the thing is between the start and end date
Parameters
----------
thing: dict
the thing that we want to know whether or not is after a date
start: datetime.date object
a date for the start. If it is None it uses the current date. Default is None
end: datetime.date object
a date for the end. If it is None it uses the current date. Default is None
Returns
-------
True if the thing is between the start and end
"""
if not start:
start = datetime.date.today()
if not end:
end = datetime.date.today()
between = False
if is_after(thing, start) and is_before(thing, end):
between = True
return between
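# Illustrative usage of the predicates above (added sketch, not part of the
# original module):
#   is_current({"begin_year": 2020}, now=datetime.date(2021, 5, 1))          # -> True
#   has_finished({"begin_year": 2018, "end_year": 2019},
#                now=datetime.date(2021, 5, 1))                              # -> True
#   is_between({"year": 2020, "month": 5, "day": 15},
#              start=datetime.date(2020, 1, 1),
#              end=datetime.date(2020, 12, 31))                              # -> True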
|
the-stack_0_6689 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define the PINNs network for the Schrodinger equation."""
import numpy as np
from mindspore import Parameter, Tensor, nn, ops
from mindspore.common.initializer import TruncatedNormal, Zero, initializer
from mindspore.ops import constexpr
import mindspore.common.dtype as mstype
@constexpr
def _generate_ones(batch_size):
arr = np.ones((batch_size, 1), np.float32)
return Tensor(arr, mstype.float32)
@constexpr
def _generate_zeros(batch_size):
arr = np.zeros((batch_size, 1), np.float32)
return Tensor(arr, mstype.float32)
class neural_net(nn.Cell):
"""
Neural net to fit the wave function
Args:
layers (list of int): number of neurons in each layer
lb (np.array): lower bound (x, t) of domain
ub (np.array): upper bound (x, t) of domain
"""
def __init__(self, layers, lb, ub):
super(neural_net, self).__init__()
self.layers = layers
self.concat = ops.Concat(axis=1)
self.lb = Tensor(lb, mstype.float32)
self.ub = Tensor(ub, mstype.float32)
self.tanh = ops.Tanh()
self.add = ops.Add()
self.matmul = ops.MatMul()
self.w0 = self._init_weight_xavier(0)
self.b0 = self._init_biase(0)
self.w1 = self._init_weight_xavier(1)
self.b1 = self._init_biase(1)
self.w2 = self._init_weight_xavier(2)
self.b2 = self._init_biase(2)
self.w3 = self._init_weight_xavier(3)
self.b3 = self._init_biase(3)
self.w4 = self._init_weight_xavier(4)
self.b4 = self._init_biase(4)
def construct(self, x, t):
"""forward propagation"""
X = self.concat((x, t))
X = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0
X = self.tanh(self.add(self.matmul(X, self.w0), self.b0))
X = self.tanh(self.add(self.matmul(X, self.w1), self.b1))
X = self.tanh(self.add(self.matmul(X, self.w2), self.b2))
X = self.tanh(self.add(self.matmul(X, self.w3), self.b3))
X = self.add(self.matmul(X, self.w4), self.b4)
return X[:, 0:1], X[:, 1:2]
def _init_weight_xavier(self, layer):
"""
Initialize the weight for the i-th layer
"""
in_dim = self.layers[layer]
out_dim = self.layers[layer+1]
std = np.sqrt(2/(in_dim + out_dim))
name = 'w' + str(layer)
return Parameter(default_input=initializer(TruncatedNormal(std), [in_dim, out_dim], mstype.float32),
name=name, requires_grad=True)
def _init_biase(self, layer):
"""
Initialize the bias for the i-th layer
"""
name = 'b' + str(layer)
return Parameter(default_input=initializer(Zero(), self.layers[layer+1], mstype.float32),
name=name, requires_grad=True)
class Grad_1(nn.Cell):
"""
Using the first output to compute gradient.
"""
def __init__(self, net):
super(Grad_1, self).__init__()
self.net = net
self.grad = ops.GradOperation(get_all=True, sens_param=True)
def construct(self, x, t):
sens_1 = _generate_ones(x.shape[0])
sens_2 = _generate_zeros(x.shape[0])
return self.grad(self.net)(x, t, (sens_1, sens_2))
class Grad_2(nn.Cell):
"""
Using the second output to compute gradient.
"""
def __init__(self, net):
super(Grad_2, self).__init__()
self.net = net
self.grad = ops.GradOperation(get_all=True, sens_param=True)
def construct(self, x, t):
sens_1 = _generate_zeros(x.shape[0])
sens_2 = _generate_ones(x.shape[0])
return self.grad(self.net)(x, t, (sens_1, sens_2))
class PINNs(nn.Cell):
"""
PINNs for the Schrodinger equation.
"""
def __init__(self, layers, lb, ub):
super(PINNs, self).__init__()
self.nn = neural_net(layers, lb, ub)
self.du = Grad_1(self.nn)
self.dv = Grad_2(self.nn)
self.dux = Grad_1(self.du)
self.dvx = Grad_1(self.dv)
self.add = ops.Add()
self.pow = ops.Pow()
self.mul = ops.Mul()
def construct(self, X):
"""forward propagation"""
x = X[:, 0:1]
t = X[:, 1:2]
u, v = self.nn(x, t)
ux, ut = self.du(x, t)
vx, vt = self.dv(x, t)
uxx, _ = self.dux(x, t)
vxx, _ = self.dvx(x, t)
square_sum = self.add(self.pow(u, 2), self.pow(v, 2))
fu1 = self.mul(vxx, 0.5)
fu2 = self.mul(square_sum, v)
fu = self.add(self.add(ut, fu1), fu2)
fv1 = self.mul(uxx, -0.5)
fv2 = self.mul(square_sum, u)
fv2 = self.mul(fv2, -1.0)
fv = self.add(self.add(vt, fv1), fv2)
return u, v, ux, vx, fu, fv
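# Illustrative usage (added sketch, not part of the original file): the layer
# sizes and the (x, t) domain bounds below are assumptions for demonstration only.
#   layers = [2, 100, 100, 100, 100, 2]
#   lb = np.array([-5.0, 0.0])
#   ub = np.array([5.0, np.pi / 2])
#   model = PINNs(layers, lb, ub)
#   X = Tensor(np.random.rand(8, 2).astype(np.float32))
#   u, v, ux, vx, fu, fv = model(X)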
|
the-stack_0_6690 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from recipe import views
router = DefaultRouter()
router.register('tags', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
router.register('recipe', views.RecipeViewSet)
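# Added note (not in the original file): with the registrations above,
# DefaultRouter generates the usual list/detail routes, so (assuming the
# default basenames derived from each viewset's queryset) URLs can be reversed
# as e.g. 'recipe:tag-list' or 'recipe:recipe-detail' once app_name is set below.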
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
] |
the-stack_0_6694 | # Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Imports Role test"""
# pylint: disable=no-member
import pytest
from rbac.common.role import Role
from rbac.common.user import User
from rbac.common import protobuf
from rbac.common.logs import get_default_logger
from tests.rbac.common import helper
LOGGER = get_default_logger(__name__)
@pytest.mark.library
@pytest.mark.role
@pytest.mark.imports_role
def test_make():
"""Test making a message"""
name = helper.role.name()
role_id = helper.role.id()
next_id = helper.user.id()
message = Role().imports.make(
role_id=role_id, name=name, owners=[next_id], admins=[next_id]
)
assert isinstance(message, protobuf.role_transaction_pb2.ImportsRole)
assert isinstance(message.role_id, str)
assert isinstance(message.name, str)
assert message.role_id == role_id
assert message.name == name
assert message.owners == [next_id]
assert message.admins == [next_id]
@pytest.mark.library
@pytest.mark.role
@pytest.mark.imports_role
def test_make_addresses():
"""Test the make addresses method for the message"""
name = helper.role.name()
role_id = helper.role.id()
role_address = Role().address(role_id)
next_id = helper.user.id()
user_address = User().address(next_id)
signer_user_id = helper.user.id()
owner_address = Role().owner.address(role_id, next_id)
admin_address = Role().admin.address(role_id, next_id)
message = Role().imports.make(
role_id=role_id, name=name, owners=[next_id], admins=[next_id]
)
inputs, outputs = Role().imports.make_addresses(
message=message, signer_user_id=signer_user_id
)
assert role_address in inputs
assert user_address in inputs
assert owner_address in inputs
assert admin_address in inputs
assert role_address in outputs
assert user_address in outputs
assert owner_address in outputs
assert admin_address in outputs
@pytest.mark.role
@pytest.mark.imports_role
def test_create():
"""Test importing a role"""
user, keypair = helper.user.create()
name = helper.role.name()
role_id = helper.role.id()
status = Role().imports.new(
signer_keypair=keypair,
signer_user_id=user.next_id,
role_id=role_id,
name=name,
owners=[user.next_id],
admins=[user.next_id],
members=[user.next_id],
)
assert len(status) == 1
assert status[0]["status"] == "COMMITTED"
role = Role().get(object_id=role_id)
assert role.role_id == role_id
assert role.name == name
assert Role().owner.exists(object_id=role.role_id, related_id=user.next_id)
assert Role().admin.exists(object_id=role.role_id, related_id=user.next_id)
assert Role().member.exists(object_id=role.role_id, related_id=user.next_id)
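# Added note (not in the original file): these tests are selected via their
# pytest markers, so a typical targeted invocation looks like:
#   pytest -m imports_role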
|
the-stack_0_6696 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PySide2.QtCore import QDataStream, QSettings, QTimer
from PySide2.QtGui import QIntValidator
from PySide2.QtWidgets import (QApplication, QComboBox, QDialog,
QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QMessageBox,
QPushButton)
from PySide2.QtNetwork import (QAbstractSocket, QHostInfo, QNetworkConfiguration,
QNetworkConfigurationManager, QNetworkInterface, QNetworkSession,
QTcpSocket)
class Client(QDialog):
def __init__(self, parent=None):
super(Client, self).__init__(parent)
self.networkSession = None
self.blockSize = 0
self.currentFortune = ''
hostLabel = QLabel("&Server name:")
portLabel = QLabel("S&erver port:")
self.hostCombo = QComboBox()
self.hostCombo.setEditable(True)
name = QHostInfo.localHostName()
if name != '':
self.hostCombo.addItem(name)
domain = QHostInfo.localDomainName()
if domain != '':
self.hostCombo.addItem(name + '.' + domain)
if name != 'localhost':
self.hostCombo.addItem('localhost')
ipAddressesList = QNetworkInterface.allAddresses()
for ipAddress in ipAddressesList:
if not ipAddress.isLoopback():
self.hostCombo.addItem(ipAddress.toString())
for ipAddress in ipAddressesList:
if ipAddress.isLoopback():
self.hostCombo.addItem(ipAddress.toString())
self.portLineEdit = QLineEdit()
self.portLineEdit.setValidator(QIntValidator(1, 65535, self))
hostLabel.setBuddy(self.hostCombo)
portLabel.setBuddy(self.portLineEdit)
self.statusLabel = QLabel("This examples requires that you run "
"the Fortune Server example as well.")
self.getFortuneButton = QPushButton("Get Fortune")
self.getFortuneButton.setDefault(True)
self.getFortuneButton.setEnabled(False)
quitButton = QPushButton("Quit")
buttonBox = QDialogButtonBox()
buttonBox.addButton(self.getFortuneButton, QDialogButtonBox.ActionRole)
buttonBox.addButton(quitButton, QDialogButtonBox.RejectRole)
self.tcpSocket = QTcpSocket(self)
self.hostCombo.editTextChanged.connect(self.enableGetFortuneButton)
self.portLineEdit.textChanged.connect(self.enableGetFortuneButton)
self.getFortuneButton.clicked.connect(self.requestNewFortune)
quitButton.clicked.connect(self.close)
self.tcpSocket.readyRead.connect(self.readFortune)
self.tcpSocket.error.connect(self.displayError)
mainLayout = QGridLayout()
mainLayout.addWidget(hostLabel, 0, 0)
mainLayout.addWidget(self.hostCombo, 0, 1)
mainLayout.addWidget(portLabel, 1, 0)
mainLayout.addWidget(self.portLineEdit, 1, 1)
mainLayout.addWidget(self.statusLabel, 2, 0, 1, 2)
mainLayout.addWidget(buttonBox, 3, 0, 1, 2)
self.setLayout(mainLayout)
self.setWindowTitle("Fortune Client")
self.portLineEdit.setFocus()
manager = QNetworkConfigurationManager()
if manager.capabilities() & QNetworkConfigurationManager.NetworkSessionRequired:
settings = QSettings(QSettings.UserScope, 'QtProject')
settings.beginGroup('QtNetwork')
id = settings.value('DefaultNetworkConfiguration')
settings.endGroup()
config = manager.configurationFromIdentifier(id)
if config.state() & QNetworkConfiguration.Discovered == 0:
config = manager.defaultConfiguration()
self.networkSession = QNetworkSession(config, self)
self.networkSession.opened.connect(self.sessionOpened)
self.getFortuneButton.setEnabled(False)
self.statusLabel.setText("Opening network session.")
self.networkSession.open()
def requestNewFortune(self):
self.getFortuneButton.setEnabled(False)
self.blockSize = 0
self.tcpSocket.abort()
self.tcpSocket.connectToHost(self.hostCombo.currentText(),
int(self.portLineEdit.text()))
def readFortune(self):
instr = QDataStream(self.tcpSocket)
instr.setVersion(QDataStream.Qt_4_0)
if self.blockSize == 0:
if self.tcpSocket.bytesAvailable() < 2:
return
self.blockSize = instr.readUInt16()
if self.tcpSocket.bytesAvailable() < self.blockSize:
return
nextFortune = instr.readQString()
if nextFortune == self.currentFortune:
QTimer.singleShot(0, self.requestNewFortune)
return
self.currentFortune = nextFortune
self.statusLabel.setText(self.currentFortune)
self.getFortuneButton.setEnabled(True)
def displayError(self, socketError):
if socketError == QAbstractSocket.RemoteHostClosedError:
pass
elif socketError == QAbstractSocket.HostNotFoundError:
QMessageBox.information(self, "Fortune Client",
"The host was not found. Please check the host name and "
"port settings.")
elif socketError == QAbstractSocket.ConnectionRefusedError:
QMessageBox.information(self, "Fortune Client",
"The connection was refused by the peer. Make sure the "
"fortune server is running, and check that the host name "
"and port settings are correct.")
else:
QMessageBox.information(self, "Fortune Client",
"The following error occurred: %s." % self.tcpSocket.errorString())
self.getFortuneButton.setEnabled(True)
def enableGetFortuneButton(self):
self.getFortuneButton.setEnabled(
(self.networkSession is None or self.networkSession.isOpen())
and self.hostCombo.currentText() != ''
and self.portLineEdit.text() != '')
def sessionOpened(self):
config = self.networkSession.configuration()
if config.type() == QNetworkConfiguration.UserChoice:
id = self.networkSession.sessionProperty('UserChoiceConfiguration')
else:
id = config.identifier()
settings = QSettings(QSettings.UserScope, 'QtProject')
settings.beginGroup('QtNetwork')
settings.setValue('DefaultNetworkConfiguration', id)
settings.endGroup()
self.statusLabel.setText("This examples requires that you run the "
"Fortune Server example as well.")
self.enableGetFortuneButton()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
client = Client()
client.show()
sys.exit(client.exec_())
|
the-stack_0_6697 | #!/usr/bin/python
#
# Python library for reading and writing Windows shortcut files (.lnk)
# Copyright 2011 Tim-Christian Mundt
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
#
# hardly cannibalized from https://sourceforge.net/p/pylnk/home/Home/
# not as clean as i wished
# cannibal: @theguly
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import chr
from builtins import str
from builtins import range
from builtins import object
import sys, os, time, re
from struct import pack, unpack
from pprint import pformat,PrettyPrinter
from datetime import datetime
from io import StringIO
pp = PrettyPrinter(indent=4)
#---- constants
_SIGNATURE = 'L\x00\x00\x00'
_GUID = '\x01\x14\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00F'
_LINK_INFO_HEADER_DEFAULT = 0x1C
_LINK_INFO_HEADER_OPTIONAL = 0x24
_LINK_FLAGS = ('has_shell_item_id_list', 'has_link_info', 'has_description',
'has_relative_path', 'has_work_directory', 'has_arguments',
'has_icon', 'is_unicode', 'force_no_link_info')
_FILE_ATTRIBUTES_FLAGS = ('read_only', 'hidden', 'system_file', 'reserved1',
'directory', 'archive', 'reserved2', 'normal',
'temporary', 'sparse_file', 'reparse_point',
'compressed', 'offline', 'not_content_indexed',
'encrypted')
_MODIFIER_KEYS = ('SHIFT', 'CONTROL', 'ALT')
WINDOW_NORMAL = "Normal"
WINDOW_MAXIMIZED = "Maximized"
WINDOW_MINIMIZED = "Minimized"
_SHOW_COMMANDS = {1:WINDOW_NORMAL, 3:WINDOW_MAXIMIZED, 7:WINDOW_MINIMIZED}
_SHOW_COMMAND_IDS = dict((v, k) for k, v in _SHOW_COMMANDS.items())
DRIVE_UNKNOWN = "Unknown"
DRIVE_NO_ROOT_DIR = "No root directory"
DRIVE_REMOVABLE = "Removable"
DRIVE_FIXED = "Fixed (Hard disk)"
DRIVE_REMOTE = "Remote (Network drive)"
DRIVE_CDROM = "CD-ROM"
DRIVE_RAMDISK = "Ram disk"
_DRIVE_TYPES = {0: DRIVE_UNKNOWN,
1: DRIVE_NO_ROOT_DIR,
2: DRIVE_REMOVABLE,
3: DRIVE_FIXED,
4: DRIVE_REMOTE,
5: DRIVE_CDROM,
6: DRIVE_RAMDISK}
_DRIVE_TYPE_IDS = dict((v, k) for k, v in _DRIVE_TYPES.items())
_KEYS = {0x30: '0', 0x31: '1', 0x32: '2', 0x33: '3', 0x34: '4', 0x35: '5', 0x36: '6',
0x37: '7', 0x38: '8', 0x39: '9', 0x41: 'A', 0x42: 'B', 0x43: 'C', 0x44: 'D',
0x45: 'E', 0x46: 'F', 0x47: 'G', 0x48: 'H', 0x49: 'I', 0x4A: 'J', 0x4B: 'K',
0x4C: 'L', 0x4D: 'M', 0x4E: 'N', 0x4F: 'O', 0x50: 'P', 0x51: 'Q', 0x52: 'R',
0x53: 'S', 0x54: 'T', 0x55: 'U', 0x56: 'V', 0x57: 'W', 0x58: 'X', 0x59: 'Y',
0x5A: 'Z', 0x70: 'F1', 0x71: 'F2', 0x72: 'F3', 0x73: 'F4', 0x74: 'F5',
0x75: 'F6', 0x76: 'F7', 0x77: 'F8', 0x78: 'F9', 0x79: 'F10', 0x7A: 'F11',
0x7B: 'F12', 0x7C: 'F13', 0x7D: 'F14', 0x7E: 'F15', 0x7F: 'F16', 0x80: 'F17',
0x81: 'F18', 0x82: 'F19', 0x83: 'F20', 0x84: 'F21', 0x85: 'F22', 0x86: 'F23',
0x87: 'F24', 0x90: 'NUM LOCK', 0x91: 'SCROLL LOCK'}
_KEY_CODES = dict((v, k) for k, v in _KEYS.items())
ROOT_MY_COMPUTER = 'MY_COMPUTER'
ROOT_MY_DOCUMENTS = 'MY_DOCUMENTS'
ROOT_NETWORK_SHARE = 'NETWORK_SHARE'
ROOT_NETWORK_SERVER = 'NETWORK_SERVER'
ROOT_NETWORK_PLACES = 'NETWORK_PLACES'
ROOT_NETWORK_DOMAIN = 'NETWORK_DOMAIN'
ROOT_INTERNET = 'INTERNET'
ROOT_RECYLCE_BIN = 'RECYLCE_BIN'
ROOT_CONTROL_PANEL = 'CONTROL_PANEL'
_ROOT_LOCATIONS = {'{20D04FE0-3AEA-1069-A2D8-08002B30309D}': ROOT_MY_COMPUTER,
'{450D8FBA-AD25-11D0-98A8-0800361B1103}': ROOT_MY_DOCUMENTS,
'{54a754c0-4bf1-11d1-83ee-00a0c90dc849}': ROOT_NETWORK_SHARE,
'{c0542a90-4bf0-11d1-83ee-00a0c90dc849}': ROOT_NETWORK_SERVER,
'{208D2C60-3AEA-1069-A2D7-08002B30309D}': ROOT_NETWORK_PLACES,
'{46e06680-4bf0-11d1-83ee-00a0c90dc849}': ROOT_NETWORK_DOMAIN,
'{871C5380-42A0-1069-A2EA-08002B30309D}': ROOT_INTERNET,
'{645FF040-5081-101B-9F08-00AA002F954E}': ROOT_RECYLCE_BIN,
'{21EC2020-3AEA-1069-A2DD-08002B30309D}': ROOT_CONTROL_PANEL}
_ROOT_LOCATION_GUIDS = dict((v, k) for k, v in _ROOT_LOCATIONS.items())
TYPE_FOLDER = 'FOLDER'
TYPE_FILE = 'FILE'
_ENTRY_TYPES = {0x31: 'FOLDER', 0x32: 'FILE',
0x35: 'FOLDER (UNICODE)', 0x36: 'FILE (UNICODE)'}
_ENTRY_TYPE_IDS = dict((v, k) for k, v in _ENTRY_TYPES.items())
_DRIVE_PATTERN = re.compile("(\w)[:/\\\\]*$")
#---- read and write binary data
def read_byte(buf):
return unpack('<B', buf.read(1))[0]
def read_short(buf):
return unpack('<H', buf.read(2))[0]
def read_int(buf):
return unpack('<I', buf.read(4))[0]
def read_double(buf):
return unpack('<Q', buf.read(8))[0]
def read_cunicode(buf):
s = ""
b = buf.read(2)
while b != '\x00\x00':
s += b
b = buf.read(2)
return s.decode('utf-16-le')
def read_cstring(buf, padding=False):
s = ""
b = buf.read(1)
while b != '\x00':
s += b
b = buf.read(1)
if padding and not len(s) % 2:
buf.read(1) # make length + terminator even
#TODO: encoding is not clear, unicode-escape has been necessary sometimes
return s.decode('cp1252')
def read_sized_string(buf, str=True):
size = read_short(buf)
if str:
return buf.read(size*2).decode('utf-16-le')
else:
return buf.read(size)
def get_bits(value, start, count, length=16):
mask = 0
for i in range(count):
mask = mask | 1 << i
shift = length - start - count
return value >> shift & mask
def read_dos_datetime(buf):
date = read_short(buf)
time = read_short(buf)
year = get_bits(date, 0, 7) + 1980
month = get_bits(date, 7, 4)
day = get_bits(date, 11, 5)
hour = get_bits(time, 0, 5)
minute = get_bits(time, 5, 6)
second = get_bits(time, 11, 5)
return datetime(year, month, day, hour, minute, second)
def write_byte(val, buf):
buf.write(pack('<B', val))
def write_short(val, buf):
buf.write(pack('<H', val))
def write_int(val, buf):
buf.write(pack('<I', val))
def write_double(val, buf):
buf.write(pack('<Q', val))
def write_cstring(val, buf, padding=False):
#val = val.encode('unicode-escape').replace('\\\\', '\\')
val = val.encode('cp1252')
buf.write(val + '\x00')
if padding and not len(val) % 2:
buf.write('\x00')
def write_cunicode(val, buf):
uni = val.encode('utf-16-le')
buf.write(uni + '\x00\x00')
def write_sized_string(val, buf, str=True):
size = len(val)
write_short(size, buf)
if str:
buf.write(val.encode('utf-16-le'))
else:
buf.write(val)
def ret_sized_string(val, str=True):
size = len(val)
ret = pack('<H', size)
if str:
ret += val.encode('utf-16-le')
else:
ret += val
return ret
def put_bits(bits, target, start, count, length=16):
return target | bits << (length - start - count)
def write_dos_datetime(val, buf):
date = time = 0
date = put_bits(val.year-1980, date, 0, 7)
date = put_bits(val.month, date, 7, 4)
date = put_bits(val.day, date, 11, 5)
time = put_bits(val.hour, time, 0, 5)
time = put_bits(val.minute, time, 5, 6)
time = put_bits(val.second, time, 11, 5)
write_short(date, buf)
write_short(time, buf)
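# Added note (not in the original source): the two helpers above pack the date
# word as year-1980 (7 bits), month (4 bits), day (5 bits) and the time word as
# hour (5 bits), minute (6 bits), second (5 bits), so seconds above 31 do not
# survive a round trip. Illustrative round trip (under the module's Python 2
# style string I/O):
#   buf = StringIO()
#   write_dos_datetime(datetime(2021, 3, 14, 15, 9, 26), buf)
#   buf.seek(0)
#   read_dos_datetime(buf)  # -> datetime(2021, 3, 14, 15, 9, 26)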
#---- helpers
def convert_time_to_unix(windows_time):
# Windows time is specified as the number of 0.1 nanoseconds since January 1, 1601.
# UNIX time is specified as the number of seconds since January 1, 1970.
# There are 134774 days (or 11644473600 seconds) between these dates.
unix_time = windows_time / 10000000.0 - 11644473600
return datetime.fromtimestamp(unix_time)
def convert_time_to_windows(unix_time):
if isinstance(unix_time, datetime):
unix_time = time.mktime(unix_time.timetuple())
return int((unix_time + 11644473600) * 10000000)
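# Illustrative check (added sketch, not part of the original module): the two
# converters are approximate inverses of each other (both go through local time):
#   wt = convert_time_to_windows(datetime(2020, 1, 1))
#   convert_time_to_unix(wt)  # -> datetime(2020, 1, 1, 0, 0)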
class FormatException(Exception):
pass
class MissingInformationException(Exception):
pass
class InvalidKeyException(Exception):
pass
#---- data structures
class Flags(object):
def __init__(self, flag_names, flags_bytes=0):
self._flag_names = flag_names
self._flags = dict([(name, None) for name in flag_names])
self.set_flags(flags_bytes)
def set_flags(self, flags_bytes):
for pos in range(len(self._flag_names)):
self._flags[self._flag_names[pos]] = flags_bytes >> pos & 0x1 and True or False
def bytes(self):
bytes = 0
for pos in range(len(self._flag_names)):
bytes = (self._flags[self._flag_names[pos]] and 1 or 0) << pos | bytes
return bytes
bytes = property(bytes)
def __getitem__(self, key):
return object.__getattribute__(self, '_flags')[key]
def __setitem__(self, key, value):
if key not in self._flags:
raise KeyError("The key '%s' is not defined for those flags." % key)
self._flags[key] = value
def __getattr__(self, key):
return object.__getattribute__(self, '_flags')[key]
def __setattr__(self, key, value):
if '_flags' not in self.__dict__:
object.__setattr__(self, key, value)
elif key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.__setitem__(key, value)
def __str__(self):
return pformat(self._flags, indent=2)
class ModifierKeys(Flags):
def __init__(self, flags_bytes=0):
Flags.__init__(self, _MODIFIER_KEYS, flags_bytes)
def __str__(self):
s = ""
s += self.CONTROL and "CONTROL+" or ""
s += self.SHIFT and "SHIFT+" or ""
s += self.ALT and "ALT+" or ""
return s
class RootEntry(object):
def __init__(self, root):
if root is not None:
if root in list(_ROOT_LOCATION_GUIDS.keys()):
self.root = root
self.guid = _ROOT_LOCATION_GUIDS[root]
else:
bytes = root
if len(bytes) == 18: # and bytes[:2] == '\x1F\x50':
# '\x1F\x50' for MY_COMPUTER
# '\x1FX' for NETWORK
bytes = bytes[2:]
if len(bytes) != 16:
raise FormatException("This is no valid _GUID: %s" % bytes)
ordered = [bytes[3], bytes[2], bytes[1], bytes[0], bytes[5], bytes[4],
bytes[7], bytes[6], bytes[8], bytes[9], bytes[10], bytes[11],
bytes[12], bytes[13], bytes[14], bytes[15]]
self.guid = "{%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X}" % tuple(
[ord(x) for x in ordered])
self.root = _ROOT_LOCATIONS.get(self.guid, "UNKNOWN")
def bytes(self):
guid = self.guid[1:-1].replace('-', '')
chars = [chr(int(x, 16)) for x in [guid[i:i+2] for i in range(0, 32, 2)]]
return '\x1F\x50' + chars[3] + chars[2] + chars[1] + chars[0] + chars[5] + chars[4] \
+ chars[7] + chars[6] + ''.join(chars[8:])
bytes = property(bytes)
def __str__(self):
return "<RootEntry: %s>" % self.root
class DriveEntry(object):
def __init__(self, drive):
if len(drive) == 23:
self.drive = drive[1:3]
else:
m = _DRIVE_PATTERN.match(drive.strip())
if m:
self.drive = m.groups()[0].upper() + ':'
else:
raise FormatException("This is not a valid drive: " + drive)
def bytes(self):
return '/' + self.drive + '\\' + '\x00' * 19
bytes = property(bytes)
def __str__(self):
return "<DriveEntry: %s>" % self.drive
class PathSegmentEntry(object):
def __init__(self, bytes=None):
self.type = None
self.file_size = None
self.modified = None
self.short_name = None
self.created = None
self.accessed = None
self.full_name = None
if bytes is not None:
buf = StringIO(bytes)
self.type = _ENTRY_TYPES.get(read_short(buf), 'UNKNOWN')
short_name_is_unicode = self.type.endswith('(UNICODE)')
self.file_size = read_int(buf)
self.modified = read_dos_datetime(buf)
unknown = read_short(buf) # should be 0x10
if short_name_is_unicode:
self.short_name = read_cunicode(buf)
else:
self.short_name = read_cstring(buf, padding=True)
indicator_1 = read_short(buf) # see below
only_83 = read_short(buf) < 0x03
unknown = read_short(buf) # 0x04
self.is_unicode = read_short(buf) == 0xBeef
self.created = read_dos_datetime(buf)
self.accessed = read_dos_datetime(buf)
offset_unicode = read_short(buf)
only_83_2 = offset_unicode >= indicator_1 or offset_unicode < 0x14
offset_ansi = read_short(buf)
self.full_name = read_cunicode(buf)
offset_part2 = read_short(buf) # offset to byte after short name
def create_for_path(cls, path):
entry = cls()
entry.type = 'FILE'
entry.file_size = 473600
entry.short_name = path
entry.modified = datetime.fromtimestamp(1444297518)
entry.created = datetime.fromtimestamp(1444297518)
entry.accessed = datetime.fromtimestamp(1503493813)
entry.full_name = entry.short_name
return entry
create_for_path = classmethod(create_for_path)
def _validate(self):
if self.type is None:
raise MissingInformationException("Type is missing, choose either TYPE_FOLDER or TYPE_FILE.")
if self.file_size is None:
if self.type.startswith('FOLDER'):
self.file_size = 0
else:
raise MissingInformationException("File size missing")
if self.modified is None or self.accessed is None or self.created is None:
raise MissingInformationException("Date information missing")
if self.full_name is None:
raise MissingInformationException("A full name is missing")
if self.short_name is None:
self.short_name = self.full_name
def bytes(self):
self._validate()
out = StringIO()
entry_type = self.type
short_name_len = len(self.short_name) + 1
try:
self.short_name.decode("ascii")
short_name_is_unicode = False
short_name_len += short_name_len % 2 # padding
except (UnicodeEncodeError, UnicodeDecodeError):
short_name_is_unicode = True
short_name_len = short_name_len * 2
self.type += " (UNICODE)"
write_short(_ENTRY_TYPE_IDS[entry_type], out)
write_int(self.file_size, out)
write_dos_datetime(self.modified, out)
write_short(0x10, out)
if short_name_is_unicode:
write_cunicode(self.short_name, out)
else:
write_cstring(self.short_name, out, padding=True)
indicator = 24 + 2 * len(self.short_name)
write_short(indicator, out)
write_short(0x03, out)
write_short(0x04, out)
write_short(0xBeef, out)
write_dos_datetime(self.created, out)
write_dos_datetime(self.accessed, out)
offset_unicode = 0x14 # fixed data structure, always the same
write_short(offset_unicode, out)
offset_ansi = 0 # we always write unicode
write_short(offset_ansi, out)
write_cunicode(self.full_name, out)
offset_part2 = 0x0E + short_name_len
write_short(offset_part2, out)
return out.getvalue()
bytes = property(bytes)
def __str__(self):
return "<PathSegmentEntry: %s>" % self.full_name
class LinkTargetIDList(object):
def __init__(self, bytes=None):
self.items = []
if bytes is not None:
buf = StringIO(bytes)
raw = []
entry_len = read_short(buf)
while entry_len > 0:
raw.append(buf.read(entry_len - 2)) # the length includes the size
entry_len = read_short(buf)
self._interpret(raw)
def _interpret(self, raw):
if len(raw[0]) == 0x12:
self.items.append(RootEntry(raw[0]))
if self.items[0].root == ROOT_MY_COMPUTER:
if not len(raw[1]) == 0x17:
raise ValueError("This seems to be an absolute link which requires a drive as second element.")
self.items.append(DriveEntry(raw[1]))
items = raw[2:]
elif self.items[0].root == ROOT_NETWORK_PLACES:
raise NotImplementedError("""Parsing network lnks has not yet been implemented.
If you need it just contact me and we'll see...""")
else:
items = raw[1:]
else:
items = raw
for item in items:
self.items.append(PathSegmentEntry(item))
def _validate(self):
if type(self.items[0]) == RootEntry:
if self.items[0].root == ROOT_MY_COMPUTER \
and type(self.items[1]) != DriveEntry:
raise ValueError("A drive is required for absolute lnks")
def bytes(self):
self._validate()
out = StringIO()
for item in self.items:
bytes = item.bytes
write_short(len(bytes) + 2, out) # len + terminator
out.write(bytes)
out.write('\x00\x00')
return out.getvalue()
bytes = property(bytes)
def __str__(self):
return "<LinkTargetIDList:\n%s>" % pformat([str(item) for item in self.items])
class LinkInfo(object):
def __init__(self, lnk=None):
if lnk is not None:
self.start = lnk.tell()
self.size = read_int(lnk)
self.header_size = read_int(lnk)
link_info_flags = read_int(lnk)
self.local = link_info_flags & 1
self.remote = link_info_flags & 2
self.offs_local_volume_table = read_int(lnk)
self.offs_local_base_path = read_int(lnk)
self.offs_network_volume_table = read_int(lnk)
self.offs_base_name = read_int(lnk)
if self.header_size >= _LINK_INFO_HEADER_OPTIONAL:
print("TODO: read the unicode stuff") # TODO: read the unicode stuff
self._parse_path_elements(lnk)
else:
self.size = None
self.header_size = _LINK_INFO_HEADER_DEFAULT
self.remote = None
self.offs_local_volume_table = 0
self.offs_local_base_path = 0
self.offs_network_volume_table = 0
self.offs_base_name = 0
self.drive_type = None
self.drive_serial = None
self.volume_label = None
self.local_base_path = None
self.network_share_name = None
self.base_name = None
self._path = None
class Lnk(object):
def __init__(self, f=None):
self.file = None
if type(f) == str:
self.file = f
try:
f = open(self.file, 'rb')
except IOError:
self.file += ".lnk"
f = open(self.file, 'rb')
# defaults
self.link_flags = Flags(_LINK_FLAGS)
self.file_flags = Flags(_FILE_ATTRIBUTES_FLAGS)
self.creation_time = datetime.now()
self.access_time = datetime.now()
self.modification_time = datetime.now()
self.file_size = 0
self.icon_index = 0
self._show_command = WINDOW_NORMAL
self.hot_key = None
self._link_info = LinkInfo()
self.description = None
self.relative_path = None
self.work_dir = None
self.arguments = None
self.icon = None
def _write_hot_key(self, hot_key, lnk):
if hot_key is None:
low = high = 0
else:
hot_key = hot_key.split('+')
try:
low = _KEY_CODES[hot_key[-1]]
except KeyError:
raise InvalidKeyException("Cannot find key code for %s" % hot_key[1])
modifiers = ModifierKeys()
for modifier in hot_key[:-1]:
modifiers[modifier.upper()] = True
high = modifiers.bytes
write_byte(low, lnk)
write_byte(high, lnk)
def save(self, f=None, force_ext=False):
if f is None:
f = self.file
if f is None:
raise ValueError("File (name) missing for saveing the lnk")
is_file = hasattr(f, 'write')
if not is_file:
if not type(f) == str:
raise ValueError("Need a writeable object or a file name to save to, got %s" % f)
if force_ext:
if not f.lower().endswith('.lnk'):
f += '.lnk'
f = open(f, 'wb')
self.write(f)
# only close the stream if it's our own
if not is_file:
f.close()
def ret(self):
ret = _SIGNATURE
ret += _GUID
ret += pack('<I',self.link_flags.bytes)
ret += pack('<I',self.file_flags.bytes)
ret += pack('<Q',convert_time_to_windows(self.creation_time))
ret += pack('<Q',convert_time_to_windows(self.access_time))
ret += pack('<Q',convert_time_to_windows(self.modification_time))
ret += pack('<I',self.file_size)
ret += pack('<I',self.icon_index)
ret += pack('<I',_SHOW_COMMAND_IDS[self._show_command])
ret += pack('<B',0) #hotkey
ret += pack('<B',0) #hotkey
ret += ('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') # reserved
if self.link_flags.has_shell_item_id_list:
siil = self.shell_item_id_list.bytes
ret += pack('<H',len(siil))
ret += siil
# TOFIX / TOINVESTIGATE
#if self.link_flags.has_link_info:
#self._link_info.write(lnk)
if self.link_flags.has_description:
ret += ret_sized_string(self.description, self.link_flags.is_unicode)
if self.link_flags.has_relative_path:
ret += ret_sized_string(self.relative_path, self.link_flags.is_unicode)
if self.link_flags.has_work_directory:
ret += ret_sized_string(self.work_dir, self.link_flags.is_unicode)
if self.link_flags.has_arguments:
ret += ret_sized_string(self.arguments, self.link_flags.is_unicode)
if self.link_flags.has_icon:
ret += ret_sized_string(self.icon, self.link_flags.is_unicode)
ret += ('\x00\x00\x00\x00') # header_size
return ret
def write(self, lnk):
lnk.write(_SIGNATURE)
lnk.write(_GUID)
write_int(self.link_flags.bytes, lnk)
write_int(self.file_flags.bytes, lnk)
write_double(convert_time_to_windows(self.creation_time), lnk)
write_double(convert_time_to_windows(self.access_time), lnk)
write_double(convert_time_to_windows(self.modification_time), lnk)
write_int(self.file_size, lnk)
write_int(self.icon_index, lnk)
write_int(_SHOW_COMMAND_IDS[self._show_command], lnk)
self._write_hot_key(self.hot_key, lnk)
lnk.write('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') # reserved
if self.link_flags.has_shell_item_id_list:
siil = self.shell_item_id_list.bytes
write_short(len(siil), lnk)
lnk.write(siil)
if self.link_flags.has_link_info:
self._link_info.write(lnk)
if self.link_flags.has_description:
write_sized_string(self.description, lnk, self.link_flags.is_unicode)
if self.link_flags.has_relative_path:
write_sized_string(self.relative_path, lnk, self.link_flags.is_unicode)
if self.link_flags.has_work_directory:
write_sized_string(self.work_dir, lnk, self.link_flags.is_unicode)
if self.link_flags.has_arguments:
write_sized_string(self.arguments, lnk, self.link_flags.is_unicode)
if self.link_flags.has_icon:
write_sized_string(self.icon, lnk, self.link_flags.is_unicode)
lnk.write('\x00\x00\x00\x00') # header_size
def _get_shell_item_id_list(self):
return self._shell_item_id_list
def _set_shell_item_id_list(self, shell_item_id_list):
self._shell_item_id_list = shell_item_id_list
self.link_flags.has_shell_item_id_list = shell_item_id_list is not None
shell_item_id_list = property(_get_shell_item_id_list, _set_shell_item_id_list)
def _get_link_info(self):
return self._link_info
def _set_link_info(self, link_info):
self._link_info = link_info
self.link_flags.force_no_link_info = link_info is None
self.link_flags.has_link_info = link_info is not None
link_info = property(_get_link_info, _set_link_info)
def _get_description(self):
return self._description
def _set_description(self, description):
self._description = description
self.link_flags.has_description = description is not None
description = property(_get_description, _set_description)
def _get_relative_path(self):
return self._relative_path
def _set_relative_path(self, relative_path):
self._relative_path = relative_path
self.link_flags.has_relative_path = relative_path is not None
relative_path = property(_get_relative_path, _set_relative_path)
def _get_work_dir(self):
return self._work_dir
def _set_work_dir(self, work_dir):
self._work_dir = work_dir
self.link_flags.has_work_directory = work_dir is not None
work_dir = working_dir = property(_get_work_dir, _set_work_dir)
def _get_arguments(self):
return self._arguments
def _set_arguments(self, arguments):
self._arguments = arguments
self.link_flags.has_arguments = arguments is not None
arguments = property(_get_arguments, _set_arguments)
def _get_icon(self):
return self._icon
def _set_icon(self, icon):
self._icon = icon
self.link_flags.has_icon = icon is not None
icon = property(_get_icon, _set_icon)
def _get_window_mode(self):
return self._show_command
def _set_window_mode(self, value):
if value not in _SHOW_COMMANDS.values():
raise ValueError("Not a valid window mode: %s. Choose any of pylnk.WINDOW_*" % value)
self._show_command = value
window_mode = show_command = property(_get_window_mode, _set_window_mode)
def _get_path(self):
return self._shell_item_id_list.get_path()
path = property(_get_path)
def __str__(self):
s = "Target file:\n"
s += str(self.file_flags)
s += "\nCreation Time: %s" % self.creation_time
s += "\nModification Time: %s" % self.modification_time
s += "\nAccess Time: %s" % self.access_time
s += "\nFile size: %s" % self.file_size
s += "\nWindow mode: %s" % self._show_command
s += "\nHotkey: %s\n" % self.hot_key
s += str(self._link_info)
if self.link_flags.has_shell_item_id_list:
s += "\n%s" % self.shell_item_id_list
if self.link_flags.has_description:
s += "\nDescription: %s" % self.description
if self.link_flags.has_relative_path:
s += "\nRelative Path: %s" % self.relative_path
if self.link_flags.has_work_directory:
s += "\nWorking Directory: %s" % self.work_dir
if self.link_flags.has_arguments:
s += "\nCommandline Arguments: %s" % self.arguments
if self.link_flags.has_icon:
s += "\nIcon: %s" % self.icon
s += "\nUsed Path: %s" % self.shell_item_id_list.get_path()
return s
#---- convenience functions
def create(f=None):
lnk = Lnk()
lnk.file = f
return lnk
def for_file(target_file, arguments, lnkname, lnkicon=None, description=None):
drive, full_path = target_file.split(':',1)
full_path = full_path.lstrip('\\')
lnk = create()
lnk.link_info = None
level = full_path
elements = [RootEntry(ROOT_MY_COMPUTER),
DriveEntry(drive)]
segment = PathSegmentEntry.create_for_path(level)
elements.append(segment)
lnk.shell_item_id_list = LinkTargetIDList()
lnk.shell_item_id_list.items = elements
lnk.description = description
lnk.arguments = arguments
lnk.icon = lnkicon
#if lnkname:
# lnk.save()
return lnk
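# Illustrative usage (added sketch, not part of the original module); the target
# path, arguments and file names are assumptions for demonstration only:
#   lnk = for_file(r'C:\Windows\System32\notepad.exe', 'readme.txt',
#                  'notepad.lnk', description='open readme')
#   lnk.save('notepad.lnk', force_ext=True)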
|
the-stack_0_6699 | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Capture the Flag.
Example video: https://youtu.be/VRNt55-0IqE
This substrate is a team-based zero-sum game. There are four players on each
team: a red team and a blue team. Players can paint the ground anywhere by
using their zapping beam. If they stand on their own color then they gain health
up to a maximum of 3 (so they are more likely to win shootouts). They lose
health down to 1 from their default of 2 when standing on the opposing team's
color (so they are more likely to lose shootouts in that case). Health recovers
stochastically, at a fixed rate of 0.05 per frame. It cannot exceed its maximum,
determined by the current color of the ground the agent is standing on.
Players also cannot move over their opposing team's color. If the opposing team
paints the square underneath their feet then they get stuck in place until they
use their own zapping beam to re-paint the square underneath and in front of
themselves to break free. In practice this slows them down by one frame (which
may be critical if they are being chased).
Friendly fire is impossible; agents cannot zap their teammates.
In the _Capture the Flag_ substrate the final goal is capturing the opposing
team's flag. Payoffs are common to the entire winning team. Indicator tiles
around the edge of the map and in its very center display which teams have their
own flag on their base, allowing them the possibility of capturing their
opponent's flag by bringing it to their own base/flag. When indicator tiles are
red then only the red team can score. When indicator tiles are blue then only
the blue team can score. When the indicator tiles are purple then both teams
have the possibility of scoring (though neither is close to doing so) since both
flags are in their respective home bases.
"""
from typing import Any, Dict, Optional
from ml_collections import config_dict
import numpy as np
from meltingpot.python.utils.substrates import shapes
_COMPASS = ["N", "E", "S", "W"]
DEFAULT_ASCII_MAP = """
IIIIIIIIIIIIIIIIIIIIIII
IWWWWWWWWWWWWWWWWWWWWWI
IWPPP,PPPP,F,PPPP,PPPWI
IWPPP,,PP,,,,,PP,,PPPWI
IWPPP,,,,,,,,,,,,,PPPWI
IWP,,WW,,,,,,,,,WW,,PWI
IWHHWWW,WWWWWWW,WWWHHWI
IWHHW,D,,,,,,,,,D,WHHWI
IWHH,,W,,,WWW,,,W,,HHWI
IW,,,,W,,,,,,,,,W,,,,WI
IW,,,,WWW,,,,,WWW,,,,WI
IW,,,,,,,,,I,,,,,,,,,WI
IW,,,,WWW,,,,,WWW,,,,WI
IW,,,,W,,,,,,,,,W,,,,WI
IWHH,,W,,,WWW,,,W,,HHWI
IWHHW,D,,,,,,,,,D,WHHWI
IWHHWWW,WWWWWWW,WWWHHWI
IWQ,,WW,,,,,,,,,WW,,QWI
IWQQQ,,,,,,,,,,,,,QQQWI
IWQQQ,,QQ,,,,,QQ,,QQQWI
IWQQQ,QQQQ,G,QQQQ,QQQWI
IWWWWWWWWWWWWWWWWWWWWWI
IIIIIIIIIIIIIIIIIIIIIII
"""
# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
"P": {"type": "all", "list": ["spawn_point_red", "ground"]},
"Q": {"type": "all", "list": ["spawn_point_blue", "ground"]},
"W": "wall",
"D": {"type": "choice",
"list": ["destroyable_wall"] * 9 + ["destroyed_wall"]},
"H": {"type": "choice",
"list": ["destroyable_wall"] * 3 + ["destroyed_wall"]},
",": "ground",
"I": {"type": "all", "list": ["indicator", "indicator_frame"]},
"F": {"type": "all", "list": ["ground", "home_tile_red", "flag_red"]},
"G": {"type": "all", "list": ["ground", "home_tile_blue", "flag_blue"]},
}
RED_COLOR = (225, 55, 85, 255)
DARKER_RED_COLOR = (200, 35, 55, 255)
DARKEST_RED_COLOR = (160, 5, 25, 255)
BLUE_COLOR = (85, 55, 225, 255)
DARKER_BLUE_COLOR = (55, 35, 200, 255)
DARKEST_BLUE_COLOR = (25, 5, 160, 255)
PURPLE_COLOR = (107, 63, 160, 255)
def multiply_tuple(color_tuple, factor):
alpha = color_tuple[3]
return tuple([int(np.min([x * factor, alpha])) for x in color_tuple[0: 3]])
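# Illustrative example (added, not in the original file): darkening team red
# while clipping channel values at the alpha value; note the helper returns
# only the three RGB channels:
#   multiply_tuple(RED_COLOR, 0.35)  # -> (78, 19, 29)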
TEAMS_DATA = {
"red": {"color": RED_COLOR,
"spawn_group": "{}SpawnPoints".format("red")},
"blue": {"color": BLUE_COLOR,
"spawn_group": "{}SpawnPoints".format("blue")},
}
WALL = {
"name": "wall",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall",
"stateConfigs": [{
"state": "wall",
"layer": "upperPhysical",
"sprite": "Wall",
}],
}
},
{"component": "Transform",},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Wall",],
"spriteShapes": [shapes.WALL],
"palettes": [{"*": (95, 95, 95, 255),
"&": (100, 100, 100, 255),
"@": (109, 109, 109, 255),
"#": (152, 152, 152, 255)}],
"noRotates": [True]
}
},
{
"component": "AllBeamBlocker",
"kwargs": {}
},
]
}
INDICATOR_FRAME = {
"name": "indicator_frame",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "inert",
"stateConfigs": [
{"state": "inert",
"layer": "superOverlay",
"sprite": "InertFrame"}
]
}
},
{"component": "Transform",},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["InertFrame"],
"spriteShapes": [shapes.BUTTON],
"palettes": [{"*": (0, 0, 0, 0),
"x": (55, 55, 55, 255),
"#": (0, 0, 0, 0)}],
"noRotates": [True]
}
},
]
}
INDICATOR = {
"name": "control_indicator",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "both",
"stateConfigs": [
{
"state": "neither",
"layer": "background",
"sprite": "NeitherIndicator",
},
{
"state": "red",
"layer": "background",
"sprite": "RedIndicator",
},
{
"state": "blue",
"layer": "background",
"sprite": "BlueIndicator",
},
{
"state": "both",
"layer": "background",
"sprite": "BothIndicator",
},
]
}
},
{"component": "Transform",},
{
"component": "Appearance",
"kwargs": {
"spriteNames": ["NeitherIndicator",
"RedIndicator",
"BlueIndicator",
"BothIndicator"],
"spriteRGBColors": [(0, 0, 0, 0),
DARKER_RED_COLOR,
DARKER_BLUE_COLOR,
PURPLE_COLOR]
}
},
{"component": "ControlIndicator",},
]
}
def create_home_tile_prefab(team: str):
"""Return a home tile prefab, where the flag starts and must be brought."""
sprite_name = "HomeTileFrame{}".format(team)
prefab = {
"name": "home_tile",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "inert",
"stateConfigs": [
{"state": "inert",
"layer": "background",
"sprite": sprite_name}
]
}
},
{"component": "Transform",},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": [sprite_name],
"spriteShapes": [shapes.BUTTON],
"palettes": [{"*": (0, 0, 0, 0),
"x": (0, 0, 0, 0),
"#": (218, 165, 32, 255)}],
"noRotates": [True]
}
},
{
"component": "HomeTile",
"kwargs": {
"team": team,
}
},
]
}
return prefab
def create_ground_prefab():
"""Return a prefab for a colorable ground prefab."""
sprite_names = ["RedGround", "BlueGround"]
sprite_colors = [DARKEST_RED_COLOR, DARKEST_BLUE_COLOR]
prefab = {
"name": "ground",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "clean",
"stateConfigs": [
{
"state": "clean",
"layer": "alternateLogic",
},
{
"state": "red",
"layer": "alternateLogic",
"sprite": sprite_names[0],
},
{
"state": "blue",
"layer": "alternateLogic",
"sprite": sprite_names[1],
},
]
}
},
{"component": "Transform",},
{
"component": "Appearance",
"kwargs": {
"spriteNames": sprite_names,
"spriteRGBColors": sprite_colors
}
},
{
"component": "Ground",
"kwargs": {
"teamNames": ["red", "blue"],
}
},
]
}
return prefab
def create_destroyable_wall_prefab(initial_state):
"""Return destroyable wall prefab, potentially starting in destroyed state."""
if initial_state == "destroyed":
initial_health = 0
else:
initial_health = 5
prefab = {
"name": "destroyableWall",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": initial_state,
"stateConfigs": [
{
"state": "destroyable",
"layer": "upperPhysical",
"sprite": "DestroyableWall",
},
{
"state": "damaged",
"layer": "upperPhysical",
"sprite": "DamagedWall",
},
{
"state": "destroyed",
"layer": "alternateLogic",
"sprite": "Rubble",
},
],
}
},
{"component": "Transform",},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["DestroyableWall",
"DamagedWall",
"Rubble"],
"spriteShapes": [shapes.WALL,
shapes.WALL,
shapes.WALL],
"palettes": [{"*": (55, 55, 55, 255),
"&": (100, 100, 100, 255),
"@": (109, 109, 109, 255),
"#": (152, 152, 152, 255)},
{"*": (55, 55, 55, 255),
"&": (100, 100, 100, 255),
"@": (79, 79, 79, 255),
"#": (152, 152, 152, 255)},
{"*": (0, 0, 0, 255),
"&": (0, 0, 0, 255),
"@": (29, 29, 29, 255),
"#": (0, 0, 0, 255)}],
"noRotates": [True] * 3
}
},
{
"component": "Destroyable",
"kwargs": {"hitNames": ["red", "blue"],
"initialHealth": initial_health,
"damagedHealthLevel": 2}
}
]
}
return prefab
def create_spawn_point_prefab(team):
"""Return a team-specific spawn-point prefab."""
prefab = {
"name": "spawn_point",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "playerSpawnPoint",
"stateConfigs": [{
"state": "playerSpawnPoint",
"layer": "logic",
"groups": [TEAMS_DATA[team]["spawn_group"]],
}],
}
},
{"component": "Transform",},
{
"component": "Appearance",
"kwargs": {
"renderMode": "invisible",
"spriteNames": [],
"spriteRGBColors": []
}
},
]
}
return prefab
def create_flag_prefab(team: str):
"""Return a team-specific flag prefab."""
dropped_sprite_name = "DroppedFlag_{}".format(team)
carried_sprite_name = "CarriedFlag_{}".format(team)
if team == "red":
flag_color = RED_COLOR
elif team == "blue":
flag_color = BLUE_COLOR
prefab = {
"name": "{}_flag".format(team),
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "dropped",
"stateConfigs": [
{
"state": "dropped",
"layer": "lowerPhysical",
"sprite": dropped_sprite_name,
},
{
"state": "carried",
"layer": "overlay",
"sprite": carried_sprite_name,
},
{
"state": "wait",
}
]
}
},
{"component": "Transform",},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": [dropped_sprite_name, carried_sprite_name],
"spriteShapes": [shapes.FLAG,
shapes.FLAG_HELD],
"palettes": [shapes.get_palette(flag_color)] * 2,
"noRotates": [True, True]
}
},
{
"component": "Flag",
"kwargs": {
"team": team,
}
}
]
}
return prefab
# PREFABS is a dictionary mapping names to template game objects that can
# be cloned and placed in multiple locations accoring to an ascii map.
PREFABS = {
"wall": WALL,
"spawn_point_red": create_spawn_point_prefab("red"),
"spawn_point_blue": create_spawn_point_prefab("blue"),
"destroyable_wall": create_destroyable_wall_prefab("destroyable"),
"destroyed_wall": create_destroyable_wall_prefab("destroyed"),
"ground": create_ground_prefab(),
"indicator": INDICATOR,
"indicator_frame": INDICATOR_FRAME,
"flag_red": create_flag_prefab("red"),
"flag_blue": create_flag_prefab("blue"),
"home_tile_red": create_home_tile_prefab("red"),
"home_tile_blue": create_home_tile_prefab("blue"),
}
# Primitive action components.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "fireZap": 0}
FORWARD = {"move": 1, "turn": 0, "fireZap": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "fireZap": 0}
BACKWARD = {"move": 3, "turn": 0, "fireZap": 0}
STEP_LEFT = {"move": 4, "turn": 0, "fireZap": 0}
TURN_LEFT = {"move": 0, "turn": -1, "fireZap": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "fireZap": 0}
FIRE_ZAP = {"move": 0, "turn": 0, "fireZap": 1}
# pyformat: enable
# pylint: enable=bad-whitespace
ACTION_SET = (
NOOP,
FORWARD,
BACKWARD,
STEP_LEFT,
STEP_RIGHT,
TURN_LEFT,
TURN_RIGHT,
FIRE_ZAP,
)
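# Added note (not in the original file): an agent's discrete action is an index
# into ACTION_SET, e.g. 1 -> FORWARD and 7 -> FIRE_ZAP, and each entry is
# unpacked into the per-avatar 'move'/'turn'/'fireZap' primitives defined above.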
# The Scene is a non-physical object, its components implement global logic.
def create_scene():
"""Creates the global scene."""
scene = {
"name": "scene",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "scene",
"stateConfigs": [{
"state": "scene",
}],
}
},
{"component": "Transform",},
{
"component": "FlagManager",
"kwargs": {}
},
]
}
return scene
def create_avatar_object(
player_idx: int,
team: str,
override_taste_kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
"""Create an avatar object."""
# Lua is 1-indexed.
lua_index = player_idx + 1
team_color = TEAMS_DATA[team]["color"]
health1_avatar_sprite_name = "avatarSprite{}Health1".format(lua_index)
health2_avatar_sprite_name = "avatarSprite{}Health2".format(lua_index)
health3_avatar_sprite_name = "avatarSprite{}Health3".format(lua_index)
health1_color_palette = shapes.get_palette(multiply_tuple(team_color, 0.35))
health2_color_palette = shapes.get_palette(team_color)
health3_color_palette = shapes.get_palette(multiply_tuple(team_color, 1.75))
taste_kwargs = {
"defaultTeamReward": 1.0,
"rewardForZapping": 0.0,
"extraRewardForZappingFlagCarrier": 0.0,
"rewardForReturningFlag": 0.0,
"rewardForPickingUpOpposingFlag": 0.0,
}
if override_taste_kwargs:
taste_kwargs.update(override_taste_kwargs)
avatar_object = {
"name": "avatar",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "health2",
"stateConfigs": [
{"state": "health1",
"layer": "upperPhysical",
"sprite": health1_avatar_sprite_name,
"contact": "avatar",
"groups": ["players"]},
{"state": "health2",
"layer": "upperPhysical",
"sprite": health2_avatar_sprite_name,
"contact": "avatar",
"groups": ["players"]},
{"state": "health3",
"layer": "upperPhysical",
"sprite": health3_avatar_sprite_name,
"contact": "avatar",
"groups": ["players"]},
# Player wait state used when they have been zapped out.
{"state": "playerWait",
"groups": ["playerWaits"]},
]
}
},
{"component": "Transform",},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": [health1_avatar_sprite_name,
health2_avatar_sprite_name,
health3_avatar_sprite_name],
"spriteShapes": [shapes.CUTE_AVATAR,
shapes.CUTE_AVATAR,
shapes.CUTE_AVATAR],
"palettes": [health1_color_palette,
health2_color_palette,
health3_color_palette],
"noRotates": [True] * 3
}
},
{
"component": "Avatar",
"kwargs": {
"index": lua_index,
"aliveState": "health2",
"additionalLiveStates": ["health1", "health3"],
"waitState": "playerWait",
"spawnGroup": TEAMS_DATA[team]["spawn_group"],
"actionOrder": ["move",
"turn",
"fireZap"],
"actionSpec": {
"move": {"default": 0, "min": 0, "max": len(_COMPASS)},
"turn": {"default": 0, "min": -1, "max": 1},
"fireZap": {"default": 0, "min": 0, "max": 1},
},
"view": {
"left": 5,
"right": 5,
"forward": 9,
"backward": 1,
"centered": False
},
# The following kwarg makes it possible to get rewarded for
# team rewards even when an avatar is "dead".
"skipWaitStateRewards": False,
}
},
{
"component": "ColorZapper",
"kwargs": {
"team": team,
# The color zapper beam is somewhat transparent.
"color": (team_color[0], team_color[1], team_color[2], 150),
"cooldownTime": 2,
"beamLength": 3,
"beamRadius": 1,
"aliveStates": ["health1", "health2", "health3"],
}
},
{
"component": "ReadyToShootObservation",
"kwargs": {
"zapperComponent": "ColorZapper",
}
},
{
"component": "ZappedByColor",
"kwargs": {
"team": team,
"allTeamNames": ["red", "blue"],
"framesTillRespawn": 80,
"penaltyForBeingZapped": 0,
"rewardForZapping": 0,
"healthRegenerationRate": 0.05,
"maxHealthOnGround": 2,
"maxHealthOnOwnColor": 3,
"maxHealthOnEnemyColor": 1,
}
},
{
"component": "TeamMember",
"kwargs": {"team": team}
},
{
"component": "Taste",
"kwargs": taste_kwargs
},
{
"component": "LocationObserver",
"kwargs": {
"objectIsAvatar": True,
"alsoReportOrientation": True
}
},
]
}
return avatar_object
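# Usage sketch (illustrative kwargs only; not part of the original config):
# build a red avatar for player 0 that is additionally rewarded for zapping.
#
#   avatar = create_avatar_object(
#       0, "red", override_taste_kwargs={"rewardForZapping": 1.0})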
def _even_vs_odd_team_assignment(num_players,
taste_kwargs: Optional[Any] = None):
"""Assign players with even ids to red team and odd ids to blue team."""
avatar_objects = []
for player_idx in range(0, num_players):
if player_idx % 2 == 0:
team = "red"
elif player_idx % 2 == 1:
team = "blue"
game_object = create_avatar_object(player_idx, team,
override_taste_kwargs=taste_kwargs)
avatar_objects.append(game_object)
return avatar_objects
def _low_vs_high_team_assignment(num_players,
taste_kwargs: Optional[Any] = None):
"""Assign players with id below the median id to blue and above it to red."""
median = np.median(range(num_players))
avatar_objects = []
for player_idx in range(0, num_players):
if player_idx < median:
team = "blue"
elif player_idx > median:
team = "red"
game_object = create_avatar_object(player_idx, team,
override_taste_kwargs=taste_kwargs)
avatar_objects.append(game_object)
return avatar_objects
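# Worked example (illustrative): with num_players=4 the median of range(4) is
# 1.5, so players 0 and 1 are assigned to "blue" and players 2 and 3 to "red".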
def create_avatar_objects(num_players,
taste_kwargs: Optional[Any] = None,
fixed_teams: Optional[bool] = False):
"""Returns list of avatar objects of length 'num_players'."""
assert num_players % 2 == 0, "num players must be divisible by 2"
if fixed_teams:
avatar_objects = _low_vs_high_team_assignment(num_players,
taste_kwargs=taste_kwargs)
else:
avatar_objects = _even_vs_odd_team_assignment(num_players,
taste_kwargs=taste_kwargs)
return avatar_objects
def create_lab2d_settings(
num_players: int,
avatar_taste_kwargs: Optional[Any] = None,
fixed_teams: Optional[bool] = False) -> Dict[str, Any]:
"""Returns the lab2d settings."""
lab2d_settings = {
"levelName": "paintball_capture_the_flag",
"levelDirectory": "meltingpot/lua/levels",
"numPlayers": num_players,
"maxEpisodeLengthFrames": 1000,
"spriteSize": 8,
"topology": "BOUNDED", # Choose from ["BOUNDED", "TORUS"],
"simulation": {
"map": DEFAULT_ASCII_MAP,
"gameObjects": create_avatar_objects(num_players,
taste_kwargs=avatar_taste_kwargs,
fixed_teams=fixed_teams),
"scene": create_scene(),
"prefabs": PREFABS,
"charPrefabMap": CHAR_PREFAB_MAP,
},
}
return lab2d_settings
def get_config(factory=create_lab2d_settings):
"""Default configuration for training on the capture_the_flag level."""
config = config_dict.ConfigDict()
config.num_players = 8
config.lab2d_settings = factory(config.num_players)
# Action set configuration.
config.action_set = ACTION_SET
# Observation format configuration.
config.individual_observation_names = [
"RGB",
"READY_TO_SHOOT",
"POSITION",
"ORIENTATION",
]
config.global_observation_names = [
"WORLD.RGB",
]
return config
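# Usage sketch (assumes the module-level constants referenced above, such as
# DEFAULT_ASCII_MAP and CHAR_PREFAB_MAP, are defined earlier in this file):
#
#   config = get_config()
#   assert config.num_players == 8
#   assert config.lab2d_settings["numPlayers"] == 8
#   assert len(config.action_set) == 8  # the eight primitive actions above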
|
the-stack_0_6702 | import xarray as xr
import numpy as np
from climate_toolbox.utils.utils import \
remove_leap_days, convert_kelvin_to_celsius
def snyder_edd(tasmin, tasmax, threshold):
r"""
Snyder exceedance degree days/cooling degree days
Similarly to Snyder HDDs, Snyder exceedance degree days for any given day
    are given by the integral between the sinusoid-interpolated temperature and
the threshold.
The closed form solution is given by:
.. math::
EDD_{P} = \sum_{d \in P} EDD_d
where
.. math::
        EDD_d =
\begin{cases}
( (M - e)(\pi /2 - \theta) + w \cos(\theta) ) / \pi, & \text{if } tmin_d < e < tmax_d \\
0 , & \text{if } tmax_d < e \\
M - e, & \text{otherwise}
\end{cases}
and
.. math::
\begin{array}{rll}
M & = & (tmax_d + tmin_d)/2 \\
w & = & (tmax_d-tmin_d)/2 \\
\theta & = & \arcsin( (e-M)/w ) \\
\end{array}
Parameters
----------
tasmin : xarray.DataArray
Daily minimum temperature (degrees C)
tasmax : xarray.DataArray
Daily maximum temperature (degrees C)
threshold : int, float, xarray.DataArray
Threshold (degrees C)
Returns
-------
edd : xarray.DataArray
Snyder exceedance degree days (degreedays)
"""
# Check for unit agreement
assert tasmin.units == tasmax.units
# check to make sure tasmax > tasmin everywhere
assert not (tasmax < tasmin).any(), "values encountered where tasmin > tasmax"
# compute useful quantities for use in the transformation
snyder_mean = ((tasmax + tasmin)/2)
snyder_width = ((tasmax - tasmin)/2)
snyder_theta = xr.ufuncs.arcsin((threshold - snyder_mean)/snyder_width)
    # The transformation is computed with nested xr.where clauses: the outer
    # clause handles days on which the threshold exceeds tasmin, and the inner
    # clause distinguishes whether the threshold also exceeds tasmax.
res = xr.where(
tasmin < threshold,
xr.where(
tasmax > threshold,
((snyder_mean - threshold) * (np.pi/2 - snyder_theta)
+ (snyder_width * np.cos(snyder_theta))) / np.pi,
0),
snyder_mean - threshold)
res.attrs['units'] = (
'degreedays_{}{}'.format(threshold, tasmax.attrs['units']))
return res
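# Usage sketch (hypothetical two-day series; assumes both inputs carry a
# matching "units" attribute, as required by the assertion above):
#
#   tasmin = xr.DataArray(np.array([10., 15.]), dims="time", attrs={"units": "C"})
#   tasmax = xr.DataArray(np.array([25., 32.]), dims="time", attrs={"units": "C"})
#   edd = snyder_edd(tasmin, tasmax, threshold=20)
#   # edd.attrs["units"] == "degreedays_20C"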
def snyder_gdd(tasmin, tasmax, threshold_low, threshold_high):
r"""
Snyder growing degree days
Growing degree days are the difference between EDD measures at two
thresholds.
.. math::
{GDD}_{T_{low}, T_{high}, y, i} = {EDD}_{T_{low}, y, i} - {EDD}_{T_{high}, y, i}
Note that where :math:`tas_{d,i}>{T_{high}}`, GDD will be a constant value
:math:`T_{high}-T_{low}`. Thus, this measure is only useful when another
measure, e.g. :math:`{EDD}_{T_{high}}`, sometimes referred to as
*killing degree days*, is used as an additional predictor.
Parameters
----------
tasmin : xarray.DataArray
Daily minimum temperature (degrees C)
tasmax : xarray.DataArray
Daily maximum temperature (degrees C)
threshold_low : int, float, xarray.DataArray
Lower threshold (degrees C)
threshold_high : int, float, xarray.DataArray
Upper threshold (degrees C)
Returns
-------
gdd : xarray.DataArray
Snyder growing degree days (degreedays)
"""
# Check for unit agreement
assert tasmin.units == tasmax.units
res = (
snyder_edd(tasmin, tasmax, threshold_low)
- snyder_edd(tasmin, tasmax, threshold_high))
res.attrs['units'] = (
'degreedays_{}-{}{}'.format(threshold_low, threshold_high, tasmax.attrs['units']))
return res
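# Usage sketch (hypothetical 10-30 degree C growing window, reusing the
# tasmin/tasmax arrays from the snyder_edd example above):
#
#   gdd = snyder_gdd(tasmin, tasmax, threshold_low=10, threshold_high=30)
#   # gdd.attrs["units"] == "degreedays_10-30C"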
def validate_edd_snyder_agriculture(ds, thresholds):
msg_null = 'hierid dims do not match 24378'
assert ds.hierid.shape == (24378,), msg_null
for threshold in thresholds:
assert threshold in list(ds.refTemp)
return
def tas_poly(ds, power, varname):
"""
Daily average temperature (degrees C), raised to a power
Leap years are removed before counting days (uses a 365 day
calendar).
"""
powername = ordinal(power)
description = ('''
Daily average temperature (degrees C){raised}
Leap years are removed before counting days (uses a 365 day
calendar).
'''.format(
raised='' if power == 1 else (
' raised to the {powername} power'
.format(powername=powername)))).strip()
ds1 = xr.Dataset()
# remove leap years
ds = remove_leap_days(ds)
# do transformation
ds1[varname] = (ds.tas - 273.15)**power
# Replace datetime64[ns] 'time' with YYYYDDD int 'day'
if ds.dims['time'] > 365:
        raise ValueError(
            "tas_poly expects at most 365 days per call; got {} time steps "
            "after removing leap days".format(ds.dims['time']))
ds1.coords['day'] = ds['time.year']*1000 + np.arange(1, len(ds.time)+1)
ds1 = ds1.swap_dims({'time': 'day'})
ds1 = ds1.drop('time')
ds1 = ds1.rename({'day': 'time'})
# document variable
ds1[varname].attrs['units'] = (
'C^{}'.format(power) if power > 1 else 'C')
ds1[varname].attrs['long_title'] = description.splitlines()[0]
ds1[varname].attrs['description'] = description
ds1[varname].attrs['variable'] = varname
return ds1
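# Usage sketch (hypothetical daily dataset with a Kelvin "tas" variable and a
# datetime "time" coordinate covering at most one calendar year):
#
#   ds2 = tas_poly(ds, power=2, varname="tas_squared")
#   # ds2["tas_squared"].attrs["units"] == "C^2"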
def ordinal(n):
""" Converts numbers into ordinal strings """
return (
"%d%s" %
(n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4]))
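# ordinal() slices the string "tsnrhtdd" to pick a two-letter suffix, e.g.
# ordinal(1) == "1st", ordinal(2) == "2nd", ordinal(3) == "3rd",
# ordinal(4) == "4th", ordinal(11) == "11th", and ordinal(21) == "21st".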
|
the-stack_0_6703 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
random_seed.set_random_seed(23)
class SquareLinearOperatorFullMatrixTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
operator = linalg.LinearOperatorFullMatrix(lin_op_matrix, is_square=True)
return operator, matrix
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
matrix = [[1., 0.], [1., 11.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
# Auto-detected.
self.assertTrue(operator.is_square)
def test_assert_non_singular_raises_if_cond_too_big_but_finite(self):
with self.test_session():
tril = linear_operator_test_util.random_tril_matrix(
shape=(50, 50), dtype=np.float32)
diag = np.logspace(-2, 2, 50).astype(np.float32)
tril = array_ops.matrix_set_diag(tril, diag)
matrix = math_ops.matmul(tril, tril, transpose_b=True).eval()
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.assertRaisesOpError("Singular matrix"):
# Ensure that we have finite condition number...just HUGE.
cond = np.linalg.cond(matrix)
self.assertTrue(np.isfinite(cond))
self.assertGreater(cond, 1e12)
operator.assert_non_singular().run()
def test_assert_non_singular_raises_if_cond_infinite(self):
with self.test_session():
matrix = [[1., 1.], [1., 1.]]
# We don't pass the is_self_adjoint hint here, which means we take the
# generic code path.
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.assertRaisesOpError("Singular matrix"):
operator.assert_non_singular().run()
def test_assert_self_adjoint(self):
matrix = [[0., 1.], [0., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.test_session():
with self.assertRaisesOpError("not equal to its adjoint"):
operator.assert_self_adjoint().run()
def test_assert_positive_definite(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=True)
with self.test_session():
with self.assertRaisesOpError("Cholesky decomposition was not success"):
operator.assert_positive_definite().run()
class SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest.
In this test, the operator is constructed with hints that invoke the use of
a Cholesky decomposition for solves/determinant.
"""
def setUp(self):
# Increase from 1e-6 to 1e-5. This reduction in tolerance happens,
# presumably, because we are taking a different code path in the operator
    # and the matrix. The operator uses a Cholesky, the matrix uses standard
# solve.
self._atol[dtypes.float32] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
@property
def _dtypes_to_test(self):
return [dtypes.float32, dtypes.float64]
def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
operator = linalg.LinearOperatorFullMatrix(lin_op_matrix, is_square=True)
return operator, matrix
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
matrix = [[1., 0.], [0., 7.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_positive_definite=True, is_self_adjoint=True)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_self_adjoint)
# Should be auto-set
self.assertTrue(operator.is_non_singular)
self.assertTrue(operator._can_use_cholesky)
self.assertTrue(operator.is_square)
def test_assert_non_singular(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.test_session():
# Cholesky decomposition may fail, so the error is not specific to
# non-singular.
with self.assertRaisesOpError(""):
operator.assert_non_singular().run()
def test_assert_self_adjoint(self):
matrix = [[0., 1.], [0., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.test_session():
with self.assertRaisesOpError("not equal to its adjoint"):
operator.assert_self_adjoint().run()
def test_assert_positive_definite(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.test_session():
# Cholesky decomposition may fail, so the error is not specific to
# non-singular.
with self.assertRaisesOpError(""):
operator.assert_positive_definite().run()
class NonSquareLinearOperatorFullMatrixTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_normal(shape, dtype=dtype)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
    operator = linalg.LinearOperatorFullMatrix(lin_op_matrix)
return operator, matrix
def test_is_x_flags(self):
matrix = [[3., 2., 1.], [1., 1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_self_adjoint=False)
self.assertEqual(operator.is_positive_definite, None)
self.assertEqual(operator.is_non_singular, None)
self.assertFalse(operator.is_self_adjoint)
self.assertFalse(operator.is_square)
def test_matrix_must_have_at_least_two_dims_or_raises(self):
with self.assertRaisesRegexp(ValueError, "at least 2 dimensions"):
linalg.LinearOperatorFullMatrix([1.])
if __name__ == "__main__":
test.main()
|
the-stack_0_6704 | #!/usr/bin/env python
# pyOCD debugger
# Copyright (c) 2015-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import logging
from time import time
import argparse
from xml.etree import ElementTree
import multiprocessing as mp
import io
from pyocd.core.session import Session
from pyocd.core.helpers import ConnectHelper
from pyocd.utility.conversion import float32_to_u32
from pyocd.probe.aggregator import DebugProbeAggregator
from test_util import (
get_env_file_name,
TestResult,
Test,
IOTee,
RecordingLogHandler,
get_session_options,
ensure_output_dir,
TEST_OUTPUT_DIR,
)
from basic_test import BasicTest
from speed_test import SpeedTest
from cortex_test import CortexTest
from flash_test import FlashTest
from flash_loader_test import FlashLoaderTest
from gdb_test import GdbTest
from json_lists_test import JsonListsTest
from connect_test import ConnectTest
from debug_context_test import DebugContextTest
from concurrency_test import ConcurrencyTest
from commands_test import CommandsTest
XML_RESULTS_TEMPLATE = "test_results{}.xml"
LOG_FILE_TEMPLATE = "automated_test_result{}.txt"
SUMMARY_FILE_TEMPLATE = "automated_test_summary{}.txt"
LOG_FORMAT = "%(relativeCreated)07d:%(levelname)s:%(module)s:%(message)s"
JOB_TIMEOUT = 30 * 60 # 30 minutes
# Put together list of all tests.
all_tests = [
BasicTest(),
JsonListsTest(),
ConnectTest(),
SpeedTest(),
CortexTest(),
ConcurrencyTest(),
FlashTest(),
FlashLoaderTest(),
DebugContextTest(),
GdbTest(),
CommandsTest(),
]
# Actual list used at runtime, filted by command line args.
test_list = []
def print_summary(test_list, result_list, test_time, output_file=None):
for test in test_list:
test.print_perf_info(result_list, output_file=output_file)
Test.print_results(result_list, output_file=output_file)
print("", file=output_file)
print("Test Time: %.3f" % test_time, file=output_file)
if Test.all_tests_pass(result_list):
print("All tests passed", file=output_file)
else:
        print("One or more tests have failed!", file=output_file)
def split_results_by_board(result_list):
boards = {}
for result in result_list:
if result.board_name in boards:
boards[result.board_name].append(result)
else:
boards[result.board_name] = [result]
return boards
def generate_xml_results(result_list):
board_results = split_results_by_board(result_list)
suite_id = 0
total_failures = 0
total_tests = 0
total_time = 0
root = ElementTree.Element('testsuites',
name="pyocd"
)
root.text = "\n"
for board_name, results in board_results.items():
total = 0
failures = 0
suite_time = 0
suite = ElementTree.SubElement(root, 'testsuite',
name=board_name,
id=str(suite_id))
suite.text = "\n"
suite.tail = "\n"
suite_id += 1
for result in results:
total += 1
if not result.passed:
failures += 1
case = result.get_test_case()
suite.append(case)
suite_time += result.time
suite.set('tests', str(total))
suite.set('failures', str(failures))
suite.set('time', "%.3f" % suite_time)
total_tests += total
total_failures += failures
total_time += suite_time
root.set('tests', str(total_tests))
root.set('failures', str(total_failures))
root.set('time', "%.3f" % total_time)
xml_results = os.path.join(TEST_OUTPUT_DIR, XML_RESULTS_TEMPLATE.format(get_env_file_name()))
ElementTree.ElementTree(root).write(xml_results, encoding="UTF-8", xml_declaration=True)
def print_board_header(outputFile, board, n, includeDividers=True, includeLeadingNewline=False):
header = "TESTING BOARD {name} [{target}] [{uid}] #{n}".format(
name=board.name, target=board.target_type, uid=board.unique_id, n=n)
if includeDividers:
divider = "=" * len(header)
if includeLeadingNewline:
print("\n" + divider, file=outputFile)
else:
print(divider, file=outputFile)
print(header, file=outputFile)
if includeDividers:
print(divider + "\n", file=outputFile)
def test_board(board_id, n, loglevel, logToConsole, commonLogFile):
"""! @brief Run all tests on a given board.
When multiple test jobs are being used, this function is the entry point executed in
child processes.
Always writes both stdout and log messages of tests to a board-specific log file, and saves
the output for each test to a string that is stored in the TestResult object. Depending on
the logToConsole and commonLogFile parameters, output may also be copied to the console
(sys.stdout) and/or a common log file for all boards.
@param board_id Unique ID of the board to test.
@param n Unique index of the test run.
@param loglevel Log level passed to logger instance. Usually INFO or DEBUG.
@param logToConsole Boolean indicating whether output should be copied to sys.stdout.
@param commonLogFile If not None, an open file object to which output should be copied.
"""
probe = DebugProbeAggregator.get_probe_with_id(board_id)
assert probe is not None
session = Session(probe, **get_session_options())
board = session.board
originalStdout = sys.stdout
originalStderr = sys.stderr
# Set up board-specific output file. A previously existing file is removed.
env_name = (("_" + os.environ['TOX_ENV_NAME']) if ('TOX_ENV_NAME' in os.environ) else '')
name_info = "{}_{}_{}".format(env_name, board.name, n)
log_filename = os.path.join(TEST_OUTPUT_DIR, LOG_FILE_TEMPLATE.format(name_info))
if os.path.exists(log_filename):
os.remove(log_filename)
# Skip board if specified in the config.
if session.options['skip_test']:
        print("Skipping board %s as specified in config" % board.unique_id)
return []
# Skip this board if we don't have a test binary.
if board.test_binary is None:
print("Skipping board %s due to missing test binary" % board.unique_id)
return []
# Open board-specific output file. This is done after skipping so a skipped board doesn't have a
# log file created for it (but a previous log file will be removed, above).
log_file = open(log_filename, "w", buffering=1) # 1=Line buffered
# Setup logging.
log_handler = RecordingLogHandler(None)
log_handler.setFormatter(logging.Formatter(LOG_FORMAT))
root_logger = logging.getLogger()
root_logger.setLevel(loglevel)
root_logger.addHandler(log_handler)
result_list = []
try:
# Write board header to board log file, common log file, and console.
print_board_header(log_file, board, n)
if commonLogFile:
print_board_header(commonLogFile, board, n, includeLeadingNewline=(n != 0))
print_board_header(originalStdout, board, n, logToConsole, includeLeadingNewline=(n != 0))
# Run all tests on this board.
for test in test_list:
print("{} #{}: starting {}...".format(board.name, n, test.name), file=originalStdout)
# Set a unique port for the GdbTest.
if isinstance(test, GdbTest):
test.n = n
# Create a StringIO object to record the test's output, an IOTee to copy
# output to both the log file and StringIO, then set the log handler and
# stdio to write to the tee.
testOutput = io.StringIO()
tee = IOTee(log_file, testOutput)
if logToConsole:
tee.add(originalStdout)
if commonLogFile is not None:
tee.add(commonLogFile)
log_handler.stream = tee
sys.stdout = tee
sys.stderr = tee
test_start = time()
result = test.run(board)
test_stop = time()
result.time = test_stop - test_start
tee.flush()
result.output = testOutput.getvalue()
result_list.append(result)
passFail = "PASSED" if result.passed else "FAILED"
print("{} #{}: finished {}... {} ({:.3f} s)".format(
board.name, n, test.name, passFail, result.time),
file=originalStdout)
finally:
# Restore stdout/stderr in case we're running in the parent process (1 job).
sys.stdout = originalStdout
sys.stderr = originalStderr
root_logger.removeHandler(log_handler)
log_handler.flush()
log_handler.close()
return result_list
def filter_tests(args):
"""! @brief Generate the list of tests to run based on arguments."""
if args.exclude_tests and args.include_tests:
print("Please only include or exclude tests, not both simultaneously.")
sys.exit(1)
excludes = [t.strip().lower() for t in args.exclude_tests.split(',')] if args.exclude_tests else []
includes = [t.strip().lower() for t in args.include_tests.split(',')] if args.include_tests else []
for test in all_tests:
if excludes:
include_it = (test.name.lower() not in excludes)
elif includes:
include_it = (test.name.lower() in includes)
else:
include_it = True
if include_it:
test_list.append(test)
def main():
parser = argparse.ArgumentParser(description='pyOCD automated testing')
parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
parser.add_argument('-q', '--quiet', action="store_true", help='Hide test progress for 1 job')
parser.add_argument('-j', '--jobs', action="store", default=1, type=int, metavar="JOBS",
help='Set number of concurrent board tests (default is 1)')
parser.add_argument('-b', '--board', action="append", metavar="ID", help="Limit testing to boards with specified unique IDs. Multiple boards can be listed.")
parser.add_argument('-l', '--list-tests', action="store_true", help="Print a list of tests that will be run.")
parser.add_argument('-x', '--exclude-tests', metavar="TESTS", default="", help="Comma-separated list of tests to exclude.")
parser.add_argument('-i', '--include-tests', metavar="TESTS", default="", help="Comma-separated list of tests to include.")
args = parser.parse_args()
# Allow CI to override the number of concurrent jobs.
if 'CI_JOBS' in os.environ:
args.jobs = int(os.environ['CI_JOBS'])
filter_tests(args)
if args.list_tests:
for test in test_list:
print(test.name)
return
# Disable multiple jobs on macOS prior to Python 3.4. By default, multiprocessing uses
# fork() on Unix, which doesn't work on the Mac because CoreFoundation requires exec()
# to be used in order to init correctly (CoreFoundation is used in hidapi). Only on Python
# version 3.4+ is the multiprocessing.set_start_method() API available that lets us
# switch to the 'spawn' method, i.e. exec().
if args.jobs > 1 and sys.platform.startswith('darwin') and sys.version_info[0:2] < (3, 4):
print("WARNING: Cannot support multiple jobs on macOS prior to Python 3.4. Forcing 1 job.")
args.jobs = 1
ensure_output_dir()
# Setup logging based on concurrency and quiet option.
level = logging.DEBUG if args.debug else logging.INFO
if args.jobs == 1 and not args.quiet:
log_file = os.path.join(TEST_OUTPUT_DIR, LOG_FILE_TEMPLATE.format(get_env_file_name()))
# Create common log file.
if os.path.exists(log_file):
os.remove(log_file)
logToConsole = True
commonLogFile = open(log_file, "a")
else:
logToConsole = False
commonLogFile = None
board_list = []
result_list = []
# Put together list of boards to test
board_list = ConnectHelper.get_all_connected_probes(blocking=False)
board_id_list = sorted(b.unique_id for b in board_list)
# Filter boards.
if args.board:
board_id_list = [b for b in board_id_list if any(c for c in args.board if c.lower() in b.lower())]
# If only 1 job was requested, don't bother spawning processes.
start = time()
if args.jobs == 1:
for n, board_id in enumerate(board_id_list):
result_list += test_board(board_id, n, level, logToConsole, commonLogFile)
else:
# Create a pool of processes to run tests.
try:
pool = mp.Pool(args.jobs)
# Issue board test job to process pool.
async_results = [pool.apply_async(test_board, (board_id, n, level, logToConsole, commonLogFile))
for n, board_id in enumerate(board_id_list)]
# Gather results.
for r in async_results:
result_list += r.get(timeout=JOB_TIMEOUT)
finally:
pool.close()
pool.join()
stop = time()
test_time = (stop - start)
print_summary(test_list, result_list, test_time)
summary_file = os.path.join(TEST_OUTPUT_DIR, SUMMARY_FILE_TEMPLATE.format(get_env_file_name()))
with open(summary_file, "w") as output_file:
print_summary(test_list, result_list, test_time, output_file)
generate_xml_results(result_list)
exit_val = 0 if Test.all_tests_pass(result_list) else -1
exit(exit_val)
#TODO - check if any threads are still running?
if __name__ == "__main__":
# set_start_method is only available in Python 3.4+.
if sys.version_info[0:2] >= (3, 4):
mp.set_start_method('spawn')
main()
|
the-stack_0_6705 | #!/usr/bin/env python3
import os, sys
import json
from pathlib import Path
import requests
from time import time
from tempfile import gettempdir
import tarfile
import concurrent
from concurrent.futures import ThreadPoolExecutor
import pkg_resources
## Skip the download/extract work entirely when the toolchain cache was hit:
toolchains_file = Path("./toolchains.yaml")
toolchains = None
toolchains_dir = Path.home() / "toolchains"
if os.getenv("CACHED_SETUP_TOOLCHAINS") == 'true':
sys.exit(0)
# Check if we are running in a development version
#
if toolchains_file.exists():
from ruamel.yaml import YAML
yaml = YAML(typ="safe")
toolchains = yaml.load(toolchains_file)
else:
toolchains_str = pkg_resources.resource_string(__name__, "toolchains.json")
toolchains = json.loads(toolchains_str)
def download_chunks(tc):
url = tc.get("url")
tarfile = f"{tc.get('release')}.tgz"
local_filename = Path(gettempdir()) / tarfile
print(url)
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
return local_filename
"""
This assumes the resulting tar file already has a top level directory for now.
"""
def extract_as_tar(tc, tar):
to_path = toolchains_dir
tc_dir = Path(to_path) / tc.get("release")
    with tarfile.open(tar, "r:*") as archive:
        archive.extractall(path=to_path)
return tc_dir
def main():
with ThreadPoolExecutor(max_workers=len(toolchains) + 1) as executor:
tasks = {executor.submit(download_chunks, tc): tc for tc in toolchains}
print(tasks)
files = []
for future in concurrent.futures.as_completed(tasks):
tc = tasks[future]
try:
dl = future.result()
                files.append((tc, dl))  # keep each download paired with its toolchain
except Exception as exc:
print("%r generated an exception: %s" % (tc, exc))
tc_paths = []
with ThreadPoolExecutor(max_workers=len(toolchains) + 1) as executor:
tasks = {
executor.submit(extract_as_tar, tc, file): tc
for (tc, file) in zip(toolchains, files)
}
for future in concurrent.futures.as_completed(tasks):
try:
tc_paths.append(future.result())
except Exception as exc:
print("Uh oh %s" % exc )
    ## Add the toolchain paths to PATH in the environment
expand_path = os.pathsep.join(str(p) for p in tc_paths)
GHAction.addPath(expand_path)
os.environ["PATH"] = f"{expand_path}{os.pathsep}{os.environ['PATH']}"
print(os.environ["PATH"])
class GHAction:
def __init__ (self):
self.GITHUB_ = ''
@staticmethod
    def addPath(pathstr):
        """
        Prepend a directory to PATH by emitting the GitHub Actions workflow
        command on its own line, e.g. echo "::add-path::BAR"
        """
        lout = f"::add-path::{pathstr}"
        # Print a leading newline in case the previous output was not
        # terminated with one, then emit the command at the start of its line.
        print("\n" + lout)
@staticmethod
    def exportVariable(env, val):
        """
        Export an environment variable by emitting the GitHub Actions workflow
        command on its own line, e.g. echo "::set-env name=FOO::BAR"
        """
        lout = f"::set-env name={env}::{val}"
        # Print a leading newline in case the previous output was not
        # terminated with one, then emit the command at the start of its line.
        print("\n" + lout)
if __name__ == "__main__":
main()
|
the-stack_0_6708 | ##############################################################################
## This file is part of 'L2SI Core'.
## It is subject to the license terms in the LICENSE.txt file found in the
## top-level directory of this distribution and at:
## https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
## No part of 'L2SI Core', including this file,
## may be copied, modified, propagated, or distributed except according to
## the terms contained in the LICENSE.txt file.
##############################################################################
import ctypes
import struct
def getField(value, highBit, lowBit):
mask = 2**(highBit-lowBit+1)-1
return (value >> lowBit) & mask
def makeInt(ba):
return int.from_bytes(ba, 'little', signed=False)
c_uint64 = ctypes.c_uint64
c_uint = ctypes.c_uint
class PackedStruct(ctypes.LittleEndianStructure):
_pack_ = 1
def __str__(self):
li = []
for f in self._fields_:
if issubclass(f[1], ctypes._SimpleCData):
li.append(f'{f[0]} - {getattr(self, f[0]):x}')
else:
li.append(f'{f[0]} - {getattr(self, f[0])}')
return '\n'.join(li)
def __new__(self, ba):
return self.from_buffer_copy(ba)
def __init__(self, ba):
pass
class TransitionInfo(PackedStruct):
_fields_ = [
('dmy1', c_uint, 1),
('l0Tag', c_uint, 5),
('dmy2', c_uint, 2),
('header', c_uint, 7)]
class EventInfo(PackedStruct):
_fields_ = [
('l0Accept', c_uint, 1),
('l0Tag', c_uint, 5),
('dmy1', c_uint, 1),
('l0Reject', c_uint, 1),
('l1Expect', c_uint, 1),
('l1Accept', c_uint, 1),
('l1Tag', c_uint, 5) ]
class TriggerInfo(ctypes.Union):
_fields_ = [
('eventInfo', EventInfo),
('transitionInfo', TransitionInfo),
('asWord', ctypes.c_uint16)]
def __init__(self, word):
self.asWord = word
def isEvent(self):
return ((self.asWord & 0x8000) != 0)
class EventHeader(PackedStruct):
_fields_ = [
('pulseId', ctypes.c_uint64, 56),
('dmy1', ctypes.c_uint8),
('timeStamp', ctypes.c_uint64),
('partitions', ctypes.c_uint8),
('dmy2', ctypes.c_uint8),
('triggerInfo', ctypes.c_uint16),
('count', ctypes.c_uint32, 24),
('version', ctypes.c_uint8, 8)]
def parseEventHeaderFrame(frame, enPrint=False):
"""Given a rogue Frame representing an Event Header or Transition, parse into a dictionary of fields"""
frameSize = frame.getPayload()
ba = bytearray(frameSize)
channel = frame.getChannel()
if (enPrint):
print(f'Got Event Header frame with channel: {channel} and size: {frameSize}')
frame.read(ba, 0)
return parseBa2(ba)
def parseBa1(ba):
eh = EventHeader(ba=ba)
ti = TriggerInfo(eh.triggerInfo)
return ti
fmt = '<QQBxHLxxxxxxxx'
def parseBa2(ba):
s = struct.unpack(fmt, ba)
d = {}
d['pulseId'] = (s[0] & 0x00FFFFFFFFFFFFFF)
d['timeStamp'] = s[1]
d['partitions'] = s[2]
d['triggerInfo'] = s[3]
d['count'] = s[4] & 0x00FFFFFF
d['version'] = s[4] >> 24
return d
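if __name__ == "__main__":
    # Minimal self-check sketch (hypothetical header values chosen only to
    # exercise `fmt` and parseBa2; not representative of real hardware data).
    example = struct.pack(
        fmt,
        0x1100000000123456,  # dmy1 in the top byte, pulseId in the low 56 bits
        1600000000,          # timeStamp
        0x01,                # partitions
        0x8021,              # triggerInfo (MSB set, so it parses as an event)
        (1 << 24) | 42)      # version=1 in the top byte, count=42 below it
    parsed = parseBa2(bytearray(example))
    print(parsed)
    print("isEvent:", TriggerInfo(parsed["triggerInfo"]).isEvent())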
|
the-stack_0_6709 | """
Various helper functions related to dictionaries.
"""
def extend_dictionary(d1, d2):
"""
Helper function to create a new dictionary with the contents of the two
given dictionaries. Does not modify either dictionary, and the values are
copied shallowly. If there are repeats, the second dictionary wins ties.
The function is written to ensure Skulpt compatibility.
Args:
d1 (dict): The first dictionary
d2 (dict): The second dictionary
Returns:
dict: The new dictionary
"""
d3 = {}
for key, value in d1.items():
d3[key] = value
for key, value in d2.items():
d3[key] = value
return d3
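if __name__ == "__main__":
    # Illustrative check (hypothetical values): the second dictionary wins ties.
    print(extend_dictionary({"a": 1, "b": 2}, {"b": 3, "c": 4}))
    # -> {'a': 1, 'b': 3, 'c': 4}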
|
the-stack_0_6711 | int_to_mod = {
0 : ["NM", "NoMod"],
1 << 0 : ["NF", "NoFail"],
1 << 1 : ["EZ", "Easy"],
1 << 2 : ["TD", "TouchDevice"],
1 << 3 : ["HD", "Hidden"],
1 << 4 : ["HR", "HardRock"],
1 << 5 : ["SD", "SuddenDeath"],
1 << 6 : ["DT", "DoubleTime"],
1 << 7 : ["RX", "Relax"],
1 << 8 : ["HT", "HalfTime"],
1 << 9 : ["NC", "Nightcore"],
1 << 10 : ["FL", "Flashlight"],
1 << 11 : ["AT", "Autoplay"],
1 << 12 : ["SO", "SpunOut"],
1 << 13 : ["AP", "Autopilot"],
1 << 14 : ["PF", "Perfect"],
1 << 15 : ["K4", "Key4"],
1 << 16 : ["K5", "Key5"],
1 << 17 : ["K6", "Key6"],
1 << 18 : ["K7", "Key7"],
1 << 19 : ["K8", "Key8"],
1 << 20 : ["FI", "FadeIn"],
1 << 21 : ["RD", "Random"],
1 << 22 : ["CN", "Cinema"],
1 << 23 : ["TP", "Target"],
1 << 24 : ["K9", "Key9"],
1 << 25 : ["CO", "KeyCoop"],
1 << 26 : ["K1", "Key1"],
1 << 27 : ["K3", "Key3"],
1 << 28 : ["K2", "Key2"],
1 << 29 : ["V2", "ScoreV2"],
1 << 30 : ["MR", "Mirror"]
}
class ModCombination():
"""
An osu! mod combination.
Notes
-----
This class only exists to allow ``Mod`` to have ``ModCombination`` objects
as class attributes, as you can't instantiate instances of your own class in
a class definition.
"""
def __init__(self, value):
self.value = value
@staticmethod
def _parse_mod_string(mod_string):
"""
Creates an integer representation of a mod string made up of two letter
mod names ("HDHR", for example).
Parameters
----------
mod_string: str
The mod string to represent as an int.
Returns
-------
int
The integer representation of the mod string.
Raises
------
ValueError
If mod_string is empty, not of even length, or any of its 2-length
substrings do not correspond to a Mod in Mod.ORDER.
"""
if mod_string == "":
raise ValueError("Invalid mod string (cannot be empty)")
if len(mod_string) % 2 != 0:
raise ValueError(f"Invalid mod string {mod_string} (not of even "
"length)")
mod = Mod.NM
for i in range(0, len(mod_string) - 1, 2):
single_mod = mod_string[i: i + 2]
            # There should only be one Mod whose acronym matches ours, but a
            # list comprehension plus a [0] index works either way.
matching_mods = [mod for mod in Mod.ORDER if \
mod.short_name() == single_mod]
            # ``Mod.ORDER`` uses ``_NC`` and ``_PF``, and we want to parse
            # e.g. "NC" as "DTNC"
if Mod._NC in matching_mods:
matching_mods.remove(Mod._NC)
matching_mods.append(Mod.NC)
if Mod._PF in matching_mods:
matching_mods.remove(Mod._PF)
matching_mods.append(Mod.PF)
if not matching_mods:
raise ValueError("Invalid mod string (no matching mod found "
f"for {single_mod})")
mod += matching_mods[0]
return mod.value
def short_name(self):
"""
The acronym-ized names of the component mods.
Returns
-------
str
The short name of this ModCombination.
Examples
--------
>>> ModCombination(576).short_name()
"NC"
>>> ModCombination(24).short_name()
"HDHR"
Notes
-----
This is a function instead of an attribute set at initialization time
        because otherwise we couldn't refer to :class:`~.Mod`\s as its class
body isn't loaded while it's instantiating :class:`~.Mod`\s.
Although technically mods such as NC are represented with two bits -
DT and NC - being set, short_name removes DT and so returns "NC"
rather than "DTNC".
"""
if self.value in int_to_mod:
# avoid infinite recursion with every mod decomposing into itself
# ad infinitum
return int_to_mod[self.value][0]
component_mods = self.decompose(clean=True)
return "".join(mod.short_name() for mod in component_mods)
def long_name(self):
"""
The spelled out names of the component mods.
Returns
-------
str
The long name of this ModCombination.
Examples
--------
>>> ModCombination(576).long_name()
"Nightcore"
>>> ModCombination(24).long_name()
"Hidden HardRock"
Notes
-----
This is a function instead of an attribute set at initialization time
because otherwise we couldn't refer to :class:`~.Mod`\s as its class
body isn't loaded while it's instantiating :class:`~.Mod`\s.
Although technically mods such as NC are represented with two bits -
DT and NC - being set, long_name removes DT and so returns "Nightcore"
rather than "DoubleTime Nightcore".
"""
if self.value in int_to_mod:
return int_to_mod[self.value][1]
component_mods = self.decompose(clean=True)
return " ".join(mod.long_name() for mod in component_mods)
def __eq__(self, other):
"""Compares the ``value`` of each object"""
if not isinstance(other, ModCombination):
return False
return self.value == other.value
def __add__(self, other):
"""Returns a Mod representing the bitwise OR of the two Mods"""
return ModCombination(self.value | other.value)
def __sub__(self, other):
return ModCombination(self.value & ~other.value)
def __hash__(self):
return hash(self.value)
def __repr__(self):
return f"ModCombination(value={self.value})"
def __str__(self):
return self.short_name()
def __contains__(self, other):
return bool(self.value & other.value)
def decompose(self, clean=False):
"""
Decomposes this mod into its base component mods, which are
:class:`~.ModCombination`\s with a ``value`` of a power of two.
Parameters
----------
clean: bool
If true, removes mods that we would think of as duplicate - if both
NC and DT are component mods, remove DT. If both PF and SD are
component mods, remove SD.
Returns
-------
list[:class:`~.ModCombination`]
A list of the component :class:`~.ModCombination`\s of this mod,
ordered according to :const:`~circleguard.mod.ModCombination.ORDER`.
"""
mods = [ModCombination(mod_int) for mod_int in int_to_mod if
self.value & mod_int]
# order the mods by Mod.ORDER
mods = [mod for mod in Mod.ORDER if mod in mods]
if not clean:
return mods
if Mod._NC in mods and Mod.DT in mods:
mods.remove(Mod.DT)
if Mod._PF in mods and Mod.SD in mods:
mods.remove(Mod.SD)
return mods
class Mod(ModCombination):
"""
An ingame osu! mod.
Common combinations are available as ``HDDT``, ``HDHR``, and ``HDDTHR``.
Parameters
----------
value: int or str or list
A representation of the desired mod. This can either be its integer
representation such as ``64`` for ``DT`` and ``72`` (``64`` + ``8``) for
``HDDT``, or a string such as ``"DT"`` for ``DT`` and ``"HDDT"`` (or
``DTHD``) for ``HDDT``, or a list of strings such as ``["HD", "DT"]``
for ``HDDT``.
|br|
If used, the string must be composed of two-letter acronyms for mods,
in any order.
Notes
-----
The nightcore mod is never set by itself. When we see plays set with ``NC``,
we are really seeing a ``DT + NC`` play. ``NC`` by itself is ``512``, but
what we expect to see is ``576`` (``512 + 64``; ``DT`` is ``64``). As such
``Mod.NC`` is defined to be the more intuitive version—``DT + NC``. We
provide the true, technical version of the ``NC`` mod (``512``) as
``Mod._NC``.
This same treatment and reasoning applies to ``Mod.PF``, which we define
as ``PF + SD``. The technical version of PF is available as ``Mod._PF``.
A full list of mods and their specification can be found at
https://osu.ppy.sh/help/wiki/Game_Modifiers, or a more technical list at
https://github.com/ppy/osu-api/wiki#mods.
Warnings
--------
The fact that this class subclasses ModCombination is slightly misleading.
This is only done so that this class can be instantiated directly, backed
by an internal ModCombination, instead of exposing ModCombination to users.
"""
NM = NoMod = ModCombination(0)
NF = NoFail = ModCombination(1 << 0)
EZ = Easy = ModCombination(1 << 1)
TD = TouchDevice = ModCombination(1 << 2)
HD = Hidden = ModCombination(1 << 3)
HR = HardRock = ModCombination(1 << 4)
SD = SuddenDeath = ModCombination(1 << 5)
DT = DoubleTime = ModCombination(1 << 6)
RX = Relax = ModCombination(1 << 7)
HT = HalfTime = ModCombination(1 << 8)
_NC = _Nightcore = ModCombination(1 << 9)
# most people will find it more useful for NC to be defined as it is ingame
NC = Nightcore = _NC + DT
FL = Flashlight = ModCombination(1 << 10)
AT = Autoplay = ModCombination(1 << 11)
SO = SpunOut = ModCombination(1 << 12)
AP = Autopilot = ModCombination(1 << 13)
_PF = _Perfect = ModCombination(1 << 14)
PF = Perfect = _PF + SD
K4 = Key4 = ModCombination(1 << 15)
K5 = Key5 = ModCombination(1 << 16)
K6 = Key6 = ModCombination(1 << 17)
K7 = Key7 = ModCombination(1 << 18)
K8 = Key8 = ModCombination(1 << 19)
FI = FadeIn = ModCombination(1 << 20)
RD = Random = ModCombination(1 << 21)
CN = Cinema = ModCombination(1 << 22)
TP = Target = ModCombination(1 << 23)
K9 = Key9 = ModCombination(1 << 24)
CO = KeyCoop = ModCombination(1 << 25)
K1 = Key1 = ModCombination(1 << 26)
K3 = Key3 = ModCombination(1 << 27)
K2 = Key2 = ModCombination(1 << 28)
V2 = ScoreV2 = ModCombination(1 << 29)
MR = Mirror = ModCombination(1 << 30)
KM = KeyMod = K1 + K2 + K3 + K4 + K5 + K6 + K7 + K8 + K9 + KeyCoop
# common mod combinations
HDDT = HD + DT
HDHR = HD + HR
HDDTHR = HD + DT + HR
# how people naturally sort mods in combinations (HDDTHR, not DTHRHD)
# sphinx uses repr() here
# (see https://github.com/sphinx-doc/sphinx/issues/3857), so provide
# our own, more human readable docstrings. #: denotes sphinx docstrings.
#: [NM, EZ, HD, HT, DT, _NC, HR, FL, NF, SD, _PF, RX, AP, SO, AT, V2, TD,
#: FI, RD, CN, TP, K1, K2, K3, K4, K5, K6, K7, K8, K9, CO, MR]
ORDER = [NM, EZ, HD, HT, DT, _NC, HR, FL, NF, SD, _PF, RX, AP, SO, AT,
V2, TD, # we stop caring about order after this point
FI, RD, CN, TP, K1, K2, K3, K4, K5, K6, K7, K8, K9, CO, MR]
def __init__(self, value):
if isinstance(value, str):
value = ModCombination._parse_mod_string(value)
if isinstance(value, list):
mod = Mod.NM
for mod_str in value:
mod += Mod(mod_str)
value = mod.value
if isinstance(value, ModCombination):
value = value.value
super().__init__(value)
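if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module); the values below
    # follow directly from the bit definitions above.
    assert Mod("HDHR") == Mod.HD + Mod.HR == Mod.HDHR
    assert Mod(["HD", "DT"]) == Mod.HDDT
    assert Mod(576).short_name() == "NC"       # DT + _NC collapses to "NC"
    assert Mod(576).long_name() == "Nightcore"
    print([str(m) for m in Mod.HDDTHR.decompose(clean=True)])  # ['HD', 'DT', 'HR']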
|
the-stack_0_6715 | from collections import OrderedDict
from nmigen import *
from nmigen.hdl.rec import *
from .endpoint import *
__all__ = ["DoubleBuffer", "InputMultiplexer", "OutputMultiplexer"]
class DoubleBuffer(Elaboratable):
def __init__(self, *, depth, width, read_ack=False):
self.w_stb = Signal()
self.w_lst = Signal()
self.w_data = Signal(width)
self.w_drop = Signal()
self.w_rdy = Signal()
self.r_stb = Signal()
self.r_lst = Signal()
self.r_data = Signal(width)
self.r_rdy = Signal()
self.r_ack = Signal(1 if read_ack else 0)
self.depth = depth
self.width = width
self.read_ack = read_ack
def elaborate(self, platform):
m = Module()
banks = [Record([("w_addr", range(self.depth)), ("w_data", self.width), ("w_en", 1),
("r_addr", range(self.depth)), ("r_data", self.width), ("r_en", 1),
("valid", 1), ("level", range(self.depth + 1))],
name="bank_{}".format(i))
for i in range(2)]
for i, bank in enumerate(banks):
mem = Memory(depth=self.depth, width=self.width)
m.submodules["mem{}_wp".format(i)] = mem_wp = mem.write_port()
m.submodules["mem{}_rp".format(i)] = mem_rp = mem.read_port(transparent=False)
m.d.comb += [
mem_wp.addr.eq(bank.w_addr),
mem_wp.data.eq(bank.w_data),
mem_wp.en.eq(bank.w_en),
mem_rp.addr.eq(bank.r_addr),
mem_rp.en.eq(bank.r_en),
bank.r_data.eq(mem_rp.data),
]
bank_lru = Signal()
with m.FSM(reset="WRITE-0") as write_fsm:
with m.State("WAIT"):
with m.If(~banks[0].valid):
m.next = "WRITE-0"
with m.Elif(~banks[1].valid):
m.next = "WRITE-1"
for i, bank in enumerate(banks):
with m.State("WRITE-{}".format(i)):
w_addr_inc = Signal.like(bank.w_addr, name_suffix="_inc")
m.d.comb += w_addr_inc.eq(bank.w_addr + 1)
m.d.comb += [
self.w_rdy.eq(1),
bank.w_en.eq(self.w_stb),
bank.w_data.eq(self.w_data),
]
with m.If(self.w_stb):
with m.If(self.w_lst):
m.d.sync += bank.w_addr.eq(0)
m.next = "WAIT"
with m.If(~self.w_drop):
m.d.sync += [
bank.valid.eq(1),
bank.level.eq(w_addr_inc),
bank_lru.eq(1 - i),
]
with m.Elif(w_addr_inc == self.depth):
# Overflow. Flush remaining bytes.
m.d.sync += bank.w_addr.eq(0)
m.next = "FLUSH"
with m.Else():
m.d.sync += bank.w_addr.eq(w_addr_inc)
with m.State("FLUSH"):
m.d.comb += self.w_rdy.eq(1)
with m.If(self.w_stb & self.w_lst):
m.next = "WAIT"
with m.FSM() as read_fsm:
with m.State("WAIT"):
with m.If(banks[0].valid & ~(banks[1].valid & bank_lru)):
m.d.comb += banks[0].r_en.eq(1)
m.d.sync += banks[0].r_addr.eq(1)
m.d.sync += self.r_lst.eq(banks[0].level == 1)
m.next = "READ-0"
with m.Elif(banks[1].valid):
m.d.comb += banks[1].r_en.eq(1)
m.d.sync += banks[1].r_addr.eq(1)
m.d.sync += self.r_lst.eq(banks[1].level == 1)
m.next = "READ-1"
for i, bank in enumerate(banks):
with m.State("READ-{}".format(i)):
r_addr_inc = Signal.like(bank.r_addr, name_suffix="_inc")
m.d.comb += r_addr_inc.eq(bank.r_addr + 1)
m.d.comb += [
self.r_stb.eq(1),
self.r_data.eq(bank.r_data),
]
with m.If(self.r_rdy):
r_done = self.r_ack if self.read_ack else self.r_lst
with m.If(r_done):
m.d.sync += bank.valid.eq(0)
m.d.sync += bank.r_addr.eq(0)
m.next = "WAIT"
with m.Else():
m.d.comb += bank.r_en.eq(1)
with m.If(r_addr_inc == bank.level):
m.d.sync += bank.r_addr.eq(0)
m.d.sync += self.r_lst.eq(1)
with m.Else():
m.d.sync += bank.r_addr.eq(r_addr_inc)
m.d.sync += self.r_lst.eq(0)
return m
class InputMultiplexer(Elaboratable):
def __init__(self):
self.sel = Record([
("addr", 4, DIR_FANIN),
("xfer", 2, DIR_FANOUT),
("err", 1, DIR_FANOUT),
])
self.pkt = Record([
("stb", 1, DIR_FANOUT),
("lst", 1, DIR_FANOUT),
("data", 8, DIR_FANOUT),
("zlp", 1, DIR_FANOUT),
("rdy", 1, DIR_FANIN),
("ack", 1, DIR_FANIN),
])
self.sof = Signal()
self._ep_map = OrderedDict()
self._addr_map = OrderedDict()
def add_endpoint(self, ep, *, addr, buffered=False):
if not isinstance(ep, InputEndpoint):
raise TypeError("Endpoint must be an InputEndpoint, not {!r}"
.format(ep))
if not isinstance(addr, int):
raise TypeError("Endpoint address must be an integer, not {!r}"
.format(addr))
        if addr not in range(0, 16):
raise ValueError("Endpoint address must be between 0 and 15, not {}"
.format(addr))
if addr in self._ep_map:
raise ValueError("Endpoint address {} has already been assigned"
.format(addr))
if ep in self._addr_map:
raise ValueError("Endpoint {!r} has already been added at address {}"
.format(ep, self._addr_map[ep]))
if addr == 0 and ep.xfer is not Transfer.CONTROL:
raise ValueError("Invalid transfer type {} for endpoint 0; must be CONTROL"
.format(Transfer(ep.xfer).name))
self._ep_map[addr] = ep, buffered
self._addr_map[ep] = addr
def elaborate(self, platform):
m = Module()
port_map = OrderedDict({addr: Record.like(self.pkt) for addr in self._ep_map})
for addr, (ep, buffered) in self._ep_map.items():
port = port_map[addr]
if buffered:
dbuf = DoubleBuffer(depth=ep.max_size, width=port.data.width + port.zlp.width,
read_ack=ep.xfer is not Transfer.ISOCHRONOUS)
m.submodules["dbuf_{}".format(addr)] = dbuf
m.d.comb += [
dbuf.w_stb.eq(ep.stb),
dbuf.w_lst.eq(ep.lst),
dbuf.w_data.eq(Cat(ep.data, ep.zlp)),
ep.rdy.eq(dbuf.w_rdy),
port.stb.eq(dbuf.r_stb),
port.lst.eq(dbuf.r_lst),
Cat(port.data, port.zlp).eq(dbuf.r_data),
dbuf.r_rdy.eq(port.rdy),
dbuf.r_ack.eq(port.ack),
]
else:
m.d.comb += [
port.stb.eq(ep.stb),
port.lst.eq(ep.lst),
port.data.eq(ep.data),
port.zlp.eq(ep.zlp),
ep.rdy.eq(port.rdy),
ep.ack.eq(port.ack),
]
m.d.comb += ep.sof.eq(self.sof)
with m.Switch(self.sel.addr):
for addr, port in port_map.items():
ep, _ = self._ep_map[addr]
with m.Case(addr):
m.d.comb += [
self.sel.xfer.eq(ep.xfer),
port.connect(self.pkt),
]
with m.Default():
# Unknown endpoint.
m.d.comb += self.sel.err.eq(1)
return m
class OutputMultiplexer(Elaboratable):
def __init__(self):
self.sel = Record([
("addr", 4, DIR_FANIN),
("xfer", 2, DIR_FANOUT),
("err", 1, DIR_FANOUT),
])
self.pkt = Record([
("stb", 1, DIR_FANIN),
("lst", 1, DIR_FANIN),
("data", 8, DIR_FANIN),
("zlp", 1, DIR_FANIN),
("setup", 1, DIR_FANIN),
("drop", 1, DIR_FANIN),
("rdy", 1, DIR_FANOUT),
])
self.sof = Signal()
self._ep_map = OrderedDict()
self._addr_map = OrderedDict()
def add_endpoint(self, ep, *, addr, buffered=False):
if not isinstance(ep, OutputEndpoint):
raise TypeError("Endpoint must be an OutputEndpoint, not {!r}"
.format(ep))
if not isinstance(addr, int):
raise TypeError("Endpoint address must be an integer, not {!r}"
.format(addr))
        if addr not in range(0, 16):
raise ValueError("Endpoint address must be between 0 and 15, not {}"
.format(addr))
if addr in self._ep_map:
raise ValueError("Endpoint address {} has already been assigned"
.format(addr))
if ep in self._addr_map:
raise ValueError("Endpoint {!r} has already been added at address {}"
.format(ep, self._addr_map[ep]))
if addr == 0 and ep.xfer is not Transfer.CONTROL:
raise ValueError("Invalid transfer type {} for endpoint 0; must be CONTROL"
.format(Transfer(ep.xfer).name))
self._ep_map[addr] = ep, buffered
self._addr_map[ep] = addr
def elaborate(self, platform):
m = Module()
port_map = OrderedDict({addr: Record.like(self.pkt) for addr in self._ep_map})
for addr, (ep, buffered) in self._ep_map.items():
port = port_map[addr]
if buffered:
dbuf_w_data = Cat(port.data, port.zlp, port.setup)
dbuf = DoubleBuffer(depth=ep.max_size, width=len(dbuf_w_data))
m.submodules["dbuf_{}".format(addr)] = dbuf
m.d.comb += [
dbuf.w_stb.eq(port.stb),
dbuf.w_lst.eq(port.lst),
dbuf.w_data.eq(dbuf_w_data),
dbuf.w_drop.eq(port.drop),
port.rdy.eq(dbuf.w_rdy),
ep.stb.eq(dbuf.r_stb),
ep.lst.eq(dbuf.r_lst),
Cat(ep.data, ep.zlp, ep.setup).eq(dbuf.r_data),
dbuf.r_rdy.eq(ep.rdy),
]
else:
m.d.comb += [
ep.stb.eq(port.stb),
ep.lst.eq(port.lst),
ep.data.eq(port.data),
ep.zlp.eq(port.zlp),
ep.setup.eq(port.setup),
ep.drop.eq(port.drop),
port.rdy.eq(ep.rdy),
]
m.d.comb += ep.sof.eq(self.sof)
with m.Switch(self.sel.addr):
for addr, port in port_map.items():
ep, _ = self._ep_map[addr]
with m.Case(addr):
m.d.comb += [
self.sel.xfer.eq(ep.xfer),
port.connect(self.pkt),
]
with m.Default():
# Unknown endpoint.
m.d.comb += self.sel.err.eq(1)
return m
|
the-stack_0_6717 | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base class for TensorFlow snt.
This file contains the Abstract Base Class for defining Modules in TensorFlow.
A Module is an object that can be connected into the Graph multiple times
using the __call__ method, sharing variables automatically with no need to
explicitly use scopes or specify reuse=True.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import inspect
import types
# Dependency imports
import contextlib2
import six
from sonnet.python.modules import base_info
from sonnet.python.modules import util
import tensorflow as tf
import wrapt
# Import error class from base_errors for backward compatibility.
from sonnet.python.modules.base_errors import Error
from sonnet.python.modules.base_errors import NotConnectedError
from sonnet.python.modules.base_errors import ParentNotBuiltError
from sonnet.python.modules.base_errors import IncompatibleShapeError
from sonnet.python.modules.base_errors import UnderspecifiedError
from sonnet.python.modules.base_errors import NotSupportedError
from sonnet.python.modules.base_errors import NotInitializedError
from sonnet.python.modules.base_errors import DifferentGraphError
from sonnet.python.modules.base_errors import ModuleInfoError
# pylint: enable=g-bad-import-order
# pylint: enable=unused-import
from tensorflow.python.framework import ops
_MODULE_STACK = []
_CONNECTION_OBSERVER_STACK = []
@contextlib.contextmanager
def observe_connections(observer):
"""Notifies the observer whenever any Sonnet module is connected to the graph.
If a module contains nested modules, the observer is notified once for each
nested module, followed by the containing module.
For example:
```python
def logging_observer(connected_subgraph):
logging.info(connected_subgraph.module.module_name)
with snt.observe_connections(logging_observer):
output = imagenet_module(input_tensor)
```
Args:
observer: Callable accepting a single argument. Will be called with a
`ConnectedSubGraph` each time a module is connected to the graph.
Yields:
None: just yields control to the inner context.
"""
_CONNECTION_OBSERVER_STACK.append(observer)
try:
yield
finally:
_CONNECTION_OBSERVER_STACK.pop()
@six.add_metaclass(abc.ABCMeta)
class AbstractModule(object):
"""Superclass for Sonnet Modules.
This class defines the functionality that every module should implement,
principally the `build` method which is wrapped using `tf.make_template`
and called from `__call__`. Every time the module is called it will
be connected into the graph but using the same shared set of variables, thanks
to the template.
For this to work correctly, the `build` implementation in the derived class
must access all variables using `tf.get_variable`, not `tf.Variable`. The same
  set of variables must be created each time; if this is not the case, an Error
  will be raised.
Every subclass must call this class' `__init__` at the start of their
`__init__`, passing the relevant name. If this step is omitted variable
sharing will not work.
"""
def __init__(self, _sentinel=None, custom_getter=None,
name=None): # pylint: disable=invalid-name
"""Performs the initialisation necessary for all AbstractModule instances.
Every subclass of AbstractModule must begin their constructor with a call to
this constructor, i.e.
`super(MySubModule, self).__init__(custom_getter=custom_getter, name=name)`.
If you instantiate sub-modules in __init__ you must create them within the
`_enter_variable_scope` context manager to ensure they are in the module's
variable scope. Alternatively, instantiate sub-modules in `_build`.
Args:
_sentinel: Variable that only carries a non-None value if `__init__` was
called without named parameters. If this is the case, a deprecation
warning is issued in form of a `ValueError`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
name: Name of this module. Used to construct the Templated build function.
If `None` the module's class name is used (converted to snake case).
Raises:
TypeError: If `name` is not a string.
TypeError: If a given `custom_getter` is not callable.
ValueError: If `__init__` was called without named arguments.
"""
if _sentinel is not None:
raise ValueError("Calling AbstractModule.__init__ without named "
"arguments is not supported.")
if name is None:
name = util.to_snake_case(self.__class__.__name__)
elif not isinstance(name, six.string_types):
raise TypeError("Name must be a string, not {} of type {}.".format(
name, type(name)))
self._is_connected = False
self._connected_subgraphs = []
# If the given custom getter is a dictionary with a per-variable custom
# getter, wrap it into a single custom getter.
if isinstance(custom_getter, collections.Mapping):
self._custom_getter = util.custom_getter_router(
custom_getter_map=custom_getter,
name_fn=lambda name: name[len(self.scope_name) + 1:])
elif custom_getter is not None and not callable(custom_getter):
raise TypeError("Given custom_getter is not callable.")
else:
self._custom_getter = custom_getter
self._template = tf.make_template(name,
self._build_wrapper,
create_scope_now_=True,
custom_getter_=self._custom_getter)
self._original_name = name
self._unique_name = self._template.variable_scope.name.split("/")[-1]
# Copy signature of _build to __call__.
adapter_fn = getattr(self._build, "__func__", self._build)
@wrapt.decorator(adapter=adapter_fn)
def copy_signature(method, unused_instance, args, kwargs):
return method(*args, **kwargs)
@copy_signature
def __call__(instance, *args, **kwargs): # pylint: disable=invalid-name
return AbstractModule.__call__(instance, *args, **kwargs)
# use __dict__ instead of setting directly to avoid a Callable pytype error
self.__dict__["__call__"] = types.MethodType(__call__, self)
# Update __call__ and the object docstrings to enable better introspection.
self.__doc__ = self._build.__doc__
self.__call__.__func__.__doc__ = self._build.__doc__
# Keep track of which graph this module has been connected to. Sonnet
# modules cannot be connected to multiple graphs, as transparent variable
# sharing is impossible in that case.
self._graph = None
# Container for all variables created in this module and its sub-modules.
self._all_variables = set([])
# Calling `.defun()` causes the module's call method to become wrapped as
# a graph function.
self._defun_wrapped = False
def _build_wrapper(self, *args, **kwargs):
"""Function which will be wrapped in a Template to do variable sharing.
Passes through all arguments to the _build method, and returns the
corresponding outputs, plus the name_scope generated by this call of the
template.
Args:
*args: args list for self._build
**kwargs: kwargs dict for self._build
Returns:
A tuple containing (output from _build, scope_name).
"""
output = self._build(*args, **kwargs)
# Make a dummy subscope to check the name scope we are in. We could read
# the name scope from one of the outputs produced, except that the outputs
# could have been produced from a subscope instantiated by the build
# function, for example if inner modules are present. Calling name_scope
# here and creating a new subscope guarantees we get the right answer.
    # Because we don't create any ops inside this dummy scope, no extra memory
# will be consumed.
with tf.name_scope("dummy") as scope_name:
this_scope_name = scope_name[:-len("/dummy/")]
return output, this_scope_name
def _check_init_called(self):
"""Checks that the base class's __init__ method has been called.
Raises:
NotInitializedError: `AbstractModule.__init__` has not been called.
"""
try:
self._template
except AttributeError:
raise NotInitializedError("You may have forgotten to call super at the "
"start of %s.__init__."
% self.__class__.__name__)
def _set_module_info(self):
"""Creates a `ModuleInfo` and adds it to the graph collections."""
self._module_info = base_info.ModuleInfo(
module_name=self.module_name,
scope_name=self.scope_name,
class_name="{}.{}".format(
self.__class__.__module__, self.__class__.__name__),
connected_subgraphs=self._connected_subgraphs)
self._graph.add_to_collection(base_info.SONNET_COLLECTION_NAME,
self._module_info)
def _check_same_graph(self):
"""Checks that the module is not being connect to multiple Graphs.
An instance of a Sonnet module 'owns' the variables it contains, and permits
seamless variable sharing. As such, connecting a single module instance to
multiple Graphs is not possible - this function will raise an error should
that occur.
Raises:
DifferentGraphError: if the module is connected to a different Graph than
it was previously used in.
"""
with ops.init_scope():
      # We need `init_scope` in case we're running inside a defun. In that case
      # what we want is information about where the function will be called, not
# where the function is being built.
current_graph = tf.get_default_graph()
will_call_in_eager_context = tf.executing_eagerly()
if self._graph is None:
self._graph = current_graph
self._set_module_info()
if not will_call_in_eager_context:
# Same graph checks only make sense when calling from graph mode (in eager
# mode there is a single process level context where all modules are
# created).
if self._graph != current_graph:
raise DifferentGraphError("Cannot connect module to multiple Graphs.")
@abc.abstractmethod
def _build(self, *args, **kwargs):
"""Add elements to the Graph, computing output Tensors from input Tensors.
Subclasses must implement this method, which will be wrapped in a Template.
Args:
*args: Input Tensors.
**kwargs: Additional Python flags controlling connection.
Returns:
output Tensor(s).
"""
@contextlib.contextmanager
def _capture_variables(self):
"""Adds variables used by this module to self._all_variables.
Upon entering this context manager the module adds itself onto the top
of the module call stack. Any variables created with `tf.get_variable()`
inside `_build()` or `_enter_variable_scope()` while this module is on top
of the call stack will be added to `self._all_variables`.
Before exiting the context the module removes itself from the top of the
call stack, and adds all of the variables in `self._all_variables` to its
parent module (the new top) of the call stack.
Yields:
Nothing, the yield just transfers focus back to the inner context.
"""
_MODULE_STACK.append(self)
try:
with contextlib2.ExitStack() as stack:
# Ideally move re-entering store into Template.variable_scope.
template_store = getattr(self._template, "_template_store", None)
if template_store is not None:
# In eager mode, the template store keeps references to created
# variables such that they survive even if there are no references to
# them in Python code. Variables added to an eager template store are
# also added to TensorFlow global collections (unlike regular
# variables created in eager mode).
stack.enter_context(template_store.as_default())
stack.enter_context(
util.notify_about_variables(self._all_variables.add))
yield
finally:
# Remove `self` from `module_stack`, this happens as part of cleanup
# even if an error is raised.
_MODULE_STACK.pop()
if _MODULE_STACK:
# Peek into the stack to add created variables to the parent
parent_module = _MODULE_STACK[-1]
parent_module._all_variables.update(self._all_variables) # pylint: disable=protected-access
def _add_connected_subgraph(self, call_method, outputs, subgraph_name_scope,
*inputs_args, **inputs_kwargs):
"""Adds a newly connected subgraph.
Args:
call_method: the function used to connect this Sonnet module to the graph.
outputs: `call_method` outputs.
subgraph_name_scope: name scope of the newly connected subgraph.
*inputs_args: `self._build` inputs `*args`.
**inputs_kwargs: `self._build` inputs `*kwargs`.
"""
build_inputs = inspect.getcallargs(call_method,
*inputs_args, **inputs_kwargs)
# "self" should normally be in `build_inputs` but some people are decorating
# their `_build` function with `memoize`, in which case the function
# signature doesn't contain `self` anymore.
if "self" in build_inputs:
del build_inputs["self"]
connected_subgraph = base_info.ConnectedSubGraph(
module=self, name_scope=subgraph_name_scope,
inputs=build_inputs,
outputs=outputs)
self._connected_subgraphs.append(connected_subgraph)
for observer in _CONNECTION_OBSERVER_STACK:
observer(connected_subgraph)
@property
def defun_wrapped(self):
"""Returns boolean indicating whether this module is defun wrapped."""
return self._defun_wrapped
def defun(self):
"""Wraps this modules call method in a callable graph function."""
if not self._defun_wrapped:
self._defun_wrapped = True
self._call = tf.contrib.eager.defun(self._call)
def __call__(self, *args, **kwargs):
return self._call(*args, **kwargs)
def _call(self, *args, **kwargs):
"""Entry point when a module is called to connect it to the graph.
This is the entry point when users connect a Module into the Graph. The
underlying _build method will have been wrapped in a Template by the
constructor, and we call this template with the provided inputs here.
Note we use `_call` instead of `__call__` to allow instance level monkey
patching (see `defun`).
Args:
*args: Arguments for underlying _build method.
**kwargs: Keyword arguments for underlying _build method.
Returns:
The result of the underlying _build method.
"""
self._check_init_called()
self._check_same_graph()
with self._capture_variables():
outputs, subgraph_name_scope = self._template(*args, **kwargs)
self._is_connected = True
if not tf.executing_eagerly():
# In eager mode the module is called a lot more frequently than in graph
# mode (for each training step) and so we don't keep track of connected
# subgraphs (since there will be orders of magnitude more of them).
self._add_connected_subgraph(self._build, outputs, subgraph_name_scope,
*args, **kwargs)
return outputs
@property
def name_scopes(self):
"""Returns a tuple of all name_scopes generated by this module."""
if tf.executing_eagerly():
raise NotSupportedError(
"The name_scopes property is not supported in eager mode.")
return tuple(subgraph.name_scope for subgraph in self._connected_subgraphs)
@property
def variable_scope(self):
"""Returns the variable_scope declared by the module.
It is valid for library users to access the internal templated
variable_scope, but only makes sense to do so after connection. Therefore we
raise an error here if the variable_scope is requested before connection.
The only case where it does make sense to access the variable_scope before
connection is to get the post-uniquification name, which we support using
the separate .scope_name property.
Returns:
variable_scope: `tf.VariableScope` instance of the internal `tf.Template`.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
self._ensure_is_connected()
return self._template.variable_scope
@property
def scope_name(self):
"""Returns the full name of the Module's variable scope."""
return self._template.variable_scope.name
@property
def module_name(self):
"""Returns the name of the Module."""
return self._unique_name
@property
def is_connected(self):
"""Returns true iff the Module been connected to the Graph at least once."""
return self._is_connected
@property
def graph(self):
"""Returns the Graph instance which the module is connected to, or None."""
return self._graph
@property
def connected_subgraphs(self):
"""Returns the subgraphs created by this module so far."""
if tf.executing_eagerly():
raise NotSupportedError(
"Connected sub-graphs are not tracked in eager mode.")
return tuple(self._connected_subgraphs)
@property
def last_connected_subgraph(self):
"""Returns the last subgraph created by this module.
Returns:
The last connected subgraph.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
if tf.executing_eagerly():
raise NotSupportedError(
"Connected sub-graphs are not tracked in eager mode.")
self._ensure_is_connected()
return self._connected_subgraphs[-1]
@classmethod
def get_possible_initializer_keys(cls):
"""Returns the keys the dictionary of variable initializers may contain.
This provides the user with a way of knowing the initializer keys that are
available without having to instantiate a sonnet module. Subclasses may
override this class method if they need additional arguments to determine
what initializer keys may be provided.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
return getattr(cls, "POSSIBLE_INITIALIZER_KEYS", set())
def _ensure_is_connected(self):
"""Raise an Error if the module has not been connected yet.
Until the module is connected into the Graph, any variables created do
not exist yet and cannot be created in advance due to not knowing the size
of the input Tensor(s). This assertion ensures that any variables contained
in this module must now exist.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
if not self.is_connected:
raise NotConnectedError(
"Variables in {} not instantiated yet, __call__ the module "
"first.".format(self.scope_name))
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def _enter_variable_scope(self, reuse=None, check_same_graph=True):
"""Returns a contextlib.contextmanager to enter the internal variable scope.
This is useful for situations where submodules must be declared in the
constructor, or somewhere else that is not called under the `_build` method.
If such a case arises, calling `with self._enter_variable_scope():` will
cause the variables in the submodule to be correctly scoped.
An example justification for this is to allow the `Transposable` interface
to be implemented - you might want to construct all the submodules at
construction time so that you can call `.transpose()` and connect the
result of that before connecting the non-transposed module.
```python
class SomeModule(snt.AbstractModule):
def __init__(self, name="some_module"):
super(SomeModule, self).__init__(name=name)
with self._enter_variable_scope():
# We need to construct this submodule before we get to the _build
# method, for some reason.
self._sub_mod = snt.SomeSubmodule(name="some_submodule")
def _build(self, input):
# Connect to the already constructed submodule.
return self._sub_mod(input)
```
If you omit this then the submodule and parent module will appear to
be "side by side" rather than nested when viewed in the Graph viewer, and
functions such as `snt.get_variables_in_module()` or the `get_variables()`
method will not know about variables defined in the submodule.
Args:
reuse: Boolean passed to `tf.variable_scope`.
check_same_graph: Boolean to determine if same graph check should run. If
you are only entering the scope to name other variable scopes (e.g. not
to create/reuse variables) then it is legitimate to set this to False.
Yields:
The variable_scope inside the template.
"""
self._check_init_called()
if check_same_graph:
self._check_same_graph()
with self._capture_variables():
with tf.variable_scope(self._template.variable_scope, reuse=reuse) as vs:
yield vs
# pylint: enable=g-doc-return-or-yield
@property
def variables(self):
"""**All** `tf.Variable`s used when the module is connected.
This property does not rely on global collections and should generally be
preferred vs. `get_variables` and `get_all_variables`.
See the documentation for `AbstractModule._capture_variables()` for more
information about what variables are captured.
Returns:
A sorted (by variable name) tuple of `tf.Variable` objects.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
self._ensure_is_connected()
return util.sort_by_name(self._all_variables)
@property
def trainable_variables(self):
"""All **trainable** `tf.Variable`s used when the module is connected.
This property does not rely on global collections and should generally be
preferred vs. `get_variables` and `get_all_variables`.
See the documentation for `AbstractModule._capture_variables()` for more
information about what variables are captured.
Returns:
A sorted (by variable name) tuple of `tf.Variable` objects.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
return tuple(v for v in self.variables if v.trainable)
@property
def non_trainable_variables(self):
"""All **non-trainable** `tf.Variable`s used when the module is connected.
This property does not rely on global collections and should generally be
preferred vs. `get_variables` and `get_all_variables`.
See the documentation for `AbstractModule._capture_variables()` for more
information about what variables are captured.
Returns:
A sorted (by variable name) tuple of `tf.Variable` objects.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
return tuple(v for v in self.variables if not v.trainable)
def get_variables(self, collection=tf.GraphKeys.TRAINABLE_VARIABLES):
"""Returns tuple of `tf.Variable`s declared inside this module.
Note that this operates by searching this module's variable scope,
and so does not know about any modules that were constructed elsewhere but
used inside this module.
This method explicitly re-enters the Graph which this module has been
connected to.
Args:
collection: Collection to restrict query to. By default this is
        `tf.GraphKeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable
variables such as moving averages.
Returns:
A tuple of `tf.Variable` objects.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
self._ensure_is_connected()
# Explicitly re-enter Graph, in case the module is being queried with a
# different default Graph from the one it was connected to. If this was not
# here then querying the variables from a different graph scope would
# produce an empty tuple.
with self._graph.as_default():
return util.get_variables_in_scope(
self.variable_scope, collection=collection)
def get_all_variables(self, collection=tf.GraphKeys.TRAINABLE_VARIABLES):
"""Returns all `tf.Variable`s used when the module is connected.
See the documentation for `AbstractModule._capture_variables()` for more
information.
Args:
collection: Collection to restrict query to. By default this is
        `tf.GraphKeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable
variables such as moving averages.
Returns:
A sorted (by variable name) tuple of `tf.Variable` objects.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
self._ensure_is_connected()
collection_variables = set(tf.get_collection(collection))
# Return variables in self._all_variables that are in `collection`
return util.sort_by_name(self._all_variables & collection_variables)
def __getstate__(self):
raise NotSupportedError(
"Sonnet AbstractModule instances cannot be serialized. You should "
"instead serialize all necessary configuration which will allow "
"modules to be rebuilt.")
@six.add_metaclass(abc.ABCMeta)
class Transposable(object):
"""Transposable module interface.
The Transposable interface requires that transposable modules implement
a method called `transpose`, returning a module that is the transposed
version of the one the method is called on.
Calling the method twice should return a module with the same specifications
as the original module.
When implementing a transposable module, special care is required to make
sure that parameters needed to instantiate the module are provided as
functions whose invocation is deferred to graph construction time.
For example, in Linear we might want to call:
```python
linear = snt.Linear(name="linear", output_size=output_size)
linear_transpose = linear.transpose()
```
where the output_size for linear_transpose is not known yet, as linear is
not yet connected to the graph: output_size is passed to linear_transpose's
constructor as a lambda returning linear.input_size. The lambda will return
the correct value once linear is given an input.
Notice that linear_transpose's output_size value does not need to be defined
until the module is connected to the graph.
"""
@abc.abstractmethod
def transpose(self, name=None, **kwargs):
"""Builds and returns transposed version of module.
Args:
name: Name of the transposed module.
**kwargs: Additional Python flags controlling transposition.
Returns:
Transposed version of the module.
"""
@abc.abstractmethod
def input_shape(self):
"""Returns shape of input `Tensor` passed at last call to `build`."""
class Module(AbstractModule):
"""Module wrapping a function provided by the user."""
def __init__(self, build, custom_getter=None, name=None):
"""Constructs a module with a given build function.
The Module class can be used to wrap a function assembling a network into a
module.
For example, the following code implements a simple one-hidden-layer MLP
model by defining a function called make_model and using a Module instance
to wrap it.
```python
def make_model(inputs):
lin1 = snt.Linear(name="lin1", output_size=10)(inputs)
relu1 = tf.nn.relu(lin1, name="relu1")
lin2 = snt.Linear(name="lin2", output_size=20)(relu1)
return lin2
model = snt.Module(name='simple_mlp', build=make_model)
outputs = model(inputs)
```
The `partial` package from `functools` can be used to bake configuration
parameters into the function at construction time, as shown in the following
example.
```python
from functools import partial
def make_model(inputs, output_sizes):
lin1 = snt.Linear(name="lin1", output_size=output_sizes[0])(inputs)
relu1 = tf.nn.relu(lin1, name="relu1")
lin2 = snt.Linear(name="lin2", output_size=output_sizes[1])(relu1)
return lin2
model = snt.Module(name='simple_mlp',
                       build=partial(make_model, output_sizes=[10, 20]))
outputs = model(inputs)
```
Args:
build: Callable to be invoked when connecting the module to the graph.
The `build` function is invoked when the module is called, and its
role is to specify how to add elements to the Graph, and how to
compute output Tensors from input Tensors.
The `build` function signature can include the following parameters:
*args - Input Tensors.
**kwargs - Additional Python parameters controlling connection.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Module name. If set to `None` (the default), the name will be set to
that of the `build` callable converted to `snake_case`. If `build` has
no name, the name will be 'module'.
Raises:
TypeError: If build is not callable.
TypeError: If a given `custom_getter` is not callable.
"""
if not callable(build):
raise TypeError("Input 'build' must be callable.")
if name is None:
name = util.name_for_callable(build)
super(Module, self).__init__(custom_getter=custom_getter, name=name)
self._build_function = build
def _build(self, *args, **kwargs):
"""Forwards call to the passed-in build function."""
return self._build_function(*args, **kwargs)
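# --- Illustrative usage sketch (not part of the original file) ---
# A minimal AbstractModule subclass showing the pattern described in the
# docstrings above: call the superclass constructor first, create variables
# with tf.get_variable inside _build, and connect the module twice so the
# second call reuses the same variables. The class and shapes are hypothetical.
if __name__ == "__main__":
  class _DemoLinear(AbstractModule):
    """Toy linear layer used only for this demonstration."""
    def __init__(self, output_size, name="demo_linear"):
      super(_DemoLinear, self).__init__(name=name)
      self._output_size = output_size
    def _build(self, inputs):
      input_size = inputs.get_shape().as_list()[-1]
      w = tf.get_variable("w", shape=[input_size, self._output_size])
      return tf.matmul(inputs, w)
  mod = _DemoLinear(output_size=4)
  x = tf.placeholder(tf.float32, shape=[None, 3])
  y1 = mod(x)
  y2 = mod(x)  # Second connection reuses the same weight variable.
  print([v.name for v in mod.trainable_variables])  # a single shared "w" variable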
|
the-stack_0_6718 | """Train script.
Usage:
train.py <hparams> <dataset_root> [--cuda=<id>]
train.py -h | --help
Options:
-h --help Show this screen.
    --cuda=<id>   GPU device id to run on [default: 0].
"""
import torch
import numpy as np
from docopt import docopt
from os.path import join
from irl_dcb.config import JsonConfig
from dataset import process_data
from irl_dcb.builder import build
from irl_dcb.trainer import Trainer
torch.manual_seed(42619)
np.random.seed(42619)
if __name__ == '__main__':
args = docopt(__doc__)
device = torch.device('cuda:{}'.format(args['--cuda']))
hparams = args["<hparams>"]
dataset_root = args["<dataset_root>"]
hparams = JsonConfig(hparams)
# dir of pre-computed beliefs
DCB_dir_HR = join(dataset_root, 'DCBs/HR/')
DCB_dir_LR = join(dataset_root, 'DCBs/LR/')
data_name = '{}x{}'.format(hparams.Data.im_w, hparams.Data.im_h)
# bounding box of the target object (for scanpath ratio evaluation)
bbox_annos = np.load(join(dataset_root,
'coco_search_annos_{}.npy'.format(data_name)),
allow_pickle=True).item()
# load ground-truth human scanpaths
fixation_path = join(dataset_root,
'processed_human_scanpaths_TP_trainval.npy')
human_scanpaths = np.load(fixation_path,
allow_pickle=True,
encoding='latin1')
# exclude incorrect scanpaths
if hparams.Train.exclude_wrong_trials:
human_scanpaths = list(filter(lambda x: x['correct'] == 1,
human_scanpaths))
# process fixation data
dataset = process_data(human_scanpaths, DCB_dir_HR, DCB_dir_LR, bbox_annos,
hparams)
built = build(hparams, True, device, dataset['catIds'])
trainer = Trainer(**built, dataset=dataset, device=device, hparams=hparams)
trainer.train()
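# --- Illustrative usage note (not part of the original script) ---
# Example invocation (both paths are hypothetical):
#   python train.py hparams/coco_search18.json /path/to/dataset_root --cuda=0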
|
the-stack_0_6720 | """
.. module:: CConstraintL1
:synopsis: L1 Constraint
.. moduleauthor:: Battista Biggio <[email protected]>
.. moduleauthor:: Ambra Demontis <[email protected]>
"""
from secml.array import CArray
from secml.optim.constraints import CConstraint
class CConstraintL1(CConstraint):
"""L1 Constraint.
Parameters
----------
center : scalar or CArray, optional
Center of the constraint. Use an array to specify a different
value for each dimension. Default 0.
radius : scalar, optional
The semidiagonal of the constraint. Default 1.
Attributes
----------
class_type : 'l1'
"""
__class_type = 'l1'
def __init__(self, center=0, radius=1):
super(CConstraintL1, self).__init__()
self.center = center
self.radius = radius
@property
def center(self):
"""Center of the constraint."""
return self._center
@center.setter
def center(self, value):
"""Center of the constraint."""
self._center = CArray(value)
@property
def radius(self):
"""Semidiagonal of the constraint."""
return self._radius
@radius.setter
def radius(self, value):
"""Semidiagonal of the constraint."""
self._radius = float(value)
def _constraint(self, x):
"""Returns the value of the constraint for the sample x.
The constraint value y is given by:
y = ||x - center||_1 - radius
Parameters
----------
x : CArray
Input array.
Returns
-------
float
Value of the constraint.
"""
return float((x - self.center).norm(order=1) - self.radius)
def _projection(self, x):
"""Project x onto feasible domain / within the given constraint.
Solves the optimisation problem (using the algorithm from [1]):
min_w 0.5 * || w - x ||_2^2 , s.t. || w ||_1 <= s
Parameters
----------
x : CArray
Input sample.
Returns
-------
CArray
Projected x onto feasible domain if constraint is violated.
Notes
-----
Solves the problem by a reduction to the positive simplex case.
"""
s = float(self.radius)
v = (x - self.center).ravel()
# compute the vector of absolute values
u = abs(v)
# check if v is already a solution
if u.sum() <= s:
# l1-norm is <= s
out = v + self._center
return out.tosparse() if x.issparse else out
# v is not already a solution: optimum lies on the boundary (norm == s)
# project *u* on the simplex
w = self._euclidean_proj_simplex(u, s=s)
# compute the solution to the original problem on v
w *= v.sign()
out = w + self._center
return out.tosparse() if x.issparse else out
def _euclidean_proj_simplex(self, v, s=1):
"""Compute the Euclidean projection on a positive simplex.
Solves the optimisation problem (using the algorithm from [1]):
min_w 0.5 * || w - v ||_2^2 ,
s.t. \\sum_i w_i = s, w_i >= 0
Parameters
----------
v : CArray
1-Dimensional vector
s : int, optional
Radius of the simplex. Default 1.
Returns
-------
w : CArray
Euclidean projection of v on the simplex.
Notes
-----
The complexity of this algorithm is in O(n log(n)) as it involves
sorting v. Better alternatives exist for high-dimensional sparse
vectors (cf. [1]). However, this implementation still easily
scales to millions of dimensions.
References
----------
[1] Efficient Projections onto the l1-Ball for
Learning in High Dimensions
John Duchi, Shai Shalev-Shwartz, Yoram Singer,
and Tushar Chandra.
International Conference on Machine Learning (ICML 2008)
http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf
"""
v = CArray(v).ravel()
d = v.size
# check if we are already on the simplex
if v.sum() == s and (v >= 0).sum() == d:
return v # best projection: itself!
# get the array of cumulative sums of a sorted (decreasing) copy of v
u = v.deepcopy()
u.sort(inplace=True)
u = u[::-1]
if u.issparse:
u_nnz = CArray(u.nnz_data).todense()
cssv = u_nnz.cumsum()
else:
cssv = u.cumsum()
# get the number of > 0 components of the optimal solution
        # (only considering non-null elements in v)
j = CArray.arange(1, cssv.size+1)
if u.issparse:
rho = (j * u_nnz > (cssv - s)).sum() - 1
else:
rho = (j * u > (cssv - s)).sum() - 1
# compute the Lagrange multiplier associated to the simplex constraint
theta = (cssv[rho] - s) / (rho + 1.0)
# compute the projection by thresholding v using theta
w = v
if w.issparse:
p = CArray(w.nnz_data)
p -= theta
w[w.nnz_indices] = p
else:
w -= theta
w[w < 0] = 0
return w
def _gradient(self, x):
"""Returns the gradient of c(x) in x.
Parameters
----------
x : CArray
Input sample.
Returns
-------
CArray
The gradient of the constraint computed on x.
"""
return (x - self.center).sign().ravel()
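# --- Illustrative usage sketch (not part of the original module) ---
# Demonstrates projecting a point that violates the constraint back onto the
# L1 ball. It assumes the CConstraint base class exposes public constraint()
# and projection() wrappers around the private methods defined above; the
# numbers are made up for illustration.
if __name__ == '__main__':
    c = CConstraintL1(center=0, radius=1)
    x = CArray([0.8, 0.7])  # l1 norm is 1.5 > radius, so the constraint is violated
    print(c.constraint(x))  # positive value: x lies outside the feasible ball
    x_proj = c.projection(x)
    print(x_proj.norm(order=1))  # ~1.0: the projected point lies on the boundary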
|
the-stack_0_6721 | import warnings
from typing import Any, Callable, Hashable, List, Mapping, Optional, Set, Tuple, Union
import numpy as np
from numba import guvectorize
from xarray import Dataset
from . import variables
from .typing import ArrayLike, DType
def check_array_like(
a: Any,
dtype: Union[None, DType, Set[DType]] = None,
kind: Union[None, str, Set[str]] = None,
ndim: Union[None, int, Set[int]] = None,
) -> None:
"""Raise an error if an array does not match given attributes (dtype, kind, dimensions).
Parameters
----------
a
Array of any type.
dtype
The dtype the array must have, by default None (don't check)
If a set, then the array must have one of the dtypes in the set.
kind
The dtype kind the array must be, by default None (don't check).
If a set, then the array must be one of the kinds in the set.
ndim
Number of dimensions the array must have, by default None (don't check)
If a set, then the array must have one of the number of dimensions in the set.
Raises
------
TypeError
* If `a` does not have the attibutes `dtype`, `shape`, and `ndim`.
* If `a` does not have a dtype that matches `dtype`.
* If `a` is not a dtype kind that matches `kind`.
ValueError
If the number of dimensions of `a` does not match `ndim`.
"""
array_attrs = "ndim", "dtype", "shape"
for k in array_attrs:
if not hasattr(a, k):
raise TypeError(f"Not an array. Missing attribute '{k}'")
if dtype is not None:
if isinstance(dtype, set):
dtype = {np.dtype(t) for t in dtype}
if a.dtype not in dtype:
raise TypeError(
f"Array dtype ({a.dtype}) does not match one of {dtype}"
)
elif a.dtype != np.dtype(dtype):
raise TypeError(f"Array dtype ({a.dtype}) does not match {np.dtype(dtype)}")
if kind is not None:
if isinstance(kind, set):
if a.dtype.kind not in kind:
raise TypeError(
f"Array dtype kind ({a.dtype.kind}) does not match one of {kind}"
)
elif a.dtype.kind != kind:
raise TypeError(f"Array dtype kind ({a.dtype.kind}) does not match {kind}")
if ndim is not None:
if isinstance(ndim, set):
if a.ndim not in ndim:
raise ValueError(
f"Number of dimensions ({a.ndim}) does not match one of {ndim}"
)
elif ndim != a.ndim:
raise ValueError(f"Number of dimensions ({a.ndim}) does not match {ndim}")
def encode_array(x: ArrayLike) -> Tuple[ArrayLike, List[Any]]:
"""Encode array values as integers indexing unique values.
The codes created for each unique element in the array correspond
to order of appearance, not the natural sort order for the array
dtype.
Examples
--------
>>> encode_array(['c', 'a', 'a', 'b']) # doctest: +SKIP
(array([0, 1, 1, 2], dtype=int64), array(['c', 'a', 'b'], dtype='<U1'))
Parameters
----------
x
[array-like, shape: (M,)]
Array of elements to encode of any type.
Returns
-------
indexes : (M,) ndarray
Encoded values as integer indices.
values : ndarray
Unique values in original array in order of appearance.
"""
# argsort not implemented in dask: https://github.com/dask/dask/issues/4368
names, index, inverse = np.unique(x, return_index=True, return_inverse=True) # type: ignore[no-untyped-call]
index = np.argsort(index)
rank = np.empty_like(index)
rank[index] = np.arange(len(index))
return rank[inverse], names[index]
class MergeWarning(UserWarning):
"""Warnings about merging datasets."""
pass
def merge_datasets(input: Dataset, output: Dataset) -> Dataset:
"""Merge the input and output datasets into a new dataset, giving precedence to variables
and attributes in the output.
Parameters
----------
input
The input dataset.
output
Dataset
The output dataset.
Returns
-------
Dataset
The merged dataset. If `input` and `output` have variables (or attributes) with the same name,
a `MergeWarning` is issued, and the corresponding variables (or attributes) from the `output`
dataset are used.
"""
input_vars = {str(v) for v in input.data_vars.keys()}
output_vars = {str(v) for v in output.data_vars.keys()}
clobber_vars = sorted(list(input_vars & output_vars))
if len(clobber_vars) > 0:
warnings.warn(
f"The following variables in the input dataset will be replaced in the output: {', '.join(clobber_vars)}",
MergeWarning,
)
ds = output.merge(input, compat="override")
# input attrs are ignored during merge, so combine them with output, and assign to the new dataset
input_attr_keys = {str(v) for v in input.attrs.keys()}
output_attr_keys = {str(v) for v in output.attrs.keys()}
clobber_attr_keys = sorted(list(input_attr_keys & output_attr_keys))
if len(clobber_attr_keys) > 0:
warnings.warn(
f"The following global attributes in the input dataset will be replaced in the output: {', '.join(clobber_attr_keys)}",
MergeWarning,
)
combined_attrs = {**input.attrs, **output.attrs}
return ds.assign_attrs(combined_attrs) # type: ignore[no-any-return, no-untyped-call]
def conditional_merge_datasets(input: Dataset, output: Dataset, merge: bool) -> Dataset:
"""Merge the input and output datasets only if `merge` is true, otherwise just return the output."""
return merge_datasets(input, output) if merge else output
def define_variable_if_absent(
ds: Dataset,
default_variable_name: Hashable,
variable_name: Optional[Hashable],
func: Callable[[Dataset], Dataset],
) -> Dataset:
"""Define a variable in a dataset using the given function if it's missing.
Parameters
----------
ds : Dataset
The dataset to look for the variable, and used by the function to calculate the variable.
default_variable_name
The default name of the variable.
variable_name
The actual name of the variable, or None to use the default.
func
The function to calculate the variable.
Returns
-------
A new dataset containing the variable.
Raises
------
ValueError
If a variable with a non-default name is missing from the dataset.
"""
variable_name = variable_name or default_variable_name
if variable_name in ds:
return ds
if variable_name != default_variable_name:
raise ValueError(
f"Variable '{variable_name}' with non-default name is missing and will not be automatically defined."
)
return func(ds)
def create_dataset(
data_vars: Mapping[Hashable, Any] = None, # type: ignore[assignment]
coords: Mapping[Hashable, Any] = None, # type: ignore[assignment]
attrs: Mapping[Hashable, Any] = None, # type: ignore[assignment]
) -> Dataset:
"""Create an Xarray dataset and validate its variables.
This is a wrapper around `xarray.Dataset`, with the additional
convenience of validating variables against the ones defined by sgkit,
and annotating these variables with a `comment` attribute containing
their doc comments.
Parameters
----------
data_vars
A mapping defining data variables.
coords
A mapping defining coordinates.
attrs
Global attributes.
Returns
-------
A new dataset.
"""
ds = Dataset(data_vars, coords, attrs)
ds = variables.annotate(ds)
return ds
def split_array_chunks(n: int, blocks: int) -> Tuple[int, ...]:
"""Compute chunk sizes for an array split into blocks.
This is similar to `numpy.split_array` except that it
will compute the sizes of the resulting splits rather
than explicitly partitioning an array.
Parameters
----------
n
Number of array elements.
blocks
Number of partitions to generate chunk sizes for.
Examples
--------
>>> split_array_chunks(7, 2)
(4, 3)
>>> split_array_chunks(7, 3)
(3, 2, 2)
>>> split_array_chunks(7, 1)
(7,)
>>> split_array_chunks(7, 7)
(1, 1, 1, 1, 1, 1, 1)
Raises
------
ValueError
* If `blocks` > `n`.
* If `n` <= 0.
* If `blocks` <= 0.
Returns
-------
chunks : Tuple[int, ...]
Number of elements associated with each block.
This will equal `n//blocks` or `n//blocks + 1` for
each block, depending on how many of the latter
are necessary to make the partitioning complete.
"""
if blocks > n:
raise ValueError(
f"Number of blocks ({blocks}) cannot be greater "
f"than number of elements ({n})"
)
if n <= 0:
raise ValueError(f"Number of elements ({n}) must be >= 0")
if blocks <= 0:
raise ValueError(f"Number of blocks ({blocks}) must be >= 0")
n_div, n_mod = np.divmod(n, blocks)
chunks = n_mod * (n_div + 1,) + (blocks - n_mod) * (n_div,)
return chunks # type: ignore[no-any-return]
def max_str_len(a: ArrayLike) -> ArrayLike:
"""Compute maximum string length for elements of an array
Parameters
----------
a
Array of any shape, must have string or object dtype
Returns
-------
max_length
Scalar array with same type as provided array
"""
if a.size == 0:
raise ValueError("Max string length cannot be calculated for empty array")
if a.dtype.kind == "O":
a = a.astype(str)
if a.dtype.kind not in {"U", "S"}:
raise ValueError(f"Array must have string dtype (got dtype {a.dtype})")
lens = np.frompyfunc(len, 1, 1)(a) # type: ignore[no-untyped-call]
if isinstance(a, np.ndarray):
lens = np.asarray(lens)
return lens.max()
@guvectorize( # type: ignore
[
"void(int8[:], int64[:])",
"void(int16[:], int64[:])",
"void(int32[:], int64[:])",
"void(int64[:], int64[:])",
],
"(n)->()",
nopython=True,
cache=True,
)
def hash_array(x: ArrayLike, out: ArrayLike) -> None: # pragma: no cover
"""Hash entries of ``x`` using the DJBX33A hash function.
This is ~5 times faster than calling ``tobytes()`` followed
by ``hash()`` on array columns. This function also does not
hold the GIL, making it suitable for use with the Dask
threaded scheduler.
Parameters
----------
x
1D array of type integer.
Returns
-------
Array containing a single hash value of type int64.
"""
out[0] = 5381
for i in range(x.shape[0]):
out[0] = out[0] * 33 + x[i]
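# --- Illustrative usage sketch (not part of the original module) ---
# Because of the (n)->() gufunc signature, applying hash_array to a 2-D array
# hashes each row independently. The example data below is made up.
if __name__ == "__main__":
    rows = np.array([[0, 1, 2], [0, 1, 2], [2, 2, 0]], dtype=np.int8)
    print(hash_array(rows))  # one int64 hash per row; the first two rows collide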
|
the-stack_0_6723 | import json, urllib
import xmltodict
import logging
import concurrent.futures
from urllib import request, parse
from .parser import search_result
from .proj_convertor import ProjConvertor
from .address_factory import AddressFactory
logger = logging.getLogger(__name__)
OGCIO_RECORD_COUNT = 200
NEAR_THRESHOLD = 0.05 # 50 metres
def search_address_with_ogcio(address):
ogcio_url = "https://www.als.ogcio.gov.hk/lookup?q={}&n={}".format(
parse.quote(address), OGCIO_RECORD_COUNT
)
post_response = urllib.request.urlopen(url=ogcio_url)
res = post_response.read()
ogcio_data = json.dumps(xmltodict.parse(res), ensure_ascii=False)
ogcio_data = json.loads(ogcio_data)
searched_result = search_result(address, ogcio_data)
ocgio_records = []
for data in searched_result:
address_factory = AddressFactory("ogcio", data)
ocgio_records.append(address_factory.create_address())
return ocgio_records
def search_address_from_land(address):
land_url = "https://geodata.gov.hk/gs/api/v1.0.0/locationSearch?q={}".format(
parse.quote(address)
)
post_response = urllib.request.urlopen(url=land_url)
res = post_response.read()
land_data = json.loads(res)
land_records = []
for data in land_data:
# TODO: check if def is needed
proj = ProjConvertor("EPSG:2326", "EPSG:4326", data["x"], data["y"])
lat, lng = proj.transform_projection()
data["lat"] = float("{0:.4f}".format(lat))
data["lng"] = float("{0:.4f}".format(lng))
address_factory = AddressFactory("land", data)
land_records.append(address_factory.create_address())
return land_records
def query_address(address):
# Fetch records from OGCIO & Land Department
ogcio_records = search_address_with_ogcio(address)
land_records = search_address_from_land(address)
sorted_results = []
# if records from Land Department have any exception
if len(land_records) == 0:
return ogcio_records
# 1. Best Case: Top OGCIO result appears in land result(s)
    # We compare against every Land record rather than only the first one, because
    # in some cases the most accurate result does not appear at the top of the list.
for land_record in land_records:
if ogcio_records[0].distance_to(land_record) < NEAR_THRESHOLD:
# Best Case: Land result and ogcio return the same address
return ogcio_records
    # 2. The best OGCIO result does not appear in the Land results,
    # so we take the first Land result as the reference, keep every OGCIO result
    # within NEAR_THRESHOLD of it, and sort those results by distance to it.
for ogcio_record in ogcio_records:
distance = ogcio_record.distance_to(land_records[0])
if distance < NEAR_THRESHOLD:
ogcio_record["distance"] = distance
sorted_results.append(ogcio_record)
if len(sorted_results) > 0:
        # list.sort() sorts in place and returns None, so sort first, then return the list
        sorted_results.sort(key=lambda record: record.distance)
        return sorted_results
# 3. Not found in OGCIO but in land result.
# We try to search again from ogcio using the land result
assumed_land_result = land_records[0]
full_address_to_search = land_records[0].full_address("chi")
if full_address_to_search != "":
ogcio_records = search_address_with_ogcio(full_address_to_search)
if ogcio_records[0].distance_to(assumed_land_result) < NEAR_THRESHOLD:
# second round result is the nearest result
return ogcio_records
return land_records
def batch_query_addresses(addresses):
records = []
with concurrent.futures.ProcessPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(query_address, address) for address in addresses]
for future in concurrent.futures.as_completed(futures):
records.append(future.result())
return records
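# --- Illustrative usage sketch (not part of the original module) ---
# Both lookups call live OGCIO and Lands Department web services, so this only
# shows the intended call pattern; the address string is a hypothetical example.
# Note the relative imports above mean this module is meant to be run as part of
# its package rather than as a standalone script.
if __name__ == "__main__":
    for record in query_address("添馬添美道2號"):
        print(record)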
|
the-stack_0_6724 | from aiogram import Bot
from aiogram.contrib.middlewares.logging import LoggingMiddleware
from aiogram.dispatcher import Dispatcher
from aiogram.utils.executor import start_webhook, start_polling
from loguru import logger as log
from abc import ABC, abstractmethod
from utils.singletone import SingletonABC
from utils.json_config_reader import parse_config
from aiogram.contrib.fsm_storage.memory import MemoryStorage
class AbstractModel(SingletonABC):
def __init__(self, config_file_name='project.json'):
self.config = parse_config(config_file_name)
self._bot = Bot(token=self.config.api.token)
self._memory_storage = MemoryStorage()
self._dispatcher = Dispatcher(self._bot, storage = self._memory_storage)
self._dispatcher.middleware.setup(LoggingMiddleware())
def get_dispatcher(self):
return self._dispatcher
def get_bot(self):
return self._bot
def get_storage(self):
return self._memory_storage
@abstractmethod
async def on_startup(self, _dispatcher):
pass
@abstractmethod
async def on_shutdown(self, _dispatcher):
log.info("Closing storage...")
await _dispatcher.storage.close()
await _dispatcher.storage.wait_closed()
log.info("Bot shutdown...")
@abstractmethod
def start(self):
pass
class WebhookModel(AbstractModel):
async def on_startup(self, _dispatcher):
await super().on_startup(_dispatcher)
await self._bot.set_webhook(self.config.webhook.host + self.config.webhook.path)
async def on_shutdown(self, _dispatcher):
await super().on_shutdown(_dispatcher)
await self._bot.delete_webhook()
def start(self):
log.warning("The application is running in webhook mode.")
start_webhook(
dispatcher=self._dispatcher,
webhook_path=self.config.webhook.path,
on_startup=self.on_startup,
on_shutdown=self.on_shutdown,
skip_updates=True,
host=self.config.webapp.host,
port=self.config.webapp.port,
)
class PollingModel(AbstractModel):
async def on_startup(self, _dispatcher):
await super().on_startup(_dispatcher)
async def on_shutdown(self, _dispatcher):
await super().on_shutdown(_dispatcher)
def start(self):
log.warning("The application is running in polling mode.")
start_polling(
dispatcher=self._dispatcher,
skip_updates=True,
on_shutdown=self.on_shutdown,
on_startup=self.on_startup
)
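# --- Illustrative usage sketch (not part of the original module) ---
# Shows the intended call pattern: pick a model, register handlers on its
# dispatcher, then start it. It assumes a valid 'project.json' config with an
# API token exists; the /start handler below is hypothetical.
if __name__ == "__main__":
    model = PollingModel()  # or WebhookModel() to serve updates over a webhook
    dp = model.get_dispatcher()
    @dp.message_handler(commands=["start"])
    async def start_cmd(message):
        await message.answer("Bot is up and running.")
    model.start()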
|
the-stack_0_6725 | import mock
import unittest
import manager
def create_mock_load_builder_fn(mock_rings):
"""To avoid the need for swift.common.ring library, mock a basic rings
dictionary, keyed by path. Each ring has enough logic to hold a dictionary
with a single 'devs' key, which stores the list of passed dev(s) by
add_dev().
If swift (actual) ring representation diverges (see _load_builder),
this mock will need to be adapted.
:param mock_rings: a dict containing the dict form of the rings
"""
def mock_load_builder_fn(path):
class mock_ring(object):
def __init__(self, path):
self.path = path
def to_dict(self):
return mock_rings[self.path]
def add_dev(self, dev):
mock_rings[self.path]['devs'].append(dev)
return mock_ring(path)
return mock_load_builder_fn
MOCK_SWIFT_RINGS = {
'account': 'account.builder',
'container': 'container.builder',
'object': 'object.builder'
}
class TestSwiftManager(unittest.TestCase):
@mock.patch('os.path.isfile')
@mock.patch.object(manager, '_load_builder')
def test_has_minimum_zones(self, mock_load_builder, mock_is_file):
mock_rings = {}
mock_load_builder.side_effect = create_mock_load_builder_fn(mock_rings)
for ring in MOCK_SWIFT_RINGS:
mock_rings[ring] = {
'replicas': 3,
'devs': [{'zone': 1}, {'zone': 2}, None, {'zone': 3}],
}
ret = manager.has_minimum_zones(MOCK_SWIFT_RINGS)
self.assertTrue(ret['result'])
# Increase the replicas to make sure that it returns false
for ring in MOCK_SWIFT_RINGS:
mock_rings[ring]['replicas'] = 4
ret = manager.has_minimum_zones(MOCK_SWIFT_RINGS)
self.assertFalse(ret['result'])
@mock.patch.object(manager, '_load_builder')
def test_exists_in_ring(self, mock_load_builder):
mock_rings = {}
mock_load_builder.side_effect = create_mock_load_builder_fn(mock_rings)
ring = 'account'
mock_rings[ring] = {
'devs': [
{'replication_port': 6000, 'zone': 1, 'weight': 100.0,
'ip': '172.16.0.2', 'region': 1, 'port': 6000,
'replication_ip': '172.16.0.2', 'parts': 2, 'meta': '',
'device': u'bcache10', 'parts_wanted': 0, 'id': 199},
None, # Ring can have holes, so add None to simulate
{'replication_port': 6000, 'zone': 1, 'weight': 100.0,
'ip': '172.16.0.2', 'region': 1, 'id': 198,
'replication_ip': '172.16.0.2', 'parts': 2, 'meta': '',
'device': u'bcache13', 'parts_wanted': 0, 'port': 6000},
]
}
node = {
'ip': '172.16.0.2',
'region': 1,
'account_port': 6000,
'zone': 1,
'replication_port': 6000,
'weight': 100.0,
'device': u'bcache10',
}
ret = manager.exists_in_ring(ring, node)
self.assertTrue(ret)
node['region'] = 2
ret = manager.exists_in_ring(ring, node)
self.assertFalse(ret)
@mock.patch.object(manager, '_write_ring')
@mock.patch.object(manager, '_load_builder')
def test_add_dev(self, mock_load_builder, mock_write_ring):
mock_rings = {}
mock_load_builder.side_effect = create_mock_load_builder_fn(mock_rings)
ring = 'account'
mock_rings[ring] = {
'devs': []
}
new_dev = {
'meta': '',
'zone': 1,
'ip': '172.16.0.2',
'device': '/dev/sdb',
'port': 6000,
'weight': 100
}
manager.add_dev(ring, new_dev)
mock_write_ring.assert_called_once()
self.assertTrue('id' not in mock_rings[ring]['devs'][0])
|
the-stack_0_6728 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.operators.check_operator import \
CheckOperator, ValueCheckOperator, IntervalCheckOperator
from airflow.utils.decorators import apply_defaults
class BigQueryCheckOperator(CheckOperator):
"""
Performs checks against BigQuery. The ``BigQueryCheckOperator`` expects
a sql query that will return a single row. Each value on that
first row is evaluated using python ``bool`` casting. If any of the
values return ``False`` the check is failed and errors out.
Note that Python bool casting evals the following as ``False``:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
the count ``== 0``. You can craft much more complex query that could,
for instance, check that the table has the same number of rows as
the source table upstream, or that the count of today's partition is
greater than yesterday's partition, or that a set of metrics are less
than 3 standard deviation for the 7 day average.
This operator can be used as a data quality check in your pipeline, and
depending on where you put it in your DAG, you have the choice to
stop the critical path, preventing from
publishing dubious data, or on the side and receive email alerts
without stopping the progress of the DAG.
:param sql: the sql to be executed
:type sql: str
:param bigquery_conn_id: reference to the BigQuery database
:type bigquery_conn_id: str
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:type use_legacy_sql: bool
"""
template_fields = ('sql',)
template_ext = ('.sql', )
@apply_defaults
def __init__(self,
sql,
bigquery_conn_id='bigquery_default',
use_legacy_sql=True,
*args, **kwargs):
super(BigQueryCheckOperator, self).__init__(sql=sql, *args, **kwargs)
self.bigquery_conn_id = bigquery_conn_id
self.sql = sql
self.use_legacy_sql = use_legacy_sql
def get_db_hook(self):
return BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
use_legacy_sql=self.use_legacy_sql)
class BigQueryValueCheckOperator(ValueCheckOperator):
"""
Performs a simple value check using sql code.
:param sql: the sql to be executed
:type sql: str
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:type use_legacy_sql: bool
"""
template_fields = ('sql',)
template_ext = ('.sql', )
@apply_defaults
def __init__(self, sql,
pass_value,
tolerance=None,
bigquery_conn_id='bigquery_default',
use_legacy_sql=True,
*args, **kwargs):
super(BigQueryValueCheckOperator, self).__init__(
sql=sql, pass_value=pass_value, tolerance=tolerance,
*args, **kwargs)
self.bigquery_conn_id = bigquery_conn_id
self.use_legacy_sql = use_legacy_sql
def get_db_hook(self):
return BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
use_legacy_sql=self.use_legacy_sql)
class BigQueryIntervalCheckOperator(IntervalCheckOperator):
"""
Checks that the values of metrics given as SQL expressions are within
a certain tolerance of the ones from days_back before.
This method constructs a query like so ::
SELECT {metrics_threshold_dict_key} FROM {table}
WHERE {date_filter_column}=<date>
:param table: the table name
:type table: str
:param days_back: number of days between ds and the ds we want to check
against. Defaults to 7 days
:type days_back: int
:param metrics_threshold: a dictionary of ratios indexed by metrics, for
example 'COUNT(*)': 1.5 would require a 50 percent or less difference
between the current day, and the prior days_back.
:type metrics_threshold: dict
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:type use_legacy_sql: bool
"""
template_fields = ('table',)
@apply_defaults
def __init__(self, table, metrics_thresholds, date_filter_column='ds',
days_back=-7, bigquery_conn_id='bigquery_default',
use_legacy_sql=True, *args, **kwargs):
super(BigQueryIntervalCheckOperator, self).__init__(
table=table, metrics_thresholds=metrics_thresholds,
date_filter_column=date_filter_column, days_back=days_back,
*args, **kwargs)
self.bigquery_conn_id = bigquery_conn_id
self.use_legacy_sql = use_legacy_sql
def get_db_hook(self):
return BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
use_legacy_sql=self.use_legacy_sql)
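# --- Illustrative usage sketch (not part of the original module) ---
# Wires a simple row-count data quality check into a DAG. The project, dataset,
# table and connection names are hypothetical.
if __name__ == '__main__':
    from datetime import datetime
    from airflow import DAG
    with DAG('bq_quality_checks', start_date=datetime(2019, 1, 1),
             schedule_interval='@daily') as dag:
        check_not_empty = BigQueryCheckOperator(
            task_id='check_table_not_empty',
            sql='SELECT COUNT(*) FROM `my-project.my_dataset.my_table`',
            bigquery_conn_id='bigquery_default',
            use_legacy_sql=False,
        )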
|
the-stack_0_6729 | from .calling_conventions import DEFAULT_CC
class Callable(object):
"""
Callable is a representation of a function in the binary that can be
interacted with like a native python function.
If you set perform_merge=True (the default), the result will be returned to you, and
you can get the result state with callable.result_state.
Otherwise, you can get the resulting simulation manager at callable.result_path_group.
"""
def __init__(self, project, addr, concrete_only=False, perform_merge=True, base_state=None, toc=None, cc=None):
"""
:param project: The project to operate on
:param addr: The address of the function to use
The following parameters are optional:
:param concrete_only: Throw an exception if the execution splits into multiple paths
:param perform_merge: Merge all result states into one at the end (only relevant if concrete_only=False)
:param base_state: The state from which to do these runs
:param toc: The address of the table of contents for ppc64
:param cc: The SimCC to use for a calling convention
"""
self._project = project
self._addr = addr
self._concrete_only = concrete_only
self._perform_merge = perform_merge
self._base_state = base_state
self._toc = toc
self._cc = cc if cc is not None else DEFAULT_CC[project.arch.name](project.arch)
self._deadend_addr = project.simos.return_deadend
self.result_path_group = None
self.result_state = None
def set_base_state(self, state):
"""
Swap out the state you'd like to use to perform the call
:param state: The state to use to perform the call
"""
self._base_state = state
def __call__(self, *args):
self.perform_call(*args)
if self.result_state is not None:
return self.result_state.solver.simplify(self._cc.get_return_val(self.result_state, stack_base=self.result_state.regs.sp - self._cc.STACKARG_SP_DIFF))
else:
return None
def perform_call(self, *args):
state = self._project.factory.call_state(self._addr, *args,
cc=self._cc,
base_state=self._base_state,
ret_addr=self._deadend_addr,
toc=self._toc)
def step_func(pg):
pg2 = pg.prune()
if len(pg2.active) > 1:
raise AngrCallableMultistateError("Execution split on symbolic condition!")
return pg2
caller = self._project.factory.simulation_manager(state)
caller.run(step_func=step_func if self._concrete_only else None).unstash(from_stash='deadended')
caller.prune(filter_func=lambda pt: pt.addr == self._deadend_addr)
if len(caller.active) == 0:
raise AngrCallableError("No paths returned from function")
self.result_path_group = caller.copy()
if self._perform_merge:
caller.merge()
self.result_state = caller.active[0]
from .errors import AngrCallableError, AngrCallableMultistateError
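# --- Illustrative usage sketch (not part of the original module) ---
# Callable objects are normally obtained through project.factory.callable();
# the binary path and the 'add' symbol below are hypothetical.
if __name__ == '__main__':
    import angr
    proj = angr.Project('./a.out', auto_load_libs=False)  # hypothetical binary
    add = proj.factory.callable(proj.loader.find_symbol('add').rebased_addr)
    ret = add(1, 2)  # drives symbolic execution of `add`; returns a claripy AST
    print(ret)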
|
the-stack_0_6730 | """Implementation of the int type based on r_longlong.
Useful for 32-bit applications manipulating values a bit larger than
fits in an 'int'.
"""
import operator
from rpython.rlib.rarithmetic import LONGLONG_BIT, intmask, r_longlong, r_uint
from rpython.rlib.rbigint import rbigint
from rpython.tool.sourcetools import func_renamer, func_with_new_name
from pypy.interpreter.error import oefmt
from pypy.interpreter.gateway import WrappedDefault, unwrap_spec
from pypy.objspace.std.intobject import W_IntObject
from pypy.objspace.std.longobject import W_AbstractLongObject, W_LongObject
from pypy.objspace.std.util import COMMUTATIVE_OPS
# XXX: breaks translation
#LONGLONG_MIN = r_longlong(-1 << (LONGLONG_BIT - 1))
class W_SmallLongObject(W_AbstractLongObject):
_immutable_fields_ = ['longlong']
def __init__(self, value):
assert isinstance(value, r_longlong)
self.longlong = value
@staticmethod
def fromint(value):
return W_SmallLongObject(r_longlong(value))
@staticmethod
def frombigint(bigint):
return W_SmallLongObject(bigint.tolonglong())
def asbigint(self):
return rbigint.fromrarith_int(self.longlong)
def longval(self):
return self.longlong
def __repr__(self):
return '<W_SmallLongObject(%d)>' % self.longlong
def _int_w(self, space):
a = self.longlong
b = intmask(a)
if b == a:
return b
raise oefmt(space.w_OverflowError,
"long int too large to convert to int")
def uint_w(self, space):
a = self.longlong
if a < 0:
raise oefmt(space.w_ValueError,
"cannot convert negative integer to unsigned int")
b = r_uint(a)
if r_longlong(b) == a:
return b
raise oefmt(space.w_OverflowError,
"long int too large to convert to unsigned int")
def bigint_w(self, space, allow_conversion=True):
return self.asbigint()
def _bigint_w(self, space):
return self.asbigint()
def _float_w(self, space):
return float(self.longlong)
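    # Note (added): the fallback branches in int() below mirror W_LongObject.int()
    # and reference `self.num` and `W_Root`, neither of which is defined or imported
    # in this module; for a plain W_SmallLongObject only the first branch is taken.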
def int(self, space):
if type(self) is W_SmallLongObject:
return self
if not space.is_overloaded(self, space.w_int, '__int__'):
return W_LongObject(self.num)
return W_Root.int(self, space)
def descr_float(self, space):
return space.newfloat(float(self.longlong))
def descr_neg(self, space):
a = self.longlong
try:
if a == r_longlong(-1 << (LONGLONG_BIT-1)):
raise OverflowError
x = -a
except OverflowError:
self = _small2long(space, self)
return self.descr_neg(space)
return W_SmallLongObject(x)
def descr_abs(self, space):
return self if self.longlong >= 0 else self.descr_neg(space)
def descr_bool(self, space):
return space.newbool(bool(self.longlong))
def descr_invert(self, space):
x = ~self.longlong
return W_SmallLongObject(x)
@unwrap_spec(w_modulus=WrappedDefault(None))
def descr_pow(self, space, w_exponent, w_modulus=None):
if isinstance(w_exponent, W_AbstractLongObject):
self = _small2long(space, self)
return self.descr_pow(space, w_exponent, w_modulus)
elif not isinstance(w_exponent, W_IntObject):
return space.w_NotImplemented
x = self.longlong
y = space.int_w(w_exponent)
if space.is_none(w_modulus):
try:
return _pow(space, x, y, r_longlong(0))
except ValueError:
self = self.descr_float(space)
return space.pow(self, w_exponent, space.w_None)
except OverflowError:
self = _small2long(space, self)
return self.descr_pow(space, w_exponent, w_modulus)
elif isinstance(w_modulus, W_IntObject):
w_modulus = w_modulus.as_w_long(space)
elif not isinstance(w_modulus, W_AbstractLongObject):
return space.w_NotImplemented
elif not isinstance(w_modulus, W_SmallLongObject):
self = _small2long(space, self)
return self.descr_pow(space, w_exponent, w_modulus)
z = w_modulus.longlong
if z == 0:
raise oefmt(space.w_ValueError, "pow() 3rd argument cannot be 0")
if y < 0:
# don't implement with smalllong
self = _small2long(space, self)
return self.descr_pow(space, w_exponent, w_modulus)
try:
return _pow(space, x, y, z)
except ValueError:
self = self.descr_float(space)
return space.pow(self, w_exponent, w_modulus)
except OverflowError:
self = _small2long(space, self)
return self.descr_pow(space, w_exponent, w_modulus)
@unwrap_spec(w_modulus=WrappedDefault(None))
def descr_rpow(self, space, w_base, w_modulus=None):
if isinstance(w_base, W_IntObject):
# Defer to w_base<W_SmallLongObject>.descr_pow
w_base = w_base.descr_long(space)
elif not isinstance(w_base, W_AbstractLongObject):
return space.w_NotImplemented
return w_base.descr_pow(space, self, w_modulus)
def _make_descr_cmp(opname):
op = getattr(operator, opname)
bigint_op = getattr(rbigint, opname)
@func_renamer('descr_' + opname)
def descr_cmp(self, space, w_other):
if isinstance(w_other, W_IntObject):
result = op(self.longlong, w_other.int_w(space))
elif not isinstance(w_other, W_AbstractLongObject):
return space.w_NotImplemented
elif isinstance(w_other, W_SmallLongObject):
result = op(self.longlong, w_other.longlong)
else:
result = bigint_op(self.asbigint(), w_other.asbigint())
return space.newbool(result)
return descr_cmp
descr_lt = _make_descr_cmp('lt')
descr_le = _make_descr_cmp('le')
descr_eq = _make_descr_cmp('eq')
descr_ne = _make_descr_cmp('ne')
descr_gt = _make_descr_cmp('gt')
descr_ge = _make_descr_cmp('ge')
def _make_descr_binop(func, ovf=True):
opname = func.__name__[1:]
descr_name, descr_rname = 'descr_' + opname, 'descr_r' + opname
long_op = getattr(W_LongObject, descr_name)
@func_renamer(descr_name)
def descr_binop(self, space, w_other):
if isinstance(w_other, W_IntObject):
w_other = w_other.as_w_long(space)
elif not isinstance(w_other, W_AbstractLongObject):
return space.w_NotImplemented
elif not isinstance(w_other, W_SmallLongObject):
self = _small2long(space, self)
return long_op(self, space, w_other)
if ovf:
try:
return func(self, space, w_other)
except OverflowError:
self = _small2long(space, self)
w_other = _small2long(space, w_other)
return long_op(self, space, w_other)
else:
return func(self, space, w_other)
if opname in COMMUTATIVE_OPS:
@func_renamer(descr_rname)
def descr_rbinop(self, space, w_other):
return descr_binop(self, space, w_other)
return descr_binop, descr_rbinop
long_rop = getattr(W_LongObject, descr_rname)
@func_renamer(descr_rname)
def descr_rbinop(self, space, w_other):
if isinstance(w_other, W_IntObject):
w_other = w_other.as_w_long(space)
elif not isinstance(w_other, W_AbstractLongObject):
return space.w_NotImplemented
elif not isinstance(w_other, W_SmallLongObject):
self = _small2long(space, self)
return long_rop(self, space, w_other)
if ovf:
try:
return func(w_other, space, self)
except OverflowError:
self = _small2long(space, self)
w_other = _small2long(space, w_other)
return long_rop(self, space, w_other)
else:
return func(w_other, space, self)
return descr_binop, descr_rbinop
def _add(self, space, w_other):
x = self.longlong
y = w_other.longlong
z = x + y
if ((z ^ x) & (z ^ y)) < 0:
raise OverflowError
return W_SmallLongObject(z)
descr_add, descr_radd = _make_descr_binop(_add)
def _sub(self, space, w_other):
x = self.longlong
y = w_other.longlong
z = x - y
if ((z ^ x) & (z ^ ~y)) < 0:
raise OverflowError
return W_SmallLongObject(z)
descr_sub, descr_rsub = _make_descr_binop(_sub)
def _mul(self, space, w_other):
x = self.longlong
y = w_other.longlong
z = _llong_mul_ovf(x, y)
return W_SmallLongObject(z)
descr_mul, descr_rmul = _make_descr_binop(_mul)
def _floordiv(self, space, w_other):
x = self.longlong
y = w_other.longlong
try:
if y == -1 and x == r_longlong(-1 << (LONGLONG_BIT-1)):
raise OverflowError
z = x // y
except ZeroDivisionError:
raise oefmt(space.w_ZeroDivisionError, "integer division by zero")
return W_SmallLongObject(z)
descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv)
def _mod(self, space, w_other):
x = self.longlong
y = w_other.longlong
try:
if y == -1 and x == r_longlong(-1 << (LONGLONG_BIT-1)):
raise OverflowError
z = x % y
except ZeroDivisionError:
raise oefmt(space.w_ZeroDivisionError, "integer modulo by zero")
return W_SmallLongObject(z)
descr_mod, descr_rmod = _make_descr_binop(_mod)
def _divmod(self, space, w_other):
x = self.longlong
y = w_other.longlong
try:
if y == -1 and x == r_longlong(-1 << (LONGLONG_BIT-1)):
raise OverflowError
z = x // y
except ZeroDivisionError:
raise oefmt(space.w_ZeroDivisionError, "integer divmod by zero")
# no overflow possible
m = x % y
return space.newtuple([W_SmallLongObject(z), W_SmallLongObject(m)])
descr_divmod, descr_rdivmod = _make_descr_binop(_divmod)
def _lshift(self, space, w_other):
a = self.longlong
# May overflow
b = space.int_w(w_other)
if r_uint(b) < LONGLONG_BIT: # 0 <= b < LONGLONG_BIT
c = a << b
if a != (c >> b):
raise OverflowError
return W_SmallLongObject(c)
if b < 0:
raise oefmt(space.w_ValueError, "negative shift count")
# b >= LONGLONG_BIT
if a == 0:
return self
raise OverflowError
descr_lshift, descr_rlshift = _make_descr_binop(_lshift)
def _rshift(self, space, w_other):
a = self.longlong
# May overflow
b = space.int_w(w_other)
if r_uint(b) >= LONGLONG_BIT: # not (0 <= b < LONGLONG_BIT)
if b < 0:
raise oefmt(space.w_ValueError, "negative shift count")
# b >= LONGLONG_BIT
if a == 0:
return self
a = -1 if a < 0 else 0
else:
a = a >> b
return W_SmallLongObject(a)
descr_rshift, descr_rrshift = _make_descr_binop(_rshift, ovf=False)
def _and(self, space, w_other):
a = self.longlong
b = w_other.longlong
res = a & b
return W_SmallLongObject(res)
descr_and, descr_rand = _make_descr_binop(_and, ovf=False)
def _or(self, space, w_other):
a = self.longlong
b = w_other.longlong
res = a | b
return W_SmallLongObject(res)
descr_or, descr_ror = _make_descr_binop(_or, ovf=False)
def _xor(self, space, w_other):
a = self.longlong
b = w_other.longlong
res = a ^ b
return W_SmallLongObject(res)
descr_xor, descr_rxor = _make_descr_binop(_xor, ovf=False)
def _llong_mul_ovf(a, b):
# xxx duplication of the logic from translator/c/src/int.h
longprod = a * b
doubleprod = float(a) * float(b)
doubled_longprod = float(longprod)
# Fast path for normal case: small multiplicands, and no info
# is lost in either method.
if doubled_longprod == doubleprod:
return longprod
# Somebody somewhere lost info. Close enough, or way off? Note
# that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0).
# The difference either is or isn't significant compared to the
# true value (of which doubleprod is a good approximation).
diff = doubled_longprod - doubleprod
absdiff = abs(diff)
absprod = abs(doubleprod)
# absdiff/absprod <= 1/32 iff
# 32 * absdiff <= absprod -- 5 good bits is "close enough"
if 32.0 * absdiff <= absprod:
return longprod
raise OverflowError("integer multiplication")
def _small2long(space, w_small):
return W_LongObject(w_small.asbigint())
def _pow(space, iv, iw, iz):
if iw < 0:
if iz != 0:
raise oefmt(space.w_ValueError,
"pow() 2nd argument cannot be negative when 3rd "
"argument specified")
raise ValueError
temp = iv
ix = r_longlong(1)
while iw > 0:
if iw & 1:
ix = _llong_mul_ovf(ix, temp)
iw >>= 1 # Shift exponent down by 1 bit
if iw == 0:
break
temp = _llong_mul_ovf(temp, temp) # Square the value of temp
if iz:
# If we did a multiplication, perform a modulo
ix %= iz
temp %= iz
if iz:
ix %= iz
return W_SmallLongObject(ix)
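# Worked sketch of _pow (added for illustration): for iv=3, iw=13 (binary 1101) and
# iz=0, the loop multiplies ix by temp at each set bit, i.e. ix = 3 * 3**4 * 3**8 =
# 3**13, squaring temp between bits; with iz != 0 every product is reduced mod iz.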
|
the-stack_0_6731 | import pickle
import scipy.stats as st
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import auc
from sklearn.model_selection import StratifiedKFold
import xgboost as xgb
from sklearn.model_selection import KFold
from sklearn.metrics import matthews_corrcoef,make_scorer
def train_xgb(X,
y,
mod_number=1,
cv=None,
outfile="model.pickle",
n_iter_search=100,
nfolds=20,
random_state=42):
"""
Train an XGBoost model with hyper parameter optimization.
Parameters
----------
X : matrix
Matrix with all the features, every instance should be coupled to the y-value
y : vector
Vector with the class, every value should be coupled to an x-vector with features
Returns
-------
    float
        Best cross-validated ROC-AUC score found by the randomized search; the
        fitted RandomizedSearchCV object itself is pickled to `outfile`.
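    Example
    -------
    Minimal usage sketch (added for illustration; assumes a pandas DataFrame `X`
    of features and a binary label vector `y`; the file name is illustrative)::

        best_auc = train_xgb(X, y, outfile="model.pickle",
                             n_iter_search=10, nfolds=5)
        model = pickle.load(open("model.pickle", "rb"))
        preds = model.predict_proba(X)[:, 1]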
"""
xgb_handle = xgb.XGBClassifier()
one_to_left = st.beta(10, 1)
from_zero_positive = st.expon(0, 50)
#Define distributions to sample from for hyper parameter optimization
param_dist = {
"n_estimators": st.randint(25, 150),
"max_depth": st.randint(5, 10),
"learning_rate": st.uniform(0.05, 0.4),
#"colsample_bytree": one_to_left,
"subsample": one_to_left,
"gamma": st.uniform(0, 10),
"reg_alpha": from_zero_positive,
"min_child_weight": from_zero_positive,
}
if not cv: cv = KFold(n_splits=nfolds, shuffle=True,random_state=random_state)
mcc = make_scorer(matthews_corrcoef)
random_search = RandomizedSearchCV(xgb_handle, param_distributions=param_dist,
n_iter=n_iter_search,verbose=10,scoring="roc_auc",
n_jobs=1,refit=True,cv=cv)
random_search.fit(X, y)
random_search.feats = X.columns
pickle.dump(random_search,open(outfile,"wb"))
    return random_search.best_score_ |
the-stack_0_6734 | # Problem statement: Consider an array a of n integers. We perform q queries of the following type on a:
# sort all the elements in the subsegment a[l..r] (l and r inclusive).
# Given k, can you find and print the value at index k (where 0 <= k < n) after performing all q queries?
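# Worked example (added for illustration): for a = [4, 3, 2, 1] and queries
# (l, r) = (0, 2) then (1, 3), the array becomes [2, 3, 4, 1] and then [2, 1, 3, 4];
# with k = 2 the printed answer is 3.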
import sys
##### Read Data
dat = [x.split() for x in sys.stdin.readlines()]
N = int(dat[0][0])
Q = int(dat[0][1])
k = int(dat[0][2])
a = list(map(int, dat[1]))
q = [list(map(int, x)) for x in dat[2:len(dat)]]
##### Process Queries
b = sorted(a)
lmin, rmax, pmax, qmin = (N-1), 0, 0, (N-1)
pmin, qmax, flag = (N-1), 0, 1
count, span_q, ladder, revlad = [], 0, 0, 0
if Q >= 2:
ladder = all(q[i+1][0] > q[i][0] for i in range(Q-1))
revlad = all(q[i+1][1] < q[i][1] for i in range(Q-1))
if a != b and ladder < 1 and revlad < 1:
for i in range(Q):
l, r = q[i][0], q[i][1]
if (r-l) > (rmax-lmin):
lmin, rmax = l, r
if l < pmin:
pmin, pmax = l, r
elif l == pmin and pmax < r:
pmax = r
if r > qmax:
qmin, qmax = l, r
elif r == qmax and qmin > l:
qmin = l
for i in range(Q):
l, r = q[i][0], q[i][1]
if l > lmin and r < rmax: continue
if l > pmin and r < pmax: continue
if l > qmin and r < qmax: continue
if i < (Q-1):
if l >= q[i+1][0] and r <= q[i+1][1]:
continue
if i > 0:
if l >= q[i-flag][0] and r <= q[i-flag][1]:
flag += 1
continue
else:
flag = 1
count += [i]
span_q += r-l+1
# Perform Queries
if ladder > 0:
l, r, Qu = q[0][0], q[0][1], int((k+5)/5)
a[l:r+1] = sorted(a[l:r+1])
for i in range(1, Q):
l, r, r0, m, sig = q[i][0], q[i][1], q[i-1][1], 0, 0
if l > r0 or (r-r0) > 0.1*(r0-l):
a[l:r+1] = sorted(a[l:r+1])
continue
if k < l: break
count = list(range(r0+1, r+1))
for j in range(len(count)):
p, new_A = count[j], a[count[j]]
l, r0 = q[i][0], q[i-1][1]
if a[l] >= new_A:
del(a[p]); a[l:l] = [new_A]; continue
elif a[r0+j-1] <= new_A:
del(a[p]); a[r0+j:r0+j] = [new_A]; continue
while sig < 1:
m = int((l+r0)/2)
if a[m] > new_A:
r0 = m
elif a[m+1] < new_A:
l = m+1
else:
del(a[p]); a[m+1:m+1] = [new_A]
sig = 1
elif revlad > 0:
l, r, Qu = q[0][0], q[0][1], int((k+5)/5)
a[l:r+1] = sorted(a[l:r+1])
for i in range(1, Q):
l, r, l0, m, sig = q[i][0], q[i][1], q[i-1][0], 0, 0
if k > r: break
if r < l0:
a[l:r+1] = sorted(a[l:r+1]); continue
count = list(range(l, l0))
for j in range(len(count)):
p, new_A = count[j], a[count[j]]
if a[l0] >= new_A:
del(a[p]); a[l0:l0] = [new_A]; continue
elif a[r] <= new_A:
del(a[p]); a[r:r] = [new_A]; continue
while sig < 1:
m = int((l0+r)/2)
if a[m] > new_A:
r = m
elif a[m+1] < new_A:
l0 = m+1
else:
del(a[p]); a[m+1:m+1] = [new_A]
sig = 1
elif span_q < 1e9 and a != b:
for i in count:
l, r = q[i][0], q[i][1]
a[l:(r+1)] = sorted(a[l:(r+1)])
else:
a[pmin:qmax+1] = sorted(a[pmin:qmax+1])
print(a[k])
|
the-stack_0_6735 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_addition
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
add_operators = linear_operator_addition.add_operators
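# Illustrative sketch (added; not part of the original test file): with the default
# addition tiers, two diagonal operators collapse into a single LinearOperatorDiag.
#
#   op_a = linalg.LinearOperatorDiag([1., 2.])
#   op_b = linalg.LinearOperatorDiag([3., 4.])
#   op_sum = add_operators([op_a, op_b])   # -> [LinearOperatorDiag]
#   op_sum[0].to_dense()                   # [[4., 0.], [0., 6.]]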
# pylint: disable=unused-argument
class _BadAdder(linear_operator_addition._Adder):
"""Adder that will fail if used."""
def can_add(self, op1, op2):
raise AssertionError("BadAdder.can_add called!")
def _add(self, op1, op2, operator_name, hints):
raise AssertionError("This line should not be reached")
# pylint: enable=unused-argument
class LinearOperatorAdditionCorrectnessTest(test.TestCase):
"""Tests correctness of addition with combinations of a few Adders.
Tests here are done with the _DEFAULT_ADDITION_TIERS, which means
add_operators should reduce all operators resulting in one single operator.
This shows that we are able to correctly combine adders using the tiered
system. All Adders should be tested separately, and there is no need to test
every Adder within this class.
"""
def test_one_operator_is_returned_unchanged(self):
op_a = linalg.LinearOperatorDiag([1., 1.])
op_sum = add_operators([op_a])
self.assertEqual(1, len(op_sum))
self.assertIs(op_sum[0], op_a)
def test_at_least_one_operators_required(self):
with self.assertRaisesRegex(ValueError, "must contain at least one"):
add_operators([])
def test_attempting_to_add_numbers_raises(self):
with self.assertRaisesRegex(TypeError, "contain only LinearOperator"):
add_operators([1, 2])
@test_util.run_deprecated_v1
def test_two_diag_operators(self):
op_a = linalg.LinearOperatorDiag(
[1., 1.], is_positive_definite=True, name="A")
op_b = linalg.LinearOperatorDiag(
[2., 2.], is_positive_definite=True, name="B")
with self.cached_session():
op_sum = add_operators([op_a, op_b])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorDiag)
self.assertAllClose([[3., 0.], [0., 3.]], op.to_dense())
# Adding positive definite operators produces positive def.
self.assertTrue(op.is_positive_definite)
# Real diagonal ==> self-adjoint.
self.assertTrue(op.is_self_adjoint)
# Positive definite ==> non-singular
self.assertTrue(op.is_non_singular)
# Enforce particular name for this simple case
self.assertEqual("Add/B__A/", op.name)
@test_util.run_deprecated_v1
def test_three_diag_operators(self):
op1 = linalg.LinearOperatorDiag(
[1., 1.], is_positive_definite=True, name="op1")
op2 = linalg.LinearOperatorDiag(
[2., 2.], is_positive_definite=True, name="op2")
op3 = linalg.LinearOperatorDiag(
[3., 3.], is_positive_definite=True, name="op3")
with self.cached_session():
op_sum = add_operators([op1, op2, op3])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertTrue(isinstance(op, linalg_lib.LinearOperatorDiag))
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense())
# Adding positive definite operators produces positive def.
self.assertTrue(op.is_positive_definite)
# Real diagonal ==> self-adjoint.
self.assertTrue(op.is_self_adjoint)
# Positive definite ==> non-singular
self.assertTrue(op.is_non_singular)
@test_util.run_deprecated_v1
def test_diag_tril_diag(self):
op1 = linalg.LinearOperatorDiag(
[1., 1.], is_non_singular=True, name="diag_a")
op2 = linalg.LinearOperatorLowerTriangular(
[[2., 0.], [0., 2.]],
is_self_adjoint=True,
is_non_singular=True,
name="tril")
op3 = linalg.LinearOperatorDiag(
[3., 3.], is_non_singular=True, name="diag_b")
with self.cached_session():
op_sum = add_operators([op1, op2, op3])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorLowerTriangular)
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense())
# The diag operators will be self-adjoint (because real and diagonal).
# The TriL operator has the self-adjoint hint set.
self.assertTrue(op.is_self_adjoint)
# Even though op1/2/3 are non-singular, this does not imply op is.
# Since no custom hint was provided, we default to None (unknown).
self.assertEqual(None, op.is_non_singular)
@test_util.run_deprecated_v1
def test_matrix_diag_tril_diag_uses_custom_name(self):
op0 = linalg.LinearOperatorFullMatrix(
[[-1., -1.], [-1., -1.]], name="matrix")
op1 = linalg.LinearOperatorDiag([1., 1.], name="diag_a")
op2 = linalg.LinearOperatorLowerTriangular(
[[2., 0.], [1.5, 2.]], name="tril")
op3 = linalg.LinearOperatorDiag([3., 3.], name="diag_b")
with self.cached_session():
op_sum = add_operators([op0, op1, op2, op3], operator_name="my_operator")
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorFullMatrix)
self.assertAllClose([[5., -1.], [0.5, 5.]], op.to_dense())
self.assertEqual("my_operator", op.name)
def test_incompatible_domain_dimensions_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(2, 4))
with self.assertRaisesRegex(ValueError, "must.*same `domain_dimension`"):
add_operators([op1, op2])
def test_incompatible_range_dimensions_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(3, 3))
with self.assertRaisesRegex(ValueError, "must.*same `range_dimension`"):
add_operators([op1, op2])
def test_non_broadcastable_batch_shape_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(4, 3, 3))
with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
add_operators([op1, op2])
class LinearOperatorOrderOfAdditionTest(test.TestCase):
"""Test that the order of addition is done as specified by tiers."""
def test_tier_0_additions_done_in_tier_0(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
diag3 = linalg.LinearOperatorDiag([1.])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
[_BadAdder()],
]
# Should not raise since all were added in tier 0, and tier 1 (with the
# _BadAdder) was never reached.
op_sum = add_operators([diag1, diag2, diag3], addition_tiers=addition_tiers)
self.assertEqual(1, len(op_sum))
self.assertIsInstance(op_sum[0], linalg.LinearOperatorDiag)
def test_tier_1_additions_done_by_tier_1(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
[linear_operator_addition._AddAndReturnTriL()],
[_BadAdder()],
]
# Should not raise since all were added by tier 1, and the
# _BadAdder) was never reached.
op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
self.assertEqual(1, len(op_sum))
self.assertIsInstance(op_sum[0], linalg.LinearOperatorLowerTriangular)
def test_tier_1_additions_done_by_tier_1_with_order_flipped(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnTriL()],
[linear_operator_addition._AddAndReturnDiag()],
[_BadAdder()],
]
# Tier 0 could convert to TriL, and this converted everything to TriL,
# including the Diags.
# Tier 1 was never used.
# Tier 2 was never used (therefore, _BadAdder didn't raise).
op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
self.assertEqual(1, len(op_sum))
self.assertIsInstance(op_sum[0], linalg.LinearOperatorLowerTriangular)
@test_util.run_deprecated_v1
def test_cannot_add_everything_so_return_more_than_one_operator(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([2.])
tril5 = linalg.LinearOperatorLowerTriangular([[5.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
]
# Tier 0 (the only tier) can only convert to Diag, so it combines the two
# diags, but the TriL is unchanged.
# Result should contain two operators, one Diag, one TriL.
op_sum = add_operators([diag1, diag2, tril5], addition_tiers=addition_tiers)
self.assertEqual(2, len(op_sum))
found_diag = False
found_tril = False
with self.cached_session():
for op in op_sum:
if isinstance(op, linalg.LinearOperatorDiag):
found_diag = True
self.assertAllClose([[3.]], op.to_dense())
if isinstance(op, linalg.LinearOperatorLowerTriangular):
found_tril = True
self.assertAllClose([[5.]], op.to_dense())
self.assertTrue(found_diag and found_tril)
def test_intermediate_tier_is_not_skipped(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
[_BadAdder()],
[linear_operator_addition._AddAndReturnTriL()],
]
# tril cannot be added in tier 0, and the intermediate tier 1 with the
# BadAdder will catch it and raise.
with self.assertRaisesRegex(AssertionError, "BadAdder.can_add called"):
add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
class AddAndReturnScaledIdentityTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnScaledIdentity()
@test_util.run_deprecated_v1
def test_identity_plus_identity(self):
id1 = linalg.LinearOperatorIdentity(num_rows=2)
id2 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3])
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)
with self.cached_session():
self.assertAllClose(2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
@test_util.run_deprecated_v1
def test_identity_plus_scaled_identity(self):
id1 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3])
id2 = linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=2.2)
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)
with self.cached_session():
self.assertAllClose(3.2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
@test_util.run_deprecated_v1
def test_scaled_identity_plus_scaled_identity(self):
id1 = linalg.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[2.2, 2.2, 2.2])
id2 = linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=-1.0)
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)
with self.cached_session():
self.assertAllClose(1.2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
class AddAndReturnDiagTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnDiag()
@test_util.run_deprecated_v1
def test_identity_plus_identity_returns_diag(self):
id1 = linalg.LinearOperatorIdentity(num_rows=2)
id2 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3])
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(id1, id2))
operator = self._adder.add(id1, id2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorDiag)
with self.cached_session():
self.assertAllClose(2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
@test_util.run_deprecated_v1
def test_diag_plus_diag(self):
diag1 = rng.rand(2, 3, 4)
diag2 = rng.rand(4)
op1 = linalg.LinearOperatorDiag(diag1)
op2 = linalg.LinearOperatorDiag(diag2)
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(op1, op2))
operator = self._adder.add(op1, op2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorDiag)
with self.cached_session():
self.assertAllClose(
linalg.LinearOperatorDiag(diag1 + diag2).to_dense(),
operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
class AddAndReturnTriLTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnTriL()
@test_util.run_deprecated_v1
def test_diag_plus_tril(self):
diag = linalg.LinearOperatorDiag([1., 2.])
tril = linalg.LinearOperatorLowerTriangular([[10., 0.], [30., 0.]])
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(diag, diag))
self.assertTrue(self._adder.can_add(diag, tril))
operator = self._adder.add(diag, tril, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorLowerTriangular)
with self.cached_session():
self.assertAllClose([[11., 0.], [30., 2.]], operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
class AddAndReturnMatrixTest(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnMatrix()
@test_util.run_deprecated_v1
def test_diag_plus_diag(self):
diag1 = linalg.LinearOperatorDiag([1., 2.])
diag2 = linalg.LinearOperatorDiag([-1., 3.])
hints = linear_operator_addition._Hints(
is_positive_definite=False, is_non_singular=False)
self.assertTrue(self._adder.can_add(diag1, diag2))
operator = self._adder.add(diag1, diag2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorFullMatrix)
with self.cached_session():
self.assertAllClose([[0., 0.], [0., 5.]], operator.to_dense())
self.assertFalse(operator.is_positive_definite)
self.assertFalse(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
if __name__ == "__main__":
test.main()
|
the-stack_0_6736 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from io import BytesIO
from struct import pack
from random import randint, choice
import time
from test_framework.authproxy import JSONRPCException
from test_framework.blocktools import create_coinbase, create_block
from test_framework.key import CECKey
from test_framework.messages import CTransaction, CTxOut, CTxIn, COIN, msg_block
from test_framework.mininode import network_thread_start
from test_framework.test_framework import BitcoinTestFramework
from test_framework.script import CScript, OP_CHECKSIG
from test_framework.util import hash256, bytes_to_hex_str, hex_str_to_bytes, connect_nodes_bi, p2p_port
from .util import TestNode, create_transaction, utxo_to_stakingPrevOuts, dir_size
''' -------------------------------------------------------------------------
StreamitCoin_FakeStakeTest CLASS ----------------------------------------------------
General Test Class to be extended by individual tests for each attack test
'''
class StreamitCoin_FakeStakeTest(BitcoinTestFramework):
def set_test_params(self):
''' Setup test environment
:param:
:return:
'''
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-staking=1', '-debug=net']]*self.num_nodes
def setup_network(self):
''' Can't rely on syncing all the nodes when staking=1
:param:
:return:
'''
self.setup_nodes()
for i in range(self.num_nodes - 1):
for j in range(i+1, self.num_nodes):
connect_nodes_bi(self.nodes, i, j)
def init_test(self):
''' Initializes test parameters
:param:
:return:
'''
self.log.info("\n\n*** Starting %s ***\n------------------------\n%s\n", self.__class__.__name__, self.description)
# Global Test parameters (override in run_test)
self.DEFAULT_FEE = 0.1
# Spam blocks to send in current test
self.NUM_BLOCKS = 30
# Setup the p2p connections and start up the network thread.
self.test_nodes = []
for i in range(self.num_nodes):
self.test_nodes.append(TestNode())
self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))
network_thread_start() # Start up network handling in another thread
self.node = self.nodes[0]
# Let the test nodes get in sync
for i in range(self.num_nodes):
self.test_nodes[i].wait_for_verack()
def run_test(self):
''' Performs the attack of this test - run init_test first.
:param:
:return:
'''
self.description = ""
self.init_test()
return
def create_spam_block(self, hashPrevBlock, stakingPrevOuts, height, fStakeDoubleSpent=False, fZPoS=False, spendingPrevOuts={}):
''' creates a block to spam the network with
:param hashPrevBlock: (hex string) hash of previous block
stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
map outpoints (to be used as staking inputs) to amount, block_time, nStakeModifier, hashStake
height: (int) block height
fStakeDoubleSpent: (bool) spend the coinstake input inside the block
fZPoS: (bool) stake the block with zerocoin
spendingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
map outpoints (to be used as tx inputs) to amount, block_time, nStakeModifier, hashStake
:return block: (CBlock) generated block
'''
self.log.info("Creating Spam Block")
# If not given inputs to create spam txes, use a copy of the staking inputs
if len(spendingPrevOuts) == 0:
spendingPrevOuts = dict(stakingPrevOuts)
# Get current time
current_time = int(time.time())
nTime = current_time & 0xfffffff0
# Create coinbase TX
# Even if PoS blocks have empty coinbase vout, the height is required for the vin script
coinbase = create_coinbase(height)
coinbase.vout[0].nValue = 0
coinbase.vout[0].scriptPubKey = b""
coinbase.nTime = nTime
coinbase.rehash()
# Create Block with coinbase
block = create_block(int(hashPrevBlock, 16), coinbase, nTime)
# Find valid kernel hash - Create a new private key used for block signing.
if not block.solve_stake(stakingPrevOuts):
raise Exception("Not able to solve for any prev_outpoint")
self.log.info("Stake found. Signing block...")
# Sign coinstake TX and add it to the block
signed_stake_tx = self.sign_stake_tx(block, stakingPrevOuts[block.prevoutStake][0], fZPoS)
block.vtx.append(signed_stake_tx)
# Remove coinstake input prevout unless we want to try double spending in the same block.
# Skip for zPoS as the spendingPrevouts are just regular UTXOs
if not fZPoS and not fStakeDoubleSpent:
del spendingPrevOuts[block.prevoutStake]
# remove a random prevout from the list
# (to randomize block creation if the same height is picked two times)
del spendingPrevOuts[choice(list(spendingPrevOuts))]
# Create spam for the block. Sign the spendingPrevouts
self.log.info("Creating spam TXes...")
for outPoint in spendingPrevOuts:
value_out = int(spendingPrevOuts[outPoint][0] - self.DEFAULT_FEE * COIN)
tx = create_transaction(outPoint, b"", value_out, nTime, scriptPubKey=CScript([self.block_sig_key.get_pubkey(), OP_CHECKSIG]))
# sign txes
signed_tx_hex = self.node.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
signed_tx = CTransaction()
signed_tx.deserialize(BytesIO(hex_str_to_bytes(signed_tx_hex)))
block.vtx.append(signed_tx)
# Get correct MerkleRoot and rehash block
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
# Sign block with coinstake key and return it
block.sign_block(self.block_sig_key)
return block
def spend_utxo(self, utxo, address_list):
''' spend amount from previously unspent output to a provided address
:param utxo: (JSON) returned from listunspent used as input
addresslist: (string) destination address
:return: txhash: (string) tx hash if successful, empty string otherwise
'''
try:
inputs = [{"txid":utxo["txid"], "vout":utxo["vout"]}]
out_amount = (float(utxo["amount"]) - self.DEFAULT_FEE)/len(address_list)
outputs = {}
for address in address_list:
outputs[address] = out_amount
spendingTx = self.node.createrawtransaction(inputs, outputs)
spendingTx_signed = self.node.signrawtransaction(spendingTx)
if spendingTx_signed["complete"]:
txhash = self.node.sendrawtransaction(spendingTx_signed["hex"])
return txhash
else:
self.log.warning("Error: %s" % str(spendingTx_signed["errors"]))
return ""
except JSONRPCException as e:
self.log.error("JSONRPCException: %s" % str(e))
return ""
def spend_utxos(self, utxo_list, address_list = []):
''' spend utxos to provided list of addresses or 10 new generate ones.
:param utxo_list: (JSON list) returned from listunspent used as input
address_list: (string list) [optional] recipient StreamitCoin addresses. if not set,
10 new addresses will be generated from the wallet for each tx.
:return: txHashes (string list) tx hashes
'''
txHashes = []
# If not given, get 10 new addresses from self.node wallet
if address_list == []:
for i in range(10):
address_list.append(self.node.getnewaddress())
for utxo in utxo_list:
try:
# spend current utxo to provided addresses
txHash = self.spend_utxo(utxo, address_list)
if txHash != "":
txHashes.append(txHash)
except JSONRPCException as e:
self.log.error("JSONRPCException: %s" % str(e))
continue
return txHashes
def stake_amplification_step(self, utxo_list, address_list = []):
''' spends a list of utxos providing the list of new outputs
:param utxo_list: (JSON list) returned from listunspent used as input
address_list: (string list) [optional] recipient StreamitCoin addresses.
:return: new_utxos: (JSON list) list of new (valid) inputs after the spends
'''
self.log.info("--> Stake Amplification step started with %d UTXOs", len(utxo_list))
txHashes = self.spend_utxos(utxo_list, address_list)
num_of_txes = len(txHashes)
new_utxos = []
if num_of_txes> 0:
self.log.info("Created %d transactions...Mining 2 blocks to include them..." % num_of_txes)
self.node.generate(2)
time.sleep(2)
new_utxos = self.node.listunspent()
self.log.info("Amplification step produced %d new \"Fake Stake\" inputs:" % len(new_utxos))
return new_utxos
def stake_amplification(self, utxo_list, iterations, address_list = []):
''' performs the "stake amplification" which gives higher chances at finding fake stakes
:param utxo_list: (JSON list) returned from listunspent used as input
iterations: (int) amount of stake amplification steps to perform
address_list: (string list) [optional] recipient StreamitCoin addresses.
:return: all_inputs: (JSON list) list of all spent inputs
'''
self.log.info("** Stake Amplification started with %d UTXOs", len(utxo_list))
valid_inputs = utxo_list
all_inputs = []
for i in range(iterations):
all_inputs = all_inputs + valid_inputs
old_inputs = valid_inputs
valid_inputs = self.stake_amplification_step(old_inputs, address_list)
self.log.info("** Stake Amplification ended with %d \"fake\" UTXOs", len(all_inputs))
return all_inputs
def sign_stake_tx(self, block, stake_in_value, fZPoS=False):
''' signs a coinstake transaction
:param block: (CBlock) block with stake to sign
stake_in_value: (int) staked amount
fZPoS: (bool) zerocoin stake
:return: stake_tx_signed: (CTransaction) signed tx
'''
self.block_sig_key = CECKey()
if fZPoS:
self.log.info("Signing zPoS stake...")
# Create raw zerocoin stake TX (signed)
raw_stake = self.node.createrawzerocoinstake(block.prevoutStake)
stake_tx_signed_raw_hex = raw_stake["hex"]
# Get stake TX private key to sign the block with
stake_pkey = raw_stake["private-key"]
self.block_sig_key.set_compressed(True)
self.block_sig_key.set_secretbytes(bytes.fromhex(stake_pkey))
else:
# Create a new private key and get the corresponding public key
self.block_sig_key.set_secretbytes(hash256(pack('<I', 0xffff)))
pubkey = self.block_sig_key.get_pubkey()
# Create the raw stake TX (unsigned)
scriptPubKey = CScript([pubkey, OP_CHECKSIG])
outNValue = int(stake_in_value + 2*COIN)
stake_tx_unsigned = CTransaction()
stake_tx_unsigned.nTime = block.nTime
stake_tx_unsigned.vin.append(CTxIn(block.prevoutStake))
stake_tx_unsigned.vin[0].nSequence = 0xffffffff
stake_tx_unsigned.vout.append(CTxOut())
stake_tx_unsigned.vout.append(CTxOut(outNValue, scriptPubKey))
# Sign the stake TX
stake_tx_signed_raw_hex = self.node.signrawtransaction(bytes_to_hex_str(stake_tx_unsigned.serialize()))['hex']
# Deserialize the signed raw tx into a CTransaction object and return it
stake_tx_signed = CTransaction()
stake_tx_signed.deserialize(BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex)))
return stake_tx_signed
def get_prevouts(self, utxo_list, blockHeight, zpos=False):
''' get prevouts (map) for each utxo in a list
:param utxo_list: <if zpos=False> (JSON list) utxos returned from listunspent used as input
<if zpos=True> (JSON list) mints returned from listmintedzerocoins used as input
blockHeight: (int) height of the previous block
zpos: (bool) type of utxo_list
:return: stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
map outpoints to amount, block_time, nStakeModifier, hashStake
'''
zerocoinDenomList = [1, 5, 10, 50, 100, 500, 1000, 5000]
stakingPrevOuts = {}
for utxo in utxo_list:
if zpos:
# get mint checkpoint
checkpointHeight = blockHeight - 200
checkpointBlock = self.node.getblock(self.node.getblockhash(checkpointHeight), True)
checkpoint = int(checkpointBlock['acc_checkpoint'], 16)
# parse checksum and get checksumblock
pos = zerocoinDenomList.index(utxo['denomination'])
checksum = (checkpoint >> (32 * (len(zerocoinDenomList) - 1 - pos))) & 0xFFFFFFFF
checksumBlock = self.node.getchecksumblock(hex(checksum), utxo['denomination'], True)
# get block hash and block time
txBlockhash = checksumBlock['hash']
txBlocktime = checksumBlock['time']
else:
# get raw transaction for current input
utxo_tx = self.node.getrawtransaction(utxo['txid'], 1)
# get block hash and block time
txBlocktime = utxo_tx['blocktime']
txBlockhash = utxo_tx['blockhash']
# get Stake Modifier
stakeModifier = int(self.node.getblock(txBlockhash)['modifier'], 16)
# assemble prevout object
utxo_to_stakingPrevOuts(utxo, stakingPrevOuts, txBlocktime, stakeModifier, zpos)
return stakingPrevOuts
def log_data_dir_size(self):
''' Prints the size of the '/regtest/blocks' directory.
:param:
:return:
'''
init_size = dir_size(self.node.datadir + "/regtest/blocks")
self.log.info("Size of data dir: %s kilobytes" % str(init_size))
def test_spam(self, name, staking_utxo_list,
fRandomHeight=False, randomRange=0, randomRange2=0,
fDoubleSpend=False, fMustPass=False, fZPoS=False,
spending_utxo_list=[]):
''' General method to create, send and test the spam blocks
:param name: (string) chain branch (usually either "Main" or "Forked")
staking_utxo_list: (string list) utxos to use for staking
fRandomHeight: (bool) send blocks at random height
randomRange: (int) if fRandomHeight=True, height is >= current-randomRange
randomRange2: (int) if fRandomHeight=True, height is < current-randomRange2
fDoubleSpend: (bool) if true, stake input is double spent in block.vtx
fMustPass: (bool) if true, the blocks must be stored on disk
fZPoS: (bool) stake the block with zerocoin
spending_utxo_list: (string list) utxos to use for spending
:return: err_msgs: (string list) reports error messages from the test
or an empty list if test is successful
'''
# Create empty error messages list
err_msgs = []
# Log initial datadir size
self.log_data_dir_size()
# Get latest block number and hash
block_count = self.node.getblockcount()
pastBlockHash = self.node.getblockhash(block_count)
randomCount = block_count
self.log.info("Current height: %d" % block_count)
for i in range(0, self.NUM_BLOCKS):
if i !=0:
self.log.info("Sent %d blocks out of %d" % (i, self.NUM_BLOCKS))
# if fRandomHeight=True get a random block number (in range) and corresponding hash
if fRandomHeight:
randomCount = randint(block_count - randomRange, block_count - randomRange2)
pastBlockHash = self.node.getblockhash(randomCount)
# Get spending prevouts and staking prevouts for the height of current block
current_block_n = randomCount + 1
stakingPrevOuts = self.get_prevouts(staking_utxo_list, randomCount, zpos=fZPoS)
spendingPrevOuts = self.get_prevouts(spending_utxo_list, randomCount)
# Create the spam block
block = self.create_spam_block(pastBlockHash, stakingPrevOuts, current_block_n,
fStakeDoubleSpent=fDoubleSpend, fZPoS=fZPoS, spendingPrevOuts=spendingPrevOuts)
# Log time and size of the block
block_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(block.nTime))
block_size = len(block.serialize())/1000
self.log.info("Sending block %d [%s...] - nTime: %s - Size (kb): %.2f",
current_block_n, block.hash[:7], block_time, block_size)
# Try submitblock
var = self.node.submitblock(bytes_to_hex_str(block.serialize()))
time.sleep(1)
if (not fMustPass and var not in [None, "bad-txns-invalid-zpiv"]) or (fMustPass and var != "inconclusive"):
self.log.error("submitblock [fMustPass=%s] result: %s" % (str(fMustPass), str(var)))
err_msgs.append("submitblock %d: %s" % (current_block_n, str(var)))
# Try sending the message block
msg = msg_block(block)
try:
self.test_nodes[0].handle_connect()
self.test_nodes[0].send_message(msg)
time.sleep(2)
block_ret = self.node.getblock(block.hash)
if not fMustPass and block_ret is not None:
self.log.error("Error, block stored in %s chain" % name)
err_msgs.append("getblock %d: result not None" % current_block_n)
if fMustPass:
if block_ret is None:
self.log.error("Error, block NOT stored in %s chain" % name)
err_msgs.append("getblock %d: result is None" % current_block_n)
else:
self.log.info("Good. Block IS stored on disk.")
except JSONRPCException as e:
exc_msg = str(e)
if exc_msg == "Can't read block from disk (-32603)":
if fMustPass:
self.log.warning("Bad! Block was NOT stored to disk.")
err_msgs.append(exc_msg)
else:
self.log.info("Good. Block was not stored on disk.")
else:
self.log.warning(exc_msg)
err_msgs.append(exc_msg)
except Exception as e:
exc_msg = str(e)
self.log.error(exc_msg)
err_msgs.append(exc_msg)
self.log.info("Sent all %s blocks." % str(self.NUM_BLOCKS))
# Log final datadir size
self.log_data_dir_size()
# Return errors list
return err_msgs
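# Hypothetical subclass sketch (added for illustration; names and parameter values
# are illustrative). Individual attack tests extend this class, set a description,
# call init_test(), then drive test_spam() with UTXOs from the wallet:
#
#   class Test_01(StreamitCoin_FakeStakeTest):
#       def run_test(self):
#           self.description = "Spam past blocks on the main chain"
#           self.init_test()
#           utxos = self.node.listunspent()
#           err_msgs = self.test_spam("Main", utxos, fRandomHeight=True, randomRange=10)
#           assert len(err_msgs) == 0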
|
the-stack_0_6738 | load("//ruby/private:constants.bzl", "RULES_RUBY_WORKSPACE_NAME")
load("//ruby/private:providers.bzl", "RubyRuntimeContext")
DEFAULT_BUNDLER_VERSION = "2.1.2"
BUNDLE_BIN_PATH = "bin"
BUNDLE_PATH = "lib"
SCRIPT_INSTALL_BUNDLER = "download_bundler.rb"
SCRIPT_ACTIVATE_GEMS = "activate_gems.rb"
SCRIPT_BUILD_FILE_GENERATOR = "create_bundle_build_file.rb"
# Runs bundler with arbitrary arguments
# eg: run_bundler(runtime_ctx, [ "lock", " --gemfile", "Gemfile.rails5" ])
def run_bundler(runtime_ctx, bundler_arguments):
# Now we are running bundle install
args = [
runtime_ctx.interpreter, # ruby
"-I",
".",
"-I", # Used to tell Ruby where to load the library scripts
BUNDLE_PATH, # Add vendor/bundle to the list of resolvers
"bundler/gems/bundler-{}/exe/bundle".format(runtime_ctx.bundler_version), # our binary
] + bundler_arguments
kwargs = {}
if "BUNDLER_TIMEOUT" in runtime_ctx.ctx.os.environ:
timeout_in_secs = runtime_ctx.ctx.os.environ["BUNDLER_TIMEOUT"]
if timeout_in_secs.isdigit():
kwargs["timeout"] = int(timeout_in_secs)
else:
fail("'%s' is invalid value for BUNDLER_TIMEOUT. Must be an integer." % (timeout_in_secs))
return runtime_ctx.ctx.execute(
args,
quiet = False,
        # Need to run this command with GEM_HOME set so that the bin stubs can load the correct bundler
environment = {"GEM_HOME": "bundler", "GEM_PATH": "bundler"},
**kwargs
)
def install_bundler(runtime_ctx):
args = [
runtime_ctx.interpreter,
SCRIPT_INSTALL_BUNDLER,
runtime_ctx.bundler_version,
]
result = runtime_ctx.ctx.execute(args, environment = runtime_ctx.environment, quiet = False)
if result.return_code:
fail("Error installing bundler: {} {}".format(result.stdout, result.stderr))
def bundle_install(runtime_ctx):
bundler_args = [
"install", # bundle install
"--standalone", # Makes a bundle that can work without depending on Rubygems or Bundler at runtime.
"--binstubs={}".format(BUNDLE_BIN_PATH), # Creates a directory and place any executables from the gem there.
"--path={}".format(BUNDLE_PATH), # The location to install the specified gems to.
"--jobs=10", # run a few jobs to ensure no gem install is blocking another
]
if runtime_ctx.ctx.attr.full_index:
bundler_args.append("--full-index")
result = run_bundler(runtime_ctx, bundler_args)
if result.return_code:
fail("bundle install failed: %s%s" % (result.stdout, result.stderr))
def generate_bundle_build_file(runtime_ctx):
# Create the BUILD file to expose the gems to the WORKSPACE
# USAGE: ./create_bundle_build_file.rb BUILD.bazel Gemfile.lock repo-name [excludes-json] workspace-name
args = [
runtime_ctx.interpreter, # ruby interpreter
SCRIPT_BUILD_FILE_GENERATOR, # The template used to created bundle file
"BUILD.bazel", # Bazel build file (can be empty)
"Gemfile.lock", # Gemfile.lock where we list all direct and transitive dependencies
runtime_ctx.ctx.name, # Name of the target
repr(runtime_ctx.ctx.attr.excludes),
RULES_RUBY_WORKSPACE_NAME,
runtime_ctx.bundler_version,
]
result = runtime_ctx.ctx.execute(
args,
# The build file generation script requires bundler so we add this to make
# the correct version of bundler available
environment = {"GEM_HOME": "bundler", "GEM_PATH": "bundler"},
quiet = False,
)
if result.return_code:
fail("build file generation failed: %s%s" % (result.stdout, result.stderr))
def _rb_bundle_impl(ctx):
ctx.symlink(ctx.attr.gemfile, "Gemfile")
ctx.symlink(ctx.attr.gemfile_lock, "Gemfile.lock")
ctx.symlink(ctx.attr._create_bundle_build_file, SCRIPT_BUILD_FILE_GENERATOR)
ctx.symlink(ctx.attr._install_bundler, SCRIPT_INSTALL_BUNDLER)
ctx.symlink(ctx.attr._activate_gems, SCRIPT_ACTIVATE_GEMS)
# Setup this provider that we pass around between functions for convenience
runtime_ctx = RubyRuntimeContext(
ctx = ctx,
interpreter = ctx.path(ctx.attr.ruby_interpreter),
environment = {"RUBYOPT": "--enable-gems"},
bundler_version = ctx.attr.bundler_version,
)
# 1. Install the right version of the Bundler Gem
install_bundler(runtime_ctx)
# Create label for the Bundler executable
bundler = Label("//:bundler/gems/bundler-{}/exe/bundle".format(runtime_ctx.bundler_version))
# Run bundle install
bundle_install(runtime_ctx)
# Generate the BUILD file for the bundle
generate_bundle_build_file(runtime_ctx)
rb_bundle = repository_rule(
implementation = _rb_bundle_impl,
attrs = {
"ruby_sdk": attr.string(
default = "@org_ruby_lang_ruby_toolchain",
),
"ruby_interpreter": attr.label(
default = "@org_ruby_lang_ruby_toolchain//:ruby",
),
"gemfile": attr.label(
allow_single_file = True,
mandatory = True,
),
"gemfile_lock": attr.label(
allow_single_file = True,
),
"version": attr.string(
mandatory = False,
),
"bundler_version": attr.string(
default = DEFAULT_BUNDLER_VERSION,
),
"excludes": attr.string_list_dict(
doc = "List of glob patterns per gem to be excluded from the library",
),
"full_index": attr.bool(
default = False,
doc = "Use --full-index for bundle install",
),
"_install_bundler": attr.label(
default = "%s//ruby/private/bundle:%s" % (
RULES_RUBY_WORKSPACE_NAME,
SCRIPT_INSTALL_BUNDLER,
),
allow_single_file = True,
),
"_create_bundle_build_file": attr.label(
default = "%s//ruby/private/bundle:%s" % (
RULES_RUBY_WORKSPACE_NAME,
SCRIPT_BUILD_FILE_GENERATOR,
),
doc = "Creates the BUILD file",
allow_single_file = True,
),
"_activate_gems": attr.label(
default = "%s//ruby/private/bundle:%s" % (
RULES_RUBY_WORKSPACE_NAME,
SCRIPT_ACTIVATE_GEMS,
),
allow_single_file = True,
),
},
)
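# Hypothetical WORKSPACE usage sketch (added for illustration; labels are illustrative):
#
#   rb_bundle(
#       name = "bundle",
#       gemfile = "//:Gemfile",
#       gemfile_lock = "//:Gemfile.lock",
#   )
#
# The generated BUILD.bazel then exposes the bundled gems as targets such as
# @bundle//:gem_name.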
|
the-stack_0_6739 | #!/usr/bin/env python
# Direct downwind faster than the wind vehicle simulation
#
import os
from math import pi
import numpy as np
from matplotlib import pyplot as plt
from airfoil import Airfoil
from blade import Blade
from rotor import Rotor
from vehicle import Vehicle
from rk4 import RK4
ddwfttw_vehicle = None
Vwind = None
rho = None
g = None
v_schedule = None
collective_schedule = None
# Equation of motion
# Inputs:
# t: time
# x: np.array([position, velocity])
# (global) ddwfttw_vehicle
# (global) Vwind
# (global) rho: air density
# (global) g: acceleration due to gravity
# (global) v_schedule: vehicle speeds for interpolating collective
# (global) collective_schedule: collective pitch at the speeds in v_schedule
# Returns:
# xdot: np.array([velocity, acceleration])
def motion(t, x):
global ddwfttw_vehicle
global Vwind
global rho
global g
global v_schedule
global collective_schedule
# Set vehicle velocity
ddwfttw_vehicle.setSpeed(x[1])
# Get collective pitch from schedule
theta0 = np.interp(x[1], v_schedule, collective_schedule)
forces = ddwfttw_vehicle.computeForces(Vwind, rho, theta0, g)
f = sum(forces.values())
# Equations of motion
xdot = np.zeros((2))
xdot[0] = x[1]
xdot[1] = f/ddwfttw_vehicle._m
return xdot
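# In math form (for reference): with net longitudinal force F(v) from the force model
# above, the state X = [position, velocity] evolves as
#   d(position)/dt = velocity,    m * d(velocity)/dt = F(velocity)
# which is what the RK4 integrator below advances in time.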
if __name__ == "__main__":
# Conversion factors
lbm2slug = 1./32.174
mph2fps = 5280./3600.
kg2slug = 0.06852177
m2in = 1./0.0254
in2ft = 1./12.
# Vehicle parameters:
# wheel_radius: wheel radius (ft)
# gear_ratio: ratio of wheel rpm to prop rpm
# gear_efficiency: transmission efficiency
# CDf: drag coefficient for air flowing over the vehicle from front to back
# CDb: drag coefficient for air flowing over the vehicle from back to front
# Crr: coefficient of rolling resistance
# A: projected frontal area (sq ft)
# m: total vehicle mass (slug)
wheel_radius = 1.25
gear_ratio = 1.5
gear_efficiency = 0.85
CDf = 0.3
CDb = 0.4
Crr = 0.01
A = 20.
m = 650.*lbm2slug
# Wind speed (ft/sec) (positive tailwind)
Vwind = 10.*mph2fps
# Air density (slug/ft^3)
rho = 1.225*kg2slug/(m2in**3)/(in2ft**3)
# Acceleration due to gravity
g = 32.174
# NACA 0012 airfoil
airfoil = Airfoil()
airfoil.readClCdTables(os.path.join("airfoil_tables","naca6412.cltable"),
os.path.join("airfoil_tables","naca6412.cdtable"))
# Rotor blade parameters
radial = [1., 2.5, 3.5, 8.75] # Radial stations (ft)
chord = [0.2, 1.1, 1.2, 0.3] # Chord (ft)
twist = [26., 18., 16., 8.0] # Twist (deg)
blade = Blade(radial, chord, twist, airfoil)
blade.plotChord()
blade.plotTwist()
# Rotor
rotor = Rotor(blade, 2)
rotor.discretize(100)
# collective schedule based on vehicle speed
v_schedule = np.array([0.5, 0.8, 1.0, 1.5, 2.0, 2.2, 2.5, 2.6])*Vwind
collective_schedule = np.array([0., 2., 4., 6., 8., 9., 9., 9.])
# Vehicle
ddwfttw_vehicle = Vehicle(wheel_radius, gear_ratio, gear_efficiency, CDf, CDb, Crr, A, m,
rotor)
initial_speed = 0.5*Vwind # Initial condition. Start rolling at half the wind
# speed since rotor model loses accuracy below V.
# Initialize some arrays for plotting later
time = []
position = []
speed = []
theta0 = []
thrust = []
fdrag_aero = []
fdrag_rotor = []
frolling_resistance = []
# Run the DDWFTTW simulation and store some things to plot later
maxsteps = 400
dt = 0.5
initial_condition = np.array([0.0, initial_speed])
integrator = RK4(motion, 0.0, initial_condition, dt)
for i in range(maxsteps):
integrator.step()
time.append(integrator.t)
position.append(integrator.y[0])
speed.append(integrator.y[1]/mph2fps)
theta0.append(np.interp(integrator.y[1], v_schedule, collective_schedule))
thrust.append(ddwfttw_vehicle._rotor._thrust)
fdrag_aero.append(ddwfttw_vehicle._Fdrag_aero)
fdrag_rotor.append(ddwfttw_vehicle._Fdrag_rotor)
frolling_resistance.append(ddwfttw_vehicle._Frr)
print("Time step {:d}, time = {:.1f}, speed = {:.2f} mph"\
.format(i+1, integrator.t, integrator.y[1]/mph2fps))
# Kick out early if net force becomes <= 0. That means we can't go any faster.
net = thrust[i] + fdrag_aero[i] + fdrag_rotor[i] + frolling_resistance[i]
if net <= 0.:
print("Max speed reached!")
break
# Plot
fig, ax = plt.subplots(figsize=(10,6))
ax.set_xlabel("Time (sec)")
ax.set_ylabel("Position (ft)")
ax.plot(time, position)
ax.grid()
fig.savefig("position.png", bbox_inches="tight")
plt.clf()
plt.close()
fig, ax = plt.subplots(figsize=(10,6))
ax.set_xlabel("Time (sec)")
ax.set_ylabel("Speed (mph)")
ax.plot([time[0], time[-1]], [Vwind/mph2fps, Vwind/mph2fps])
ax.plot(time, speed)
ax.grid()
ax.legend(["Wind", "Vehicle"])
fig.savefig("speed.png", bbox_inches="tight")
plt.clf()
plt.close()
fig, ax = plt.subplots(figsize=(10,6))
ax.set_xlabel("Time (sec)")
ax.set_ylabel("Collective pitch (deg)")
ax.plot(time, theta0)
ax.grid()
fig.savefig("collective.png", bbox_inches="tight")
plt.clf()
plt.close()
fig, ax = plt.subplots(figsize=(10,6))
ax.set_xlabel("Time (sec)")
ax.set_ylabel("Vehicle forces (lbf)")
ax.plot(time, thrust)
ax.plot(time, fdrag_aero)
ax.plot(time, fdrag_rotor)
ax.plot(time, frolling_resistance)
ax.grid()
ax.legend(["Rotor thrust", "Frame drag", "Drag to spin rotor", "Rolling resistance"])
fig.savefig("forces.png", bbox_inches="tight")
|
the-stack_0_6742 | # coding: utf8
from __future__ import unicode_literals, print_function
import os
import pkg_resources
import importlib
import re
from pathlib import Path
import random
from collections import OrderedDict
from thinc.neural._classes.model import Model
from thinc.neural.ops import NumpyOps
import functools
import itertools
import numpy.random
import srsly
try:
import jsonschema
except ImportError:
jsonschema = None
try:
import cupy.random
except ImportError:
cupy = None
from .symbols import ORTH
from .compat import cupy, CudaStream, path2str, basestring_, unicode_
from .compat import import_file
from .errors import Errors, Warnings, deprecation_warning
LANGUAGES = {}
_data_path = Path(__file__).parent / "data"
_PRINT_ENV = False
def set_env_log(value):
global _PRINT_ENV
_PRINT_ENV = value
def lang_class_is_loaded(lang):
"""Check whether a Language class is already loaded. Language classes are
loaded lazily, to avoid expensive setup code associated with the language
data.
lang (unicode): Two-letter language code, e.g. 'en'.
RETURNS (bool): Whether a Language class has been loaded.
"""
global LANGUAGES
return lang in LANGUAGES
def get_lang_class(lang):
"""Import and load a Language class.
lang (unicode): Two-letter language code, e.g. 'en'.
RETURNS (Language): Language class.
"""
global LANGUAGES
# Check if an entry point is exposed for the language code
entry_point = get_entry_point("spacy_languages", lang)
if entry_point is not None:
LANGUAGES[lang] = entry_point
return entry_point
if lang not in LANGUAGES:
try:
module = importlib.import_module(".lang.%s" % lang, "spacy")
except ImportError as err:
raise ImportError(Errors.E048.format(lang=lang, err=err))
LANGUAGES[lang] = getattr(module, module.__all__[0])
return LANGUAGES[lang]
def set_lang_class(name, cls):
"""Set a custom Language class name that can be loaded via get_lang_class.
name (unicode): Name of Language class.
cls (Language): Language class.
"""
global LANGUAGES
LANGUAGES[name] = cls
def get_data_path(require_exists=True):
"""Get path to spaCy data directory.
require_exists (bool): Only return path if it exists, otherwise None.
RETURNS (Path or None): Data path or None.
"""
if not require_exists:
return _data_path
else:
return _data_path if _data_path.exists() else None
def set_data_path(path):
"""Set path to spaCy data directory.
path (unicode or Path): Path to new data directory.
"""
global _data_path
_data_path = ensure_path(path)
def ensure_path(path):
"""Ensure string is converted to a Path.
path: Anything. If string, it's converted to Path.
RETURNS: Path or original argument.
"""
if isinstance(path, basestring_):
return Path(path)
else:
return path
def load_model(name, **overrides):
"""Load a model from a shortcut link, package or data path.
name (unicode): Package name, shortcut link or model path.
**overrides: Specific overrides, like pipeline components to disable.
RETURNS (Language): `Language` class with the loaded model.
"""
data_path = get_data_path()
if not data_path or not data_path.exists():
raise IOError(Errors.E049.format(path=path2str(data_path)))
if isinstance(name, basestring_): # in data dir / shortcut
if name in set([d.name for d in data_path.iterdir()]):
return load_model_from_link(name, **overrides)
if is_package(name): # installed as package
return load_model_from_package(name, **overrides)
if Path(name).exists(): # path to model data directory
return load_model_from_path(Path(name), **overrides)
elif hasattr(name, "exists"): # Path or Path-like to model data
return load_model_from_path(name, **overrides)
raise IOError(Errors.E050.format(name=name))
def load_model_from_link(name, **overrides):
"""Load a model from a shortcut link, or directory in spaCy data path."""
path = get_data_path() / name / "__init__.py"
try:
cls = import_file(name, path)
except AttributeError:
raise IOError(Errors.E051.format(name=name))
return cls.load(**overrides)
def load_model_from_package(name, **overrides):
"""Load a model from an installed package."""
cls = importlib.import_module(name)
return cls.load(**overrides)
def load_model_from_path(model_path, meta=False, **overrides):
"""Load a model from a data directory path. Creates Language class with
pipeline from meta.json and then calls from_disk() with path."""
if not meta:
meta = get_model_meta(model_path)
cls = get_lang_class(meta["lang"])
nlp = cls(meta=meta, **overrides)
pipeline = meta.get("pipeline", [])
disable = overrides.get("disable", [])
if pipeline is True:
pipeline = nlp.Defaults.pipe_names
elif pipeline in (False, None):
pipeline = []
for name in pipeline:
if name not in disable:
config = meta.get("pipeline_args", {}).get(name, {})
component = nlp.create_pipe(name, config=config)
nlp.add_pipe(component, name=name)
return nlp.from_disk(model_path)
def load_model_from_init_py(init_file, **overrides):
"""Helper function to use in the `load()` method of a model package's
__init__.py.
init_file (unicode): Path to model's __init__.py, i.e. `__file__`.
**overrides: Specific overrides, like pipeline components to disable.
RETURNS (Language): `Language` class with loaded model.
"""
model_path = Path(init_file).parent
meta = get_model_meta(model_path)
data_dir = "%s_%s-%s" % (meta["lang"], meta["name"], meta["version"])
data_path = model_path / data_dir
if not model_path.exists():
raise IOError(Errors.E052.format(path=path2str(data_path)))
return load_model_from_path(data_path, meta, **overrides)
def get_model_meta(path):
"""Get model meta.json from a directory path and validate its contents.
path (unicode or Path): Path to model directory.
RETURNS (dict): The model's meta data.
"""
model_path = ensure_path(path)
if not model_path.exists():
raise IOError(Errors.E052.format(path=path2str(model_path)))
meta_path = model_path / "meta.json"
if not meta_path.is_file():
raise IOError(Errors.E053.format(path=meta_path))
meta = srsly.read_json(meta_path)
for setting in ["lang", "name", "version"]:
if setting not in meta or not meta[setting]:
raise ValueError(Errors.E054.format(setting=setting))
return meta
def is_package(name):
"""Check if string maps to a package installed via pip.
name (unicode): Name of package.
RETURNS (bool): True if installed package, False if not.
"""
name = name.lower() # compare package name against lowercase name
packages = pkg_resources.working_set.by_key.keys()
for package in packages:
if package.lower().replace("-", "_") == name:
return True
return False
def get_package_path(name):
"""Get the path to an installed package.
name (unicode): Package name.
RETURNS (Path): Path to installed package.
"""
name = name.lower() # use lowercase version to be safe
# Here we're importing the module just to find it. This is worryingly
# indirect, but it's otherwise very difficult to find the package.
pkg = importlib.import_module(name)
return Path(pkg.__file__).parent
def get_entry_points(key):
"""Get registered entry points from other packages for a given key, e.g.
'spacy_factories' and return them as a dictionary, keyed by name.
key (unicode): Entry point name.
RETURNS (dict): Entry points, keyed by name.
"""
result = {}
for entry_point in pkg_resources.iter_entry_points(key):
result[entry_point.name] = entry_point.load()
return result
def get_entry_point(key, value):
"""Check if registered entry point is available for a given name and
load it. Otherwise, return None.
key (unicode): Entry point name.
value (unicode): Name of entry point to load.
RETURNS: The loaded entry point or None.
"""
for entry_point in pkg_resources.iter_entry_points(key):
if entry_point.name == value:
return entry_point.load()
def is_in_jupyter():
"""Check if user is running spaCy from a Jupyter notebook by detecting the
IPython kernel. Mainly used for the displaCy visualizer.
RETURNS (bool): True if in Jupyter, False if not.
"""
# https://stackoverflow.com/a/39662359/6400719
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
return True # Jupyter notebook or qtconsole
except NameError:
return False # Probably standard Python interpreter
return False
def get_cuda_stream(require=False):
if CudaStream is None:
return None
elif isinstance(Model.ops, NumpyOps):
return None
else:
return CudaStream()
def get_async(stream, numpy_array):
if cupy is None:
return numpy_array
else:
array = cupy.ndarray(numpy_array.shape, order="C", dtype=numpy_array.dtype)
array.set(numpy_array, stream=stream)
return array
def env_opt(name, default=None):
if type(default) is float:
type_convert = float
else:
type_convert = int
if "SPACY_" + name.upper() in os.environ:
value = type_convert(os.environ["SPACY_" + name.upper()])
if _PRINT_ENV:
print(name, "=", repr(value), "via", "$SPACY_" + name.upper())
return value
elif name in os.environ:
value = type_convert(os.environ[name])
if _PRINT_ENV:
print(name, "=", repr(value), "via", "$" + name)
return value
else:
if _PRINT_ENV:
print(name, "=", repr(default), "by default")
return default
def read_regex(path):
path = ensure_path(path)
with path.open() as file_:
entries = file_.read().split("\n")
expression = "|".join(
["^" + re.escape(piece) for piece in entries if piece.strip()]
)
return re.compile(expression)
def compile_prefix_regex(entries):
"""Compile a sequence of prefix rules into a regex object.
entries (tuple): The prefix rules, e.g. spacy.lang.punctuation.TOKENIZER_PREFIXES.
RETURNS (regex object): The regex object. to be used for Tokenizer.prefix_search.
"""
if "(" in entries:
# Handle deprecated data
expression = "|".join(
["^" + re.escape(piece) for piece in entries if piece.strip()]
)
return re.compile(expression)
else:
expression = "|".join(["^" + piece for piece in entries if piece.strip()])
return re.compile(expression)
def compile_suffix_regex(entries):
"""Compile a sequence of suffix rules into a regex object.
entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES.
RETURNS (regex object): The regex object. to be used for Tokenizer.suffix_search.
"""
expression = "|".join([piece + "$" for piece in entries if piece.strip()])
return re.compile(expression)
def compile_infix_regex(entries):
"""Compile a sequence of infix rules into a regex object.
entries (tuple): The infix rules, e.g. spacy.lang.punctuation.TOKENIZER_INFIXES.
RETURNS (regex object): The regex object. to be used for Tokenizer.infix_finditer.
"""
expression = "|".join([piece for piece in entries if piece.strip()])
return re.compile(expression)
def add_lookups(default_func, *lookups):
"""Extend an attribute function with special cases. If a word is in the
lookups, the value is returned. Otherwise the previous function is used.
default_func (callable): The default function to execute.
*lookups (dict): Lookup dictionary mapping string to attribute value.
RETURNS (callable): Lexical attribute getter.
"""
# This is implemented as functools.partial instead of a closure, to allow
# pickle to work.
return functools.partial(_get_attr_unless_lookup, default_func, lookups)
def _get_attr_unless_lookup(default_func, lookups, string):
for lookup in lookups:
if string in lookup:
return lookup[string]
return default_func(string)
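# Illustrative sketch (hypothetical lookup table):
#
#     lower = add_lookups(lambda string: string.lower(), {"NYC": "new york city"})
#     lower("NYC")    # -> "new york city"  (found in the lookup table)
#     lower("Paris")  # -> "paris"          (falls back to the default function)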
def update_exc(base_exceptions, *addition_dicts):
"""Update and validate tokenizer exceptions. Will overwrite exceptions.
base_exceptions (dict): Base exceptions.
*addition_dicts (dict): Exceptions to add to the base dict, in order.
RETURNS (dict): Combined tokenizer exceptions.
"""
exc = dict(base_exceptions)
for additions in addition_dicts:
for orth, token_attrs in additions.items():
if not all(isinstance(attr[ORTH], unicode_) for attr in token_attrs):
raise ValueError(Errors.E055.format(key=orth, orths=token_attrs))
described_orth = "".join(attr[ORTH] for attr in token_attrs)
if orth != described_orth:
raise ValueError(Errors.E056.format(key=orth, orths=described_orth))
exc.update(additions)
exc = expand_exc(exc, "'", "’")
return exc
def expand_exc(excs, search, replace):
"""Find string in tokenizer exceptions, duplicate entry and replace string.
For example, to add additional versions with typographic apostrophes.
excs (dict): Tokenizer exceptions.
search (unicode): String to find and replace.
replace (unicode): Replacement.
RETURNS (dict): Combined tokenizer exceptions.
"""
def _fix_token(token, search, replace):
fixed = dict(token)
fixed[ORTH] = fixed[ORTH].replace(search, replace)
return fixed
new_excs = dict(excs)
for token_string, tokens in excs.items():
if search in token_string:
new_key = token_string.replace(search, replace)
new_value = [_fix_token(t, search, replace) for t in tokens]
new_excs[new_key] = new_value
return new_excs
def normalize_slice(length, start, stop, step=None):
if not (step is None or step == 1):
raise ValueError(Errors.E057)
if start is None:
start = 0
elif start < 0:
start += length
start = min(length, max(0, start))
if stop is None:
stop = length
elif stop < 0:
stop += length
stop = min(length, max(start, stop))
return start, stop
def minibatch(items, size=8):
"""Iterate over batches of items. `size` may be an iterator,
so that batch-size can vary on each step.
"""
if isinstance(size, int):
size_ = itertools.repeat(size)
else:
size_ = size
items = iter(items)
while True:
batch_size = next(size_)
batch = list(itertools.islice(items, int(batch_size)))
if len(batch) == 0:
break
yield list(batch)
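# Illustrative sketch: `size` may be a plain int or any iterator of sizes, e.g.
# the compounding() generator defined below, so the batch size can grow over time:
#
#     batches = list(minibatch(range(10), size=compounding(1., 4., 2.)))
#     [len(b) for b in batches]   # -> [1, 2, 4, 3]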
def compounding(start, stop, compound):
"""Yield an infinite series of compounding values. Each time the
generator is called, a value is produced by multiplying the previous
value by the compound rate.
EXAMPLE:
>>> sizes = compounding(1., 10., 1.5)
>>> assert next(sizes) == 1.
>>> assert next(sizes) == 1 * 1.5
>>> assert next(sizes) == 1.5 * 1.5
"""
def clip(value):
return max(value, stop) if (start > stop) else min(value, stop)
curr = float(start)
while True:
yield clip(curr)
curr *= compound
def stepping(start, stop, steps):
"""Yield an infinite series of values that step from a start value to a
final value over some number of steps. Each step is (stop-start)/steps.
After the final value is reached, the generator continues yielding that
value.
EXAMPLE:
>>> sizes = stepping(1., 200., 100)
>>> assert next(sizes) == 1.
>>> assert next(sizes) == 1 * (200.-1.) / 100
>>> assert next(sizes) == 1 + (200.-1.) / 100 + (200.-1.) / 100
"""
def clip(value):
return max(value, stop) if (start > stop) else min(value, stop)
curr = float(start)
while True:
yield clip(curr)
curr += (stop - start) / steps
def decaying(start, stop, decay):
"""Yield an infinite series of linearly decaying values."""
curr = float(start)
while True:
yield max(curr, stop)
curr -= decay
def minibatch_by_words(items, size, tuples=True, count_words=len):
"""Create minibatches of a given number of words."""
if isinstance(size, int):
size_ = itertools.repeat(size)
else:
size_ = size
items = iter(items)
while True:
batch_size = next(size_)
batch = []
while batch_size >= 0:
try:
if tuples:
doc, gold = next(items)
else:
doc = next(items)
except StopIteration:
if batch:
yield batch
return
batch_size -= count_words(doc)
if tuples:
batch.append((doc, gold))
else:
batch.append(doc)
if batch:
yield batch
def itershuffle(iterable, bufsize=1000):
"""Shuffle an iterator. This works by holding `bufsize` items back
and yielding them sometime later. Obviously, this is not unbiased –
but should be good enough for batching. Larger bufsize means less bias.
From https://gist.github.com/andres-erbsen/1307752
iterable (iterable): Iterator to shuffle.
bufsize (int): Items to hold back.
YIELDS (iterable): The shuffled iterator.
"""
iterable = iter(iterable)
buf = []
try:
while True:
for i in range(random.randint(1, bufsize - len(buf))):
buf.append(next(iterable))
random.shuffle(buf)
for i in range(random.randint(1, bufsize)):
if buf:
yield buf.pop()
else:
break
except StopIteration:
random.shuffle(buf)
while buf:
yield buf.pop()
        # Returning here ends the generator cleanly; re-raising StopIteration
        # inside a generator becomes a RuntimeError under PEP 479 (Python 3.7+).
        return
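# Illustrative sketch: approximate, memory-bounded shuffling of a stream, where
# `examples` stands for any iterator:
#
#     for example in itershuffle(examples, bufsize=1000):
#         ...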
def filter_spans(spans):
"""Filter a sequence of spans and remove duplicates or overlaps. Useful for
creating named entities (where one token can only be part of one entity) or
when merging spans with `Retokenizer.merge`. When spans overlap, the (first)
longest span is preferred over shorter spans.
spans (iterable): The spans to filter.
RETURNS (list): The filtered spans.
"""
get_sort_key = lambda span: (span.end - span.start, span.start)
sorted_spans = sorted(spans, key=get_sort_key, reverse=True)
result = []
seen_tokens = set()
for span in sorted_spans:
# Check for end - 1 here because boundaries are inclusive
if span.start not in seen_tokens and span.end - 1 not in seen_tokens:
result.append(span)
seen_tokens.update(range(span.start, span.end))
result = sorted(result, key=lambda span: span.start)
return result
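# Illustrative note (hypothetical spans): given overlapping spans such as
# doc[0:4] and doc[1:3] plus a separate doc[5:6], filter_spans keeps doc[0:4]
# (the longest of the overlapping pair) and doc[5:6], returned in document order.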
def to_bytes(getters, exclude):
serialized = OrderedDict()
for key, getter in getters.items():
# Split to support file names like meta.json
if key.split(".")[0] not in exclude:
serialized[key] = getter()
return srsly.msgpack_dumps(serialized)
def from_bytes(bytes_data, setters, exclude):
msg = srsly.msgpack_loads(bytes_data)
for key, setter in setters.items():
# Split to support file names like meta.json
if key.split(".")[0] not in exclude and key in msg:
setter(msg[key])
return msg
def to_disk(path, writers, exclude):
path = ensure_path(path)
if not path.exists():
path.mkdir()
for key, writer in writers.items():
# Split to support file names like meta.json
if key.split(".")[0] not in exclude:
writer(path / key)
return path
def from_disk(path, readers, exclude):
path = ensure_path(path)
for key, reader in readers.items():
# Split to support file names like meta.json
if key.split(".")[0] not in exclude:
reader(path / key)
return path
def minify_html(html):
"""Perform a template-specific, rudimentary HTML minification for displaCy.
Disclaimer: NOT a general-purpose solution, only removes indentation and
newlines.
html (unicode): Markup to minify.
RETURNS (unicode): "Minified" HTML.
"""
return html.strip().replace(" ", "").replace("\n", "")
def escape_html(text):
"""Replace <, >, &, " with their HTML encoded representation. Intended to
prevent HTML errors in rendered displaCy markup.
text (unicode): The original text.
RETURNS (unicode): Equivalent text to be safely used within HTML.
"""
text = text.replace("&", "&")
text = text.replace("<", "<")
text = text.replace(">", ">")
text = text.replace('"', """)
return text
def use_gpu(gpu_id):
try:
import cupy.cuda.device
except ImportError:
return None
from thinc.neural.ops import CupyOps
device = cupy.cuda.device.Device(gpu_id)
device.use()
Model.ops = CupyOps()
Model.Ops = CupyOps
return device
def fix_random_seed(seed=0):
random.seed(seed)
numpy.random.seed(seed)
if cupy is not None:
cupy.random.seed(seed)
def get_json_validator(schema):
# We're using a helper function here to make it easier to change the
# validator that's used (e.g. different draft implementation), without
# having to change it all across the codebase.
# TODO: replace with (stable) Draft6Validator, if available
if jsonschema is None:
raise ValueError(Errors.E136)
return jsonschema.Draft4Validator(schema)
def validate_schema(schema):
"""Validate a given schema. This just checks if the schema itself is valid."""
validator = get_json_validator(schema)
validator.check_schema(schema)
def validate_json(data, validator):
"""Validate data against a given JSON schema (see https://json-schema.org).
data: JSON-serializable data to validate.
validator (jsonschema.DraftXValidator): The validator.
RETURNS (list): A list of error messages, if available.
"""
errors = []
for err in sorted(validator.iter_errors(data), key=lambda e: e.path):
if err.path:
err_path = "[{}]".format(" -> ".join([str(p) for p in err.path]))
else:
err_path = ""
msg = err.message + " " + err_path
if err.context: # Error has suberrors, e.g. if schema uses anyOf
suberrs = [" - {}".format(suberr.message) for suberr in err.context]
msg += ":\n{}".format("".join(suberrs))
errors.append(msg)
return errors
def get_serialization_exclude(serializers, exclude, kwargs):
"""Helper function to validate serialization args and manage transition from
keyword arguments (pre v2.1) to exclude argument.
"""
exclude = list(exclude)
# Split to support file names like meta.json
options = [name.split(".")[0] for name in serializers]
for key, value in kwargs.items():
if key in ("vocab",) and value is False:
deprecation_warning(Warnings.W015.format(arg=key))
exclude.append(key)
elif key.split(".")[0] in options:
raise ValueError(Errors.E128.format(arg=key))
# TODO: user warning?
return exclude
class SimpleFrozenDict(dict):
"""Simplified implementation of a frozen dict, mainly used as default
function or method argument (for arguments that should default to empty
dictionary). Will raise an error if user or spaCy attempts to add to dict.
"""
def __setitem__(self, key, value):
raise NotImplementedError(Errors.E095)
def pop(self, key, default=None):
raise NotImplementedError(Errors.E095)
def update(self, other):
raise NotImplementedError(Errors.E095)
class DummyTokenizer(object):
# add dummy methods for to_bytes, from_bytes, to_disk and from_disk to
# allow serialization (see #1557)
def to_bytes(self, **kwargs):
return b""
def from_bytes(self, _bytes_data, **kwargs):
return self
def to_disk(self, _path, **kwargs):
return None
def from_disk(self, _path, **kwargs):
return self
|
the-stack_0_6746 | # Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for workflow object exports."""
from os.path import abspath, dirname, join
from flask.json import dumps
from ggrc import db
from ggrc.app import app # NOQA # pylint: disable=unused-import
from ggrc_workflows.models import Workflow, TaskGroup
from integration.ggrc import TestCase
from integration.ggrc_workflows.generator import WorkflowsGenerator
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'test_csvs/')
class TestExportEmptyTemplate(TestCase):
"""Test empty export for all workflow object types."""
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "GGRC",
"X-export-view": "blocks",
}
def test_single_object_export(self):
"""Test empty exports for workflow only."""
data = {
"export_to": "csv",
"objects": [{"object_name": "Workflow", "fields": "all"}]
}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
def test_multiple_objects(self):
"""Test empty exports for all workflow object in one query."""
data = [
{"object_name": "Workflow", "fields": "all"},
{"object_name": "TaskGroup", "fields": "all"},
{"object_name": "TaskGroupTask", "fields": "all"},
{"object_name": "Cycle", "fields": "all"},
{"object_name": "CycleTaskGroup", "fields": "all"},
{"object_name": "CycleTaskGroupObjectTask", "fields": "all"},
]
request_body = {
"export_to": "csv",
"objects": data
}
response = self.client.post("/_service/export_csv",
data=dumps(request_body), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Workflow,", response.data)
self.assertIn("Task Group,", response.data)
self.assertIn("Task,", response.data)
self.assertIn("Cycle,", response.data)
self.assertIn("Cycle Task Group,", response.data)
self.assertIn("Cycle Task,", response.data)
class TestExportMultipleObjects(TestCase):
""" Test data is found in the google sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=2035742544
"""
CSV_DIR = join(abspath(dirname(__file__)), "test_csvs/")
def activate(self):
""" activate workflows just once after the class has been initialized
This should be in setUpClass method, but we can't access the server
context from there."""
gen = WorkflowsGenerator()
# generate cycle for the only one time wf
wf1 = Workflow.query.filter_by(status="Draft", slug="wf-1").first()
if wf1:
gen.generate_cycle(wf1)
# Only workflows with at least one task group could be activated
workflows = db.session.query(Workflow).join(TaskGroup).filter(
Workflow.id == TaskGroup.workflow_id,
Workflow.status == 'Draft').all()
for workflow in workflows:
gen.activate_workflow(workflow)
def setUp(self):
self.clear_data()
    # TODO: use a CSV here that doesn't produce errors or warnings
self.import_file("workflow_big_sheet.csv", safe=False)
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "GGRC",
"X-export-view": "blocks",
}
self.activate()
def export_csv(self, data):
response = super(TestExportMultipleObjects, self).export_csv(data)
self.assert200(response)
return response
def test_workflow_task_group_mapping(self):
""" test workflow and task group mappings """
data = [
{
"object_name": "Workflow", # wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("wf-1")) # 1 for wf and 1 on each tg
self.assertIn("tg-1", response)
self.assertIn("tg-6", response)
def test_tg_task(self):
""" test task group and task mappings """
data = [
{
"object_name": "TaskGroupTask", # task-1, task-7
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("tg-1")) # 2 for tasks and 1 for tg
self.assertIn("task-1", response)
self.assertIn("task-7", response)
def test_workflow_cycle_mapping(self):
""" test workflow and cycle mappings """
data = [
{
"object_name": "Cycle", # cycle with title wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Workflow",
"slugs": ["wf-1"],
},
},
"fields": "all",
}, {
"object_name": "Workflow", # wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
}, {
"object_name": "Cycle", # sholud be same cycle as in first block
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["2"],
},
},
"fields": "all",
}, {
# Task mapped to any of the two task groups, 3 tasks
"object_name": "CycleTaskGroupObjectTask",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["2"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["4"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("wf-1")) # 2 for cycles and 1 for wf
# 3rd block = 2, 5th block = 3, 6th block = 2.
self.assertEqual(7, response.count("CYCLEGROUP-"))
self.assertEqual(9, response.count("CYCLE-"))
self.assertEqual(3, response.count("CYCLETASK-"))
  def test_cycle_task_objects(self):
""" test cycle task and various objects """
data = [
{
"object_name": "CycleTaskGroupObjectTask", #
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Policy",
"slugs": ["p1"],
},
},
"fields": "all",
}, {
"object_name": "Policy", #
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": ["slug", "title"],
},
]
response = self.export_csv(data).data
self.assertEqual(2, response.count("CYCLETASK-"))
self.assertEqual(3, response.count(",p1,"))
def test_wf_indirect_relevant_filters(self):
""" test related filter for indirect relationships on wf objects """
def block(obj):
return {
"object_name": obj,
"fields": ["slug"],
"filters": {
"expression": {
"object_name": "Policy",
"op": {"name": "relevant"},
"slugs": ["p1"],
},
},
}
data = [
block("Workflow"),
block("Cycle"),
block("CycleTaskGroup"),
block("CycleTaskGroupObjectTask"),
]
response = self.export_csv(data).data
wf = Workflow.query.filter_by(slug="wf-1").first()
cycle = wf.cycles[0]
cycle_tasks = []
for cycle_task in cycle.cycle_task_group_object_tasks:
is_related = False
for related_object in cycle_task.related_objects():
if related_object.slug == "p1":
is_related = True
if is_related:
cycle_tasks.append(cycle_task)
cycle_task_groups = list({cycle_task.cycle_task_group
for cycle_task in cycle_tasks})
self.assertEqual(1, response.count("wf-"))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(wf.slug))
self.assertEqual(1, response.count("CYCLE-"))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(cycle.slug))
self.assertEqual(1, response.count("CYCLEGROUP-"))
self.assertEqual(1, len(cycle_task_groups))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(
cycle_task_groups[0].slug))
self.assertEqual(2, response.count("CYCLETASK-"))
self.assertEqual(2, len(cycle_tasks))
for cycle_task in cycle_tasks:
self.assertRegexpMatches(response, ",{}[,\r\n]".format(
cycle_task.slug))
destinations = [
("Workflow", wf.slug, 3),
("Cycle", cycle.slug, 3),
("CycleTaskGroupObjectTask", cycle_tasks[0].slug, 1),
("CycleTaskGroupObjectTask", cycle_tasks[1].slug, 1),
]
for object_name, slug, count in destinations:
data = [{
"object_name": "Policy",
"fields": ["slug"],
"filters": {
"expression": {
"object_name": object_name,
"op": {"name": "relevant"},
"slugs": [slug],
},
},
}]
response = self.export_csv(data).data
self.assertEqual(count, response.count(",p"), "Count for " + object_name)
self.assertIn(",p1", response)
|
the-stack_0_6747 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020/12/23 4:56 PM
# @File : main.trainer_predict_api.py
# @Author: johnson
# @Contact : github: johnson7788
# @Desc :
import logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os, random, time
import numpy as np
import torch
from transformers import AlbertConfig
from pytorch_pretrained_bert import BertTokenizer
from modeling import AlbertSPC, BertForGLUESimpleAdaptorTraining
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler
from tqdm import tqdm
from utils_glue import InputExample, convert_examples_to_features
import argparse
from flask import Flask, request, jsonify, abort
######################################################
# Run predictions with the non-distilled (teacher) model, exposed as a Flask API.
######################################################
app = Flask(__name__)
def load_examples(contents, max_seq_length, tokenizer, label_list):
"""
    :param contents: a list of (sentence, aspect) pairs, e.g. [('苹果很好用', '苹果')]
    :param max_seq_length: maximum sequence length after tokenization
    :param tokenizer: an initialized tokenizer
    :param label_list: list of class labels
    :return: a TensorDataset of (input_ids, input_mask, segment_ids)
"""
examples = []
for guid, content in enumerate(contents):
sentence, aspect = content
examples.append(
InputExample(guid=guid, text_a=sentence, text_b=aspect))
features = convert_examples_to_features(examples, label_list, max_seq_length, tokenizer,
output_mode="classification",
cls_token_segment_id=0, pad_token_segment_id=0)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
return dataset
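# Illustrative usage sketch (the sentence/aspect pair is made up; the vocab path
# and label list match the class below):
#
#     tokenizer = BertTokenizer(vocab_file="albert_model/vocab.txt")
#     dataset = load_examples([("这个手机的屏幕很好", "屏幕")], max_seq_length=70,
#                             tokenizer=tokenizer, label_list=["是", "否"])
#     # -> TensorDataset of (input_ids, input_mask, segment_ids) tensors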
class TorchAsBertModel(object):
def __init__(self, verbose=0):
self.verbose = verbose
self.label_list = ["是", "否"]
self.num_labels = len(self.label_list)
        # Pick the device to run on (GPU if available, otherwise CPU)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
self.tokenizer, self.model = self.load_model()
        # Maximum lengths kept for the text left/right of the aspect and for the aspect itself
self.left_max_seq_len = 15
self.right_max_seq_len = 20
self.aspect_max_seq_len = 30
def load_model(self):
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.args = args
        # Parse the config files; the teacher and student models share the same vocab
self.vocab_file = "albert_model/vocab.txt"
        # The fine-tuned teacher config and checkpoint are used here; they can be swapped for the student config and the distilled student model:
# student config: config/chinese_bert_config_L4t.json
# distil student model: distil_model/gs8316.pkl
self.bert_config_file_S = "albert_model/config.json"
self.tuned_checkpoint_S = "trained_teacher_model/test_components.pkl"
self.max_seq_length = 70
        # Batch size used for prediction
self.predict_batch_size = 64
        # Load the model config; our max sequence length must fit within the one in the config
bert_config_S = AlbertConfig.from_json_file(self.bert_config_file_S)
bert_config_S.num_labels = self.num_labels
        # Load the tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
        # Build the model and load the fine-tuned weights
model_S = AlbertSPC(bert_config_S)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
model_S.load_state_dict(state_dict_S)
if self.verbose:
print("模型已加载")
return tokenizer, model_S
def truncate(self, input_text, max_len, trun_post='post'):
"""
        Truncate the input text to at most max_len characters.
        :param input_text: text to truncate
        :param max_len: maximum length to keep, e.g. 15
        :param trun_post: which side to keep when truncating:
            "pre" keeps the first max_len characters, "post" keeps the last max_len characters
        :return: the (possibly truncated) text
"""
if max_len is not None and len(input_text) > max_len:
if trun_post == "post":
return input_text[-max_len:]
else:
return input_text[:max_len]
else:
return input_text
def clean(self, text_left, aspect, text_right):
"""
        Truncate the left context, aspect and right context to their maximum lengths
        :param text_left: text before the aspect
        :param aspect: the aspect term
        :param text_right: text after the aspect
        :return: the three truncated strings
"""
text_left = self.truncate(text_left, self.left_max_seq_len)
aspect = self.truncate(aspect, self.aspect_max_seq_len)
text_right = self.truncate(text_right, self.right_max_seq_len, trun_post="pre")
return text_left, aspect, text_right
def predict_batch(self, data):
"""
        Predict a batch of samples
        :param data: a list of (content, aspect, aspect_start, aspect_end) tuples
        :return: a list of predicted label ids
"""
contents = []
for one_data in data:
content, aspect, aspect_start, aspect_end = one_data
text_left = content[:aspect_start]
text_right = content[aspect_end:]
text_left, aspect, text_right = self.clean(text_left, aspect, text_right)
new_content = text_left + aspect + text_right
contents.append((new_content, aspect))
eval_dataset = load_examples(contents, self.max_seq_length, self.tokenizer, self.label_list)
if self.verbose:
print("评估数据集已加载")
res = self.do_predict(self.model, eval_dataset)
if self.verbose:
print(f"预测的结果是: {res}, {[self.label_list[id] for id in res]}")
        # TODO: for a single input record, return just a single result
return res
    def predict_batch_without_truncate(self, data):
        """
        Predict a batch of samples without truncating around the aspect
        :param data: a list of samples to process, [(content, aspect), ...]
        :return: a list of predicted label strings
"""
eval_dataset = load_examples(data, self.max_seq_length, self.tokenizer, self.label_list)
if self.verbose:
print("评估数据集已加载")
res = self.do_predict(self.model, eval_dataset)
if self.verbose:
print(f"预测的结果是: {res}, {[self.label_list[id] for id in res]}")
        # Map predicted ids back to label strings
result = [self.label_list[r] for r in res]
return result
def do_predict(self, model, eval_dataset):
        # Accumulate prediction results here
results = []
if self.verbose:
print("***** 开始预测 *****")
print(" 样本数 = %d", len(eval_dataset))
print(" Batch size = %d", self.predict_batch_size)
        # Sequential sampler and dataloader over the evaluation set
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=self.predict_batch_size)
model.eval()
model.to(self.device)
        # Record the start time
start_time = time.time()
        # Collect predicted logits
pred_logits = []
        for batch in tqdm(eval_dataloader, desc="Evaluating", disable=True):
input_ids, input_mask, segment_ids = batch
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
with torch.no_grad():
logits = model(input_ids, input_mask, segment_ids)
cpu_logits = logits.detach().cpu()
for i in range(len(cpu_logits)):
pred_logits.append(cpu_logits[i].numpy())
pred_logits = np.array(pred_logits)
        # Take the label with the highest probability
preds = np.argmax(pred_logits, axis=1)
if self.verbose:
print(f"preds: {preds}")
results.extend(preds.tolist())
cost_time = time.time() - start_time
if self.verbose:
print(
f"--- 评估{len(eval_dataset)}条数据的总耗时是 {cost_time} seconds, 每条耗时 {cost_time / len(eval_dataset)} seconds ---")
return results
@app.route("/api", methods=['POST'])
def api():
"""
Args:
        test_data: data to predict on, a list of (content, aspect) text pairs
    Returns:
        A JSON list of predicted labels
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
model = TorchAsBertModel()
    results = model.predict_batch_without_truncate(test_data)
return jsonify(results)
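# Illustrative client sketch (hypothetical host; the port matches app.run below):
#
#     import requests
#     resp = requests.post("http://127.0.0.1:5000/api",
#                          json={"data": [["这个手机的屏幕很好", "屏幕"]]})
#     print(resp.json())   # e.g. ["是"]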
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)
|
the-stack_0_6749 | """
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
Lab 2B - Color Image Cone Parking
"""
########################################################################################
# Imports
########################################################################################
import sys
import cv2 as cv
import numpy as np
from enum import IntEnum
sys.path.insert(1, "../../library")
import racecar_core
import racecar_utils as rc_utils
########################################################################################
# Global variables
########################################################################################
rc = racecar_core.create_racecar()
# >> Constants
# The smallest contour we will recognize as a valid contour
MIN_CONTOUR_AREA = 30
# The HSV range for the color orange, stored as (hsv_min, hsv_max)
ORANGE = ((10, 100, 100), (20, 255, 255))
# >> Variables
speed = 0.0 # The current speed of the car
angle = 0.0 # The current angle of the car's wheels
contour_center = None # The (pixel row, pixel column) of contour
contour_area = 0 # The area of contour
########################################################################################
# Functions
########################################################################################
class State(IntEnum):
search = 0
obstacle = 1
approach = 2
stop = 3
curState = State.search
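# State machine note: the car starts in `search`, turning until an orange
# contour is detected, then switches to `approach` and slows down as the
# contour area grows (i.e. as the cone gets closer), entering `stop` once the
# area corresponds to roughly 30 cm from the cone. The `obstacle` state is
# declared but not used in update() below.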
def update_contour():
"""
Finds contours in the current color image and uses them to update contour_center
and contour_area
"""
global contour_center
global contour_area
image = rc.camera.get_color_image()
if image is None:
contour_center = None
contour_area = 0
else:
# Find all of the orange contours
contours = rc_utils.find_contours(image, ORANGE[0], ORANGE[1])
# Select the largest contour
contour = rc_utils.get_largest_contour(contours, MIN_CONTOUR_AREA)
if contour is not None:
# Calculate contour information
contour_center = rc_utils.get_contour_center(contour)
contour_area = rc_utils.get_contour_area(contour)
# Draw contour onto the image
rc_utils.draw_contour(image, contour)
rc_utils.draw_circle(image, contour_center)
else:
contour_center = None
contour_area = 0
# Display the image to the screen
rc.display.show_color_image(image)
def start():
"""
This function is run once every time the start button is pressed
"""
global speed
global angle
# Initialize variables
speed = 0
angle = 0
# Set initial driving speed and angle
rc.drive.set_speed_angle(speed, angle)
# Set update_slow to refresh every half second
rc.set_update_slow_time(0.5)
# Print start message
print(">> Lab 2B - Color Image Cone Parking")
def update():
"""
After start() is run, this function is run every frame until the back button
is pressed
"""
global speed
global angle
global curState
# Search for contours in the current color image
update_contour()
imgX = rc.camera.get_width()
if contour_center is not None:
angle = rc_utils.remap_range(contour_center[1],0,imgX,-1,1)
# TODO: Park the car 30 cm away from the closest orange cone
if curState == State.search:
rc.drive.set_speed_angle(0.5, 1)
if contour_center is not None:
curState = State.approach
# elif curState == State.obstacle:
elif curState == State.approach:
# rc.drive.set_speed_angle(0.5, angle)
if contour_area < 3110:
rc.drive.set_speed_angle(0.35,angle)
elif contour_area >= 3110 and contour_area < 17670 :
rc.drive.set_speed_angle(0.2,angle)
elif contour_area >= 17670 and contour_area < 25000:
rc.drive.set_speed_angle(0.01,angle)
elif contour_area > 26450:
curState = State.stop
print("stop")
elif curState == State.stop:
rc.drive.set_speed_angle(0,0)
    # Measured contour areas: ~3110 px at 101 cm, ~27353 px at 30 cm, ~17670 px at 40 cm from the cone
# Print the current speed and angle when the A button is held down
if rc.controller.is_down(rc.controller.Button.A):
print("Speed:", speed, "Angle:", angle)
# Print the center and area of the largest contour when B is held down
if rc.controller.is_down(rc.controller.Button.B):
if contour_center is None:
print("No contour found")
else:
print("Center:", contour_center, "Area:", contour_area)
def update_slow():
"""
After start() is run, this function is run at a constant rate that is slower
than update(). By default, update_slow() is run once per second
"""
# Print a line of ascii text denoting the contour area and x position
if rc.camera.get_color_image() is None:
# If no image is found, print all X's and don't display an image
print("X" * 10 + " (No image) " + "X" * 10)
else:
# If an image is found but no contour is found, print all dashes
if contour_center is None:
print("-" * 32 + " : area = " + str(contour_area))
# Otherwise, print a line of dashes with a | indicating the contour x-position
else:
s = ["-"] * 32
s[int(contour_center[1] / 20)] = "|"
print("".join(s) + " : area = " + str(contour_area))
########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################
if __name__ == "__main__":
rc.set_start_update(start, update, update_slow)
rc.go() |
the-stack_0_6751 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <[email protected]>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
##############################################################################
# Configuration parameters for Google App Engine
##############################################################################
KEEP_CACHED = False # request a dummy url every 10secs to force caching app
LOG_STATS = False # web2py level log statistics
APPSTATS = True # GAE level usage statistics and profiling
DEBUG = False # debug mode
AUTO_RETRY = True # force gae to retry commit on failure
#
# Read more about APPSTATS here
# http://googleappengine.blogspot.com/2010/03/easy-performance-profiling-with.html
# can be accessed from:
# http://localhost:8080/_ah/stats
##############################################################################
# All tricks in this file developed by Robin Bhattacharyya
##############################################################################
import time
import os
import sys
import logging
import cPickle
import pickle
import wsgiref.handlers
import datetime
path = os.path.dirname(os.path.abspath(__file__))
sys.path = [path]+[p for p in sys.path if not p==path]
sys.modules['cPickle'] = sys.modules['pickle']
from gluon.settings import global_settings
from google.appengine.api.labs import taskqueue
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
global_settings.web2py_runtime_gae = True
if os.environ.get('SERVER_SOFTWARE', '').startswith('Devel'):
(global_settings.web2py_runtime, DEBUG) = \
('gae:development', True)
else:
(global_settings.web2py_runtime, DEBUG) = \
('gae:production', False)
import gluon.main
def log_stats(fun):
"""Function that will act as a decorator to make logging"""
def newfun(env, res):
"""Log the execution time of the passed function"""
timer = lambda t: (t.time(), t.clock())
(t0, c0) = timer(time)
executed_function = fun(env, res)
(t1, c1) = timer(time)
log_info = """**** Request: %.2fms/%.2fms (real time/cpu time)"""
log_info = log_info % ((t1 - t0) * 1000, (c1 - c0) * 1000)
logging.info(log_info)
return executed_function
return newfun
logging.basicConfig(level=logging.INFO)
def wsgiapp(env, res):
"""Return the wsgiapp"""
if env['PATH_INFO'] == '/_ah/queue/default':
if KEEP_CACHED:
delta = datetime.timedelta(seconds=10)
taskqueue.add(eta=datetime.datetime.now() + delta)
res('200 OK',[('Content-Type','text/plain')])
return ['']
env['PATH_INFO'] = env['PATH_INFO'].encode('utf8')
return gluon.main.wsgibase(env, res)
if LOG_STATS or DEBUG:
wsgiapp = log_stats(wsgiapp)
if AUTO_RETRY:
from gluon.contrib.gae_retry import autoretry_datastore_timeouts
autoretry_datastore_timeouts()
def main():
"""Run the wsgi app"""
if APPSTATS:
run_wsgi_app(wsgiapp)
else:
wsgiref.handlers.CGIHandler().run(wsgiapp)
if __name__ == '__main__':
main()
|
the-stack_0_6753 | """A keyboard with a hint when you press it"""
import lvgl as lv
from ..decorators import feed_touch
from .theme import styles
class HintKeyboard(lv.btnm):
def __init__(self, scr, *args, **kwargs):
super().__init__(scr, *args, **kwargs)
self.hint = lv.btn(scr)
self.hint.set_size(50, 60)
self.hint_lbl = lv.label(self.hint)
self.hint_lbl.set_text(" ")
self.hint_lbl.set_style(0, styles["title"])
self.hint_lbl.set_size(50, 60)
self.hint.set_hidden(True)
self.callback = None
super().set_event_cb(self.cb)
def set_event_cb(self, callback):
self.callback = callback
def get_event_cb(self):
return self.callback
def cb(self, obj, event):
if event == lv.EVENT.PRESSING:
feed_touch()
c = obj.get_active_btn_text()
if c is not None and len(c) <= 2:
self.hint.set_hidden(False)
self.hint_lbl.set_text(c)
point = lv.point_t()
indev = lv.indev_get_act()
lv.indev_get_point(indev, point)
self.hint.set_pos(point.x-25, point.y-130)
elif event == lv.EVENT.RELEASED:
self.hint.set_hidden(True)
if self.callback is not None:
self.callback(obj, event)
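# Illustrative usage sketch (assumes an existing lvgl screen `scr` and a button
# map set via set_map, as for any lv.btnm):
#
#     kb = HintKeyboard(scr)
#     kb.set_event_cb(lambda obj, event: print(obj.get_active_btn_text()))
#
# While a key is being pressed the hint button follows the touch point and
# shows the key's character; it is hidden again on release.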
|
the-stack_0_6757 | import torch
from torch.utils.data import Dataset
import os
import pickle
class ToxicData(Dataset):
def __init__(self, root, dcat, max_len) -> None:
super(ToxicData, self).__init__()
src_path = os.path.join(root, dcat + '/src.pkl')
tgt_path = os.path.join(root, dcat + '/tgt.pkl')
self.src = pickle.load(open(src_path, 'rb'))
self.tgt = pickle.load(open(tgt_path, 'rb'))
self.max_len = max_len
def __getitem__(self, index):
input = torch.tensor(self.src[index][:self.max_len])
output = torch.tensor(self.tgt[index])
return input, output
def __len__(self):
return len(self.src)
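# Illustrative usage sketch (assumes the <root>/<dcat>/src.pkl and
# <root>/<dcat>/tgt.pkl layout read in __init__):
#
#     from torch.utils.data import DataLoader
#     train_set = ToxicData(root="data", dcat="train", max_len=256)
#     loader = DataLoader(train_set, batch_size=32, shuffle=True)
#
# Note: sequences are truncated but not padded here, so batch_size > 1 would
# need a padding collate_fn in practice.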
|
the-stack_0_6758 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import typing
import oneflow._oneflow_internal
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.framework import id_util as id_util
def api_fused_self_attention_query_mul_key_and_value(
x: oneflow._oneflow_internal.BlobDesc,
head_size: int,
alpha: float = 1.0,
name: typing.Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
if name is None:
name = id_util.UniqueStr("FusedSelfAttentionQueryMulKeyAndValue_")
op = (
flow.user_op_builder(name)
.Op("fused_self_attention_query_mul_key_and_value")
.Input("hidden_states", [x])
.Attr("head_size", int(head_size))
.Attr("alpha", float(alpha))
.Output("query_mul_key")
.Output("value")
.Build()
)
(qmk, v) = op.InferAndTryRun().RemoteBlobList()
return (qmk, v)
|
the-stack_0_6759 | import copy
import json
import os
import os.path
import zstd
import hlt
ARBITRARY_ID = -1
def parse_replay_file(file_name, player_name):
print("Load Replay: " + file_name)
with open(file_name, 'rb') as f:
data = json.loads(zstd.loads(f.read()))
print("Load Basic Information")
player = [p for p in data['players'] if p['name'].split(" ")[0] == player_name][0]
player_id = int(player['player_id'])
my_shipyard = hlt.Shipyard(player_id, ARBITRARY_ID,
hlt.Position(player['factory_location']['x'], player['factory_location']['y']))
other_shipyards = [
hlt.Shipyard(p['player_id'], ARBITRARY_ID, hlt.Position(p['factory_location']['x'], p['factory_location']['y']))
for p in data['players'] if int(p['player_id']) != player_id]
width = data['production_map']['width']
height = data['production_map']['height']
print("Load Cell Information")
first_cells = []
for x in range(len(data['production_map']['grid'])):
row = []
for y in range(len(data['production_map']['grid'][x])):
row += [hlt.MapCell(hlt.Position(x, y), data['production_map']['grid'][x][y]['energy'])]
first_cells.append(row)
frames = []
for f in data['full_frames']:
prev_cells = first_cells if len(frames) == 0 else frames[-1]._cells
new_cells = copy.deepcopy(prev_cells)
for c in f['cells']:
new_cells[c['y']][c['x']].halite_amount = c['production']
frames.append(hlt.GameMap(new_cells, width, height))
print("Load Player Ships")
moves = [{} if str(player_id) not in f['moves'] else {m['id']: m['direction'] for m in f['moves'][str(player_id)] if
m['type'] == "m"} for f in data['full_frames']]
ships = [{} if str(player_id) not in f['entities'] else {
int(sid): hlt.Ship(player_id, int(sid), hlt.Position(ship['x'], ship['y']), ship['energy']) for sid, ship in
f['entities'][str(player_id)].items()} for f in data['full_frames']]
print("Load Other Player Ships")
other_ships = [
{int(sid): hlt.Ship(int(pid), int(sid), hlt.Position(ship['x'], ship['y']), ship['energy']) for pid, p in
f['entities'].items() if
int(pid) != player_id for sid, ship in p.items()} for f in data['full_frames']]
print("Load Droppoff Information")
first_my_dropoffs = [my_shipyard]
first_them_dropoffs = other_shipyards
my_dropoffs = []
them_dropoffs = []
for f in data['full_frames']:
new_my_dropoffs = copy.deepcopy(first_my_dropoffs if len(my_dropoffs) == 0 else my_dropoffs[-1])
new_them_dropoffs = copy.deepcopy(first_them_dropoffs if len(them_dropoffs) == 0 else them_dropoffs[-1])
for e in f['events']:
if e['type'] == 'construct':
if int(e['owner_id']) == player_id:
new_my_dropoffs.append(
hlt.Dropoff(player_id, ARBITRARY_ID, hlt.Position(e['location']['x'], e['location']['y'])))
else:
new_them_dropoffs.append(
hlt.Dropoff(e['owner_id'], ARBITRARY_ID, hlt.Position(e['location']['x'], e['location']['y'])))
my_dropoffs.append(new_my_dropoffs)
them_dropoffs.append(new_them_dropoffs)
return list(zip(frames, moves, ships, other_ships, my_dropoffs, them_dropoffs))
def parse_replay_folder(folder_name, player_name, max_files=None):
replay_buffer = []
for file_name in sorted(os.listdir(folder_name)):
if not file_name.endswith(".hlt"):
continue
elif max_files is not None and len(replay_buffer) >= max_files:
break
else:
replay_buffer.append(parse_replay_file(os.path.join(folder_name, file_name), player_name))
return replay_buffer
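# Illustrative usage sketch (hypothetical folder and player name):
#
#     games = parse_replay_folder("replays/", "MyBot", max_files=5)
#     # each game is a list of per-turn tuples:
#     # (game_map, moves, my_ships, other_ships, my_dropoffs, their_dropoffs)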
|
the-stack_0_6760 | # Autores:
# Diego Carballido Álvarez ([email protected])
# José Antonio Figueiras Martínez ([email protected])
import matplotlib.pyplot as plt
import numpy as np
# Recursive function
def B(coorArr, i, j, t):
if j == 0:
return coorArr[i]
return B(coorArr, i, j - 1, t) * (1 - t) + B(coorArr, i + 1, j - 1, t) * t
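# Note: B() implements De Casteljau's recursion: control values are linearly
# interpolated pairwise, level by level, until a single point on the Bezier
# curve remains. For two control values it reduces to
#     B(c, 0, 1, t) = c[0]*(1 - t) + c[1]*t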
# Control points
P=np.array([[0.75, 1.5],[1., 1.],[2.,1.],[2.75,1.],[3.,1.5],[3.1,1.75],
[3.,2.],[2.75,2.5],[2.,2.5],[1.,2.5],[0.75,2.],[0.75,1.75],
[0.75,1.5],[0.75,1.],[1.,0.5],[1.5,0.],[2.,0.],[2.75,0.],[3.,0.25]])
fig=plt.figure("Letra e")
ini=0; fin=3
#Una iteración del for por cada curva
for k in range(0,9):
x=P[ini:fin,0]
y=P[ini:fin,1]
n=x.size
xb=[]
yb=[]
for t in np.linspace(0.,1.,25):
a = B(x, 0, n - 1, t)
b = B(y, 0, n - 1, t)
xb.append(a)
yb.append(b)
plt.plot(xb,yb)
ini=fin-1
fin=ini+n
plt.plot(P[:,0],P[:,1],'c--',P[:,0],P[:,1],'ko',ms=8)
plt.xticks([])
plt.yticks([])
plt.axis('off')
plt.axis([min(P[:,0])-0.065,max(P[:,0])+.05,min(P[:,1])-0.05,max(P[:,1])+0.05])
plt.show() |
the-stack_0_6761 | import re
import glob
import pandas as pd
from . import clean
class Corpus(object):
"""Docstring"""
def __init__(self, docs_paths):
self.corpus = []
self.docs_names = docs_paths
def load(self):
"""
:return A list of Strings each one being a document
"""
for file_name in self.docs_names:
with open(file_name, 'r') as fh:
new_discussion = {}
text = fh.read().split("\n\n")
new_discussion["id"] = file_name[-12:-4]
new_discussion["question_title"] = text[0]
new_discussion["question_body"] = text[1]
new_discussion["answers"] = text[2:]
self.corpus.append(new_discussion)
def get_discussions_text(self):
"""docstring"""
return [
" ".join([
doc["question_title"],
doc["question_body"],
" ".join(doc["answers"])]
)
for doc in self.corpus
]
def corpus_word_frequency(self):
"""docstring"""
corpus_text = self.get_discussions_text()
bag_of_words = " ".join(corpus_text).split()
tokens = set(bag_of_words)
words_frequency = {}
for doc in corpus_text:
text = doc.split()
for word in tokens:
if word in text:
if word in words_frequency.keys():
words_frequency[word] += 1
else:
words_frequency[word] = 1
return words_frequency
def export_pruned(self, limits, destination):
"""docstring"""
if not self.corpus:
print("A corpus need to be loaded first")
return
upper_pruning = None
lower_pruning = None
word_count = self.corpus_word_frequency()
word_count_df = pd.DataFrame.from_dict(
word_count, orient="index", columns=["w_count"])
if "upper" in limits.keys():
upper_pruning = word_count_df.loc[
word_count_df.w_count > limits["upper"]
]
if "lower" in limits.keys():
lower_pruning = word_count_df.loc[
word_count_df.w_count < limits["lower"]
]
print(list(upper_pruning.index))
for doc in self.corpus:
file_name = destination + "instance_" + doc["id"] + ".txt"
question_title = doc["question_title"]
question_body = doc["question_body"]
answers = doc["answers"]
if "upper" in limits.keys():
question_title = " ".join(
[word for word in question_title.split()
if word not in list(upper_pruning.index)])
question_body = " ".join(
[word for word in question_body.split()
if word not in list(upper_pruning.index)])
answers = [
[word for word in answer.split()
if word not in list(upper_pruning.index)]
for answer in answers
]
answers = [" ".join(txt) for txt in answers if txt]
if "lower" in limits.keys():
question_title = " ".join(
[word for word in question_title.split()
if word not in list(lower_pruning.index)])
question_body = " ".join(
[word for word in question_body.split()
if word not in list(lower_pruning.index)])
answers = [
[word for word in answer.split()
if word not in list(lower_pruning.index)]
for answer in answers
]
answers = [" ".join(txt) for txt in answers if txt]
with open(file_name, 'w') as fh:
fh.write(question_title)
fh.write("\n\n" + question_body)
for answer in answers:
fh.write("\n\n" + answer)
print("Writen " + doc["id"])
return upper_pruning, lower_pruning
# TODO DEPRECATED
# def remove_single_quotes(word):
# word = word.strip()
# if word[0] == "'" and word[-1] == "'":
# word = word[1:-1]
# return word
def remove_block_tag(tags_exp, text):
"""
Receives a text and tag pair for opening and closing
and eliminates all occurrences of the tags and its text
in between. The tags must be passed as regex.
"""
tag_open, tag_close = tags_exp[0], tags_exp[1]
while True:
start_match = re.search(tag_open, text)
end_match = re.search(tag_close, text)
if not (start_match and end_match):
break
text = text[:start_match.start()] + " " + text[end_match.end():]
return text
def remove_single_tag(tag_exp, text):
"""
Receives a tag as regex and remove all occurrences in the text.
"""
while True:
matched = re.search(tag_exp, text)
if not matched: break
text = text[:matched.start()] + " " + text[matched.end():]
return text
def filter_by_words(questions_df, answers_df, simple_words, compound_words):
""" docstring """
matched_ids = []
not_matched_ids = []
simple_word_set = set(simple_words)
punctuation_rgx = r"[^()[\]<>+\-_=\*|\^{}$&%#@!?.,:;/\"]+"
for index, row in questions_df.iterrows():
print(index)
found_flag = False
title = row.Title.lower()
in_title_compound = [
True if re.compile(compound_word).search(title) else False
for compound_word in compound_words]
clean_text = re.findall(punctuation_rgx, title)
clean_text = [word for line in clean_text for word in line.split()]
clean_text = list(map(clean.remove_quotation_marks, clean_text))
simple_matched = simple_word_set.intersection(set(clean_text))
in_title_simple = [True] * len(simple_matched)
in_title = in_title_compound + in_title_simple
if any(in_title):
found_flag = True
else:
body = row.Body.lower()
in_body_compound = [
True if re.compile(compound_word).search(body) else False
for compound_word in compound_words]
clean_text = re.findall(punctuation_rgx, body)
clean_text = [word for line in clean_text for word in line.split()]
clean_text = list(map(clean.remove_quotation_marks, clean_text))
simple_matched = simple_word_set.intersection(set(clean_text))
in_body_simple = [True] * len(simple_matched)
in_body = in_body_compound + in_body_simple
if any(in_body):
found_flag = True
else:
answers = answers_df.loc[answers_df.ParentId == row.Id]
for idx, line in answers.iterrows():
answer = line.Body.lower()
in_answers_compound = [
True if re.compile(compound_word).search(answer) else
False for compound_word in compound_words]
clean_text = re.findall(punctuation_rgx, answer)
clean_text = [
word for line in clean_text for word in line.split()]
clean_text = list(
map(clean.remove_quotation_marks, clean_text))
simple_matched = simple_word_set.intersection(
set(clean_text))
in_answers_simple = [True] * len(simple_matched)
in_answers = in_answers_compound + in_answers_simple
if any(in_answers):
found_flag = True
break
if found_flag:
matched_ids.append(row.Id)
else:
not_matched_ids.append(row.Id)
return matched_ids, not_matched_ids
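# Sketch of how filter_by_words might be called; the DataFrames are expected to
# expose the Title, Body, Id and ParentId columns used above, and the word lists
# below are placeholders:
#     matched, unmatched = filter_by_words(
#         questions_df, answers_df,
#         simple_words=["numpy", "pandas"],
#         compound_words=[r"machine\s+learning"])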
|
the-stack_0_6763 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 5 14:31:00 2016
@author: mtkessel
"""
import matplotlib.pyplot as plt
from numpy.random import random, randint
import pandas as pd
dates = [
1665,
1674,
1838,
1839,
1855]
values = [1,2,3,4,5]
X = dates #pd.to_datetime(dates)
fig, ax = plt.subplots(figsize=(6,1))
ax.scatter(X, [1]*len(X), c=values,
marker='s', s=100)
fig.autofmt_xdate()
# everything after this is turning off stuff that's plotted by default
ax.yaxis.set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.get_yaxis().set_ticklabels([])
day = 10 #pd.to_timedelta("1", unit='D')
plt.xlim(X[0] - day, X[-1] + day)
plt.show()
|
the-stack_0_6764 | from __future__ import annotations
from typing import Any, Callable
from sqlalchemy.ext.hybrid import hybrid_property
from .expression import Expression
from .resolver import AttributeResolver, PrefetchedAttributeResolver
from .typing import ColumnDefaults
class DerivedColumn:
def __init__(
self,
expression: Expression,
default: Any = None,
prefetch_attribute_names: bool = True,
):
self.expression = expression
self.default = default
if not prefetch_attribute_names:
self.resolver = AttributeResolver(expression.columns)
else:
self.resolver = PrefetchedAttributeResolver(expression.columns)
if len(self.expression.columns) > 1 and self.default is not None:
raise TypeError("Cannot use default for multi-column expression.")
def _default_functions(self) -> ColumnDefaults:
setter = self.default
if not callable(setter):
setter = lambda: self.default # noqa
return {True: setter, False: lambda: None}
def make_getter(self) -> Callable[[Any], Any]:
"""Returns a getter function, evaluating the expression in bound scope."""
evaluate = self.expression.evaluate
values = self.resolver.values
return lambda orm_obj: evaluate(values(orm_obj))
def make_setter(self) -> Callable[[Any, Any], None]:
"""Returns a setter function setting default values based on given booleans."""
defaults = self._default_functions()
target_name = self.resolver.single_name
def _fset(self: Any, value: Any) -> None:
if not isinstance(value, bool):
raise TypeError("Flag only accepts boolean values")
setattr(self, target_name(self), defaults[value]())
return _fset
def create_hybrid(self) -> hybrid_property:
return hybrid_property(
fget=self.make_getter(),
fset=self.make_setter() if self.default is not None else None,
expr=lambda cls: self.expression.sql,
)
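# Illustrative sketch (the Expression instance is hypothetical; its exact API lives
# in the sibling .expression module): a DerivedColumn wrapping a boolean SQL
# expression can be attached to a mapped class as a hybrid attribute, e.g.
#     SomeModel.is_active = DerivedColumn(expr, default=None).create_hybrid()
# Instance access then evaluates the expression in Python via the resolver, while
# class-level access renders expression.sql for use inside queries.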
|
the-stack_0_6765 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from azure.core.exceptions import HttpResponseError
class AccessPolicy(Model):
"""An Access policy.
:param start: the date-time the policy is active
:type start: str
:param expiry: the date-time the policy expires
:type expiry: str
:param permission: the permissions for the acl policy
:type permission: str
"""
_attribute_map = {
'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(AccessPolicy, self).__init__(**kwargs)
self.start = kwargs.get('start', None)
self.expiry = kwargs.get('expiry', None)
self.permission = kwargs.get('permission', None)
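# Illustrative construction (placeholder values): the generated model simply stores
# the keyword arguments it is given, e.g.
#     AccessPolicy(start='2021-01-01T00:00:00Z',
#                  expiry='2021-01-02T00:00:00Z',
#                  permission='r')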
class AppendPositionAccessConditions(Model):
"""Additional parameters for a set of operations, such as:
AppendBlob_append_block, AppendBlob_append_block_from_url, AppendBlob_seal.
:param max_size: Optional conditional header. The max length in bytes
permitted for the append blob. If the Append Block operation would cause
the blob to exceed that limit or if the blob size is already greater than
the value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition
Failed).
:type max_size: long
:param append_position: Optional conditional header, used only for the
Append Block operation. A number indicating the byte offset to compare.
Append Block will succeed only if the append position is equal to this
number. If it is not, the request will fail with the
AppendPositionConditionNotMet error (HTTP status code 412 - Precondition
Failed).
:type append_position: long
"""
_attribute_map = {
'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}},
'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(AppendPositionAccessConditions, self).__init__(**kwargs)
self.max_size = kwargs.get('max_size', None)
self.append_position = kwargs.get('append_position', None)
class BlobFlatListSegment(Model):
"""BlobFlatListSegment.
All required parameters must be populated in order to send to Azure.
:param blob_items: Required.
:type blob_items: list[~azure.storage.blob.models.BlobItemInternal]
"""
_validation = {
'blob_items': {'required': True},
}
_attribute_map = {
'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}},
}
_xml_map = {
'name': 'Blobs'
}
def __init__(self, **kwargs):
super(BlobFlatListSegment, self).__init__(**kwargs)
self.blob_items = kwargs.get('blob_items', None)
class BlobHierarchyListSegment(Model):
"""BlobHierarchyListSegment.
All required parameters must be populated in order to send to Azure.
:param blob_prefixes:
:type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix]
:param blob_items: Required.
:type blob_items: list[~azure.storage.blob.models.BlobItemInternal]
"""
_validation = {
'blob_items': {'required': True},
}
_attribute_map = {
'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}},
'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}},
}
_xml_map = {
'name': 'Blobs'
}
def __init__(self, **kwargs):
super(BlobHierarchyListSegment, self).__init__(**kwargs)
self.blob_prefixes = kwargs.get('blob_prefixes', None)
self.blob_items = kwargs.get('blob_items', None)
class BlobHTTPHeaders(Model):
"""Additional parameters for a set of operations.
:param blob_cache_control: Optional. Sets the blob's cache control. If
specified, this property is stored with the blob and returned with a read
request.
:type blob_cache_control: str
:param blob_content_type: Optional. Sets the blob's content type. If
specified, this property is stored with the blob and returned with a read
request.
:type blob_content_type: str
:param blob_content_md5: Optional. An MD5 hash of the blob content. Note
that this hash is not validated, as the hashes for the individual blocks
were validated when each was uploaded.
:type blob_content_md5: bytearray
:param blob_content_encoding: Optional. Sets the blob's content encoding.
If specified, this property is stored with the blob and returned with a
read request.
:type blob_content_encoding: str
:param blob_content_language: Optional. Set the blob's content language.
If specified, this property is stored with the blob and returned with a
read request.
:type blob_content_language: str
:param blob_content_disposition: Optional. Sets the blob's
Content-Disposition header.
:type blob_content_disposition: str
"""
_attribute_map = {
'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}},
'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}},
'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}},
'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}},
'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}},
'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(BlobHTTPHeaders, self).__init__(**kwargs)
self.blob_cache_control = kwargs.get('blob_cache_control', None)
self.blob_content_type = kwargs.get('blob_content_type', None)
self.blob_content_md5 = kwargs.get('blob_content_md5', None)
self.blob_content_encoding = kwargs.get('blob_content_encoding', None)
self.blob_content_language = kwargs.get('blob_content_language', None)
self.blob_content_disposition = kwargs.get('blob_content_disposition', None)
class BlobItemInternal(Model):
"""An Azure Storage blob.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param deleted: Required.
:type deleted: bool
:param snapshot: Required.
:type snapshot: str
:param version_id:
:type version_id: str
:param is_current_version:
:type is_current_version: bool
:param properties: Required.
:type properties: ~azure.storage.blob.models.BlobPropertiesInternal
:param metadata:
:type metadata: ~azure.storage.blob.models.BlobMetadata
:param blob_tags:
:type blob_tags: ~azure.storage.blob.models.BlobTags
:param object_replication_metadata:
:type object_replication_metadata: dict[str, str]
"""
_validation = {
'name': {'required': True},
'deleted': {'required': True},
'snapshot': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}},
'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}},
'version_id': {'key': 'VersionId', 'type': 'str', 'xml': {'name': 'VersionId'}},
'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool', 'xml': {'name': 'IsCurrentVersion'}},
'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal', 'xml': {'name': 'Properties'}},
'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}},
'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}},
'object_replication_metadata': {'key': 'ObjectReplicationMetadata', 'type': '{str}', 'xml': {'name': 'ObjectReplicationMetadata'}},
}
_xml_map = {
'name': 'Blob'
}
def __init__(self, **kwargs):
super(BlobItemInternal, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.deleted = kwargs.get('deleted', None)
self.snapshot = kwargs.get('snapshot', None)
self.version_id = kwargs.get('version_id', None)
self.is_current_version = kwargs.get('is_current_version', None)
self.properties = kwargs.get('properties', None)
self.metadata = kwargs.get('metadata', None)
self.blob_tags = kwargs.get('blob_tags', None)
self.object_replication_metadata = kwargs.get('object_replication_metadata', None)
class BlobMetadata(Model):
"""BlobMetadata.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, str]
:param encrypted:
:type encrypted: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}},
'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}},
}
_xml_map = {
'name': 'Metadata'
}
def __init__(self, **kwargs):
super(BlobMetadata, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.encrypted = kwargs.get('encrypted', None)
class BlobPrefix(Model):
"""BlobPrefix.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(BlobPrefix, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
class BlobPropertiesInternal(Model):
"""Properties of a blob.
All required parameters must be populated in order to send to Azure.
:param creation_time:
:type creation_time: datetime
:param last_modified: Required.
:type last_modified: datetime
:param etag: Required.
:type etag: str
:param content_length: Size in bytes
:type content_length: long
:param content_type:
:type content_type: str
:param content_encoding:
:type content_encoding: str
:param content_language:
:type content_language: str
:param content_md5:
:type content_md5: bytearray
:param content_disposition:
:type content_disposition: str
:param cache_control:
:type cache_control: str
:param blob_sequence_number:
:type blob_sequence_number: long
:param blob_type: Possible values include: 'BlockBlob', 'PageBlob',
'AppendBlob'
:type blob_type: str or ~azure.storage.blob.models.BlobType
:param lease_status: Possible values include: 'locked', 'unlocked'
:type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
:param lease_state: Possible values include: 'available', 'leased',
'expired', 'breaking', 'broken'
:type lease_state: str or ~azure.storage.blob.models.LeaseStateType
:param lease_duration: Possible values include: 'infinite', 'fixed'
:type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
:param copy_id:
:type copy_id: str
:param copy_status: Possible values include: 'pending', 'success',
'aborted', 'failed'
:type copy_status: str or ~azure.storage.blob.models.CopyStatusType
:param copy_source:
:type copy_source: str
:param copy_progress:
:type copy_progress: str
:param copy_completion_time:
:type copy_completion_time: datetime
:param copy_status_description:
:type copy_status_description: str
:param server_encrypted:
:type server_encrypted: bool
:param incremental_copy:
:type incremental_copy: bool
:param destination_snapshot:
:type destination_snapshot: str
:param deleted_time:
:type deleted_time: datetime
:param remaining_retention_days:
:type remaining_retention_days: int
:param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15',
'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
:type access_tier: str or ~azure.storage.blob.models.AccessTier
:param access_tier_inferred:
:type access_tier_inferred: bool
:param archive_status: Possible values include:
'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool'
:type archive_status: str or ~azure.storage.blob.models.ArchiveStatus
:param customer_provided_key_sha256:
:type customer_provided_key_sha256: str
:param encryption_scope: The name of the encryption scope under which the
blob is encrypted.
:type encryption_scope: str
:param access_tier_change_time:
:type access_tier_change_time: datetime
:param tag_count:
:type tag_count: int
:param expires_on:
:type expires_on: datetime
:param is_sealed:
:type is_sealed: bool
:param rehydrate_priority: Possible values include: 'High', 'Standard'
:type rehydrate_priority: str or
~azure.storage.blob.models.RehydratePriority
"""
_validation = {
'last_modified': {'required': True},
'etag': {'required': True},
}
_attribute_map = {
'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}},
'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}},
'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}},
'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}},
'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}},
'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}},
'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}},
'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}},
'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}},
'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}},
'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}},
'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}},
'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}},
'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}},
'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}},
'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}},
'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}},
'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}},
'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}},
'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}},
'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}},
'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}},
'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}},
'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}},
'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}},
'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}},
'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}},
'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str', 'xml': {'name': 'CustomerProvidedKeySha256'}},
'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}},
'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}},
'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}},
'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}},
'is_sealed': {'key': 'Sealed', 'type': 'bool', 'xml': {'name': 'Sealed'}},
'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str', 'xml': {'name': 'RehydratePriority'}},
}
_xml_map = {
'name': 'Properties'
}
def __init__(self, **kwargs):
super(BlobPropertiesInternal, self).__init__(**kwargs)
self.creation_time = kwargs.get('creation_time', None)
self.last_modified = kwargs.get('last_modified', None)
self.etag = kwargs.get('etag', None)
self.content_length = kwargs.get('content_length', None)
self.content_type = kwargs.get('content_type', None)
self.content_encoding = kwargs.get('content_encoding', None)
self.content_language = kwargs.get('content_language', None)
self.content_md5 = kwargs.get('content_md5', None)
self.content_disposition = kwargs.get('content_disposition', None)
self.cache_control = kwargs.get('cache_control', None)
self.blob_sequence_number = kwargs.get('blob_sequence_number', None)
self.blob_type = kwargs.get('blob_type', None)
self.lease_status = kwargs.get('lease_status', None)
self.lease_state = kwargs.get('lease_state', None)
self.lease_duration = kwargs.get('lease_duration', None)
self.copy_id = kwargs.get('copy_id', None)
self.copy_status = kwargs.get('copy_status', None)
self.copy_source = kwargs.get('copy_source', None)
self.copy_progress = kwargs.get('copy_progress', None)
self.copy_completion_time = kwargs.get('copy_completion_time', None)
self.copy_status_description = kwargs.get('copy_status_description', None)
self.server_encrypted = kwargs.get('server_encrypted', None)
self.incremental_copy = kwargs.get('incremental_copy', None)
self.destination_snapshot = kwargs.get('destination_snapshot', None)
self.deleted_time = kwargs.get('deleted_time', None)
self.remaining_retention_days = kwargs.get('remaining_retention_days', None)
self.access_tier = kwargs.get('access_tier', None)
self.access_tier_inferred = kwargs.get('access_tier_inferred', None)
self.archive_status = kwargs.get('archive_status', None)
self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None)
self.encryption_scope = kwargs.get('encryption_scope', None)
self.access_tier_change_time = kwargs.get('access_tier_change_time', None)
self.tag_count = kwargs.get('tag_count', None)
self.expires_on = kwargs.get('expires_on', None)
self.is_sealed = kwargs.get('is_sealed', None)
self.rehydrate_priority = kwargs.get('rehydrate_priority', None)
class BlobTag(Model):
"""BlobTag.
All required parameters must be populated in order to send to Azure.
:param key: Required.
:type key: str
:param value: Required.
:type value: str
"""
_validation = {
'key': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'key': {'key': 'Key', 'type': 'str', 'xml': {'name': 'Key'}},
'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}},
}
_xml_map = {
'name': 'Tag'
}
def __init__(self, **kwargs):
super(BlobTag, self).__init__(**kwargs)
self.key = kwargs.get('key', None)
self.value = kwargs.get('value', None)
class BlobTags(Model):
"""Blob tags.
All required parameters must be populated in order to send to Azure.
:param blob_tag_set: Required.
:type blob_tag_set: list[~azure.storage.blob.models.BlobTag]
"""
_validation = {
'blob_tag_set': {'required': True},
}
_attribute_map = {
'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'itemsName': 'TagSet', 'wrapped': True}},
}
_xml_map = {
'name': 'Tags'
}
def __init__(self, **kwargs):
super(BlobTags, self).__init__(**kwargs)
self.blob_tag_set = kwargs.get('blob_tag_set', None)
class Block(Model):
"""Represents a single block in a block blob. It describes the block's ID and
size.
All required parameters must be populated in order to send to Azure.
:param name: Required. The base64 encoded block ID.
:type name: str
:param size: Required. The block size in bytes.
:type size: int
"""
_validation = {
'name': {'required': True},
'size': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(Block, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.size = kwargs.get('size', None)
class BlockList(Model):
"""BlockList.
:param committed_blocks:
:type committed_blocks: list[~azure.storage.blob.models.Block]
:param uncommitted_blocks:
:type uncommitted_blocks: list[~azure.storage.blob.models.Block]
"""
_attribute_map = {
'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}},
'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(BlockList, self).__init__(**kwargs)
self.committed_blocks = kwargs.get('committed_blocks', None)
self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None)
class BlockLookupList(Model):
"""BlockLookupList.
:param committed:
:type committed: list[str]
:param uncommitted:
:type uncommitted: list[str]
:param latest:
:type latest: list[str]
"""
_attribute_map = {
'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}},
'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}},
'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}},
}
_xml_map = {
'name': 'BlockList'
}
def __init__(self, **kwargs):
super(BlockLookupList, self).__init__(**kwargs)
self.committed = kwargs.get('committed', None)
self.uncommitted = kwargs.get('uncommitted', None)
self.latest = kwargs.get('latest', None)
class ClearRange(Model):
"""ClearRange.
All required parameters must be populated in order to send to Azure.
:param start: Required.
:type start: long
:param end: Required.
:type end: long
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
}
_xml_map = {
'name': 'ClearRange'
}
def __init__(self, **kwargs):
super(ClearRange, self).__init__(**kwargs)
self.start = kwargs.get('start', None)
self.end = kwargs.get('end', None)
class ContainerCpkScopeInfo(Model):
"""Additional parameters for create operation.
:param default_encryption_scope: Optional. Version 2019-07-07 and later.
Specifies the default encryption scope to set on the container and use for
all future writes.
:type default_encryption_scope: str
:param prevent_encryption_scope_override: Optional. Version 2019-07-07
and newer. If true, prevents any request from specifying a different
encryption scope than the scope set on the container.
:type prevent_encryption_scope_override: bool
"""
_attribute_map = {
'default_encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'default_encryption_scope'}},
'prevent_encryption_scope_override': {'key': '', 'type': 'bool', 'xml': {'name': 'prevent_encryption_scope_override'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(ContainerCpkScopeInfo, self).__init__(**kwargs)
self.default_encryption_scope = kwargs.get('default_encryption_scope', None)
self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None)
class ContainerItem(Model):
"""An Azure Storage container.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param deleted:
:type deleted: bool
:param version:
:type version: str
:param properties: Required.
:type properties: ~azure.storage.blob.models.ContainerProperties
:param metadata:
:type metadata: dict[str, str]
"""
_validation = {
'name': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}},
'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}},
'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
}
_xml_map = {
'name': 'Container'
}
def __init__(self, **kwargs):
super(ContainerItem, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.deleted = kwargs.get('deleted', None)
self.version = kwargs.get('version', None)
self.properties = kwargs.get('properties', None)
self.metadata = kwargs.get('metadata', None)
class ContainerProperties(Model):
"""Properties of a container.
All required parameters must be populated in order to send to Azure.
:param last_modified: Required.
:type last_modified: datetime
:param etag: Required.
:type etag: str
:param lease_status: Possible values include: 'locked', 'unlocked'
:type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
:param lease_state: Possible values include: 'available', 'leased',
'expired', 'breaking', 'broken'
:type lease_state: str or ~azure.storage.blob.models.LeaseStateType
:param lease_duration: Possible values include: 'infinite', 'fixed'
:type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
:param public_access: Possible values include: 'container', 'blob'
:type public_access: str or ~azure.storage.blob.models.PublicAccessType
:param has_immutability_policy:
:type has_immutability_policy: bool
:param has_legal_hold:
:type has_legal_hold: bool
:param default_encryption_scope:
:type default_encryption_scope: str
:param prevent_encryption_scope_override:
:type prevent_encryption_scope_override: bool
:param deleted_time:
:type deleted_time: datetime
:param remaining_retention_days:
:type remaining_retention_days: int
"""
_validation = {
'last_modified': {'required': True},
'etag': {'required': True},
}
_attribute_map = {
'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}},
'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}},
'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}},
'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}},
'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}},
'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}},
'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}},
'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}},
'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}},
'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(ContainerProperties, self).__init__(**kwargs)
self.last_modified = kwargs.get('last_modified', None)
self.etag = kwargs.get('etag', None)
self.lease_status = kwargs.get('lease_status', None)
self.lease_state = kwargs.get('lease_state', None)
self.lease_duration = kwargs.get('lease_duration', None)
self.public_access = kwargs.get('public_access', None)
self.has_immutability_policy = kwargs.get('has_immutability_policy', None)
self.has_legal_hold = kwargs.get('has_legal_hold', None)
self.default_encryption_scope = kwargs.get('default_encryption_scope', None)
self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None)
self.deleted_time = kwargs.get('deleted_time', None)
self.remaining_retention_days = kwargs.get('remaining_retention_days', None)
class CorsRule(Model):
"""CORS is an HTTP feature that enables a web application running under one
domain to access resources in another domain. Web browsers implement a
security restriction known as same-origin policy that prevents a web page
from calling APIs in a different domain; CORS provides a secure way to
allow one domain (the origin domain) to call APIs in another domain.
All required parameters must be populated in order to send to Azure.
:param allowed_origins: Required. The origin domains that are permitted to
make a request against the storage service via CORS. The origin domain is
the domain from which the request originates. Note that the origin must be
     an exact case-sensitive match with the origin that the user agent sends to
the service. You can also use the wildcard character '*' to allow all
origin domains to make requests via CORS.
:type allowed_origins: str
:param allowed_methods: Required. The methods (HTTP request verbs) that
the origin domain may use for a CORS request. (comma separated)
:type allowed_methods: str
:param allowed_headers: Required. the request headers that the origin
domain may specify on the CORS request.
:type allowed_headers: str
:param exposed_headers: Required. The response headers that may be sent in
the response to the CORS request and exposed by the browser to the request
issuer
:type exposed_headers: str
    :param max_age_in_seconds: Required. The maximum amount of time that a
browser should cache the preflight OPTIONS request.
:type max_age_in_seconds: int
"""
_validation = {
'allowed_origins': {'required': True},
'allowed_methods': {'required': True},
'allowed_headers': {'required': True},
'exposed_headers': {'required': True},
'max_age_in_seconds': {'required': True, 'minimum': 0},
}
_attribute_map = {
'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(CorsRule, self).__init__(**kwargs)
self.allowed_origins = kwargs.get('allowed_origins', None)
self.allowed_methods = kwargs.get('allowed_methods', None)
self.allowed_headers = kwargs.get('allowed_headers', None)
self.exposed_headers = kwargs.get('exposed_headers', None)
self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None)
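# Illustrative construction (placeholder values): a permissive CORS rule for the
# Blob service properties could be built as
#     CorsRule(allowed_origins='*', allowed_methods='GET,PUT',
#              allowed_headers='x-ms-meta-*', exposed_headers='x-ms-meta-*',
#              max_age_in_seconds=3600)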
class CpkInfo(Model):
"""Additional parameters for a set of operations.
:param encryption_key: Optional. Specifies the encryption key to use to
encrypt the data provided in the request. If not specified, encryption is
performed with the root account encryption key. For more information, see
Encryption at Rest for Azure Storage Services.
:type encryption_key: str
:param encryption_key_sha256: The SHA-256 hash of the provided encryption
key. Must be provided if the x-ms-encryption-key header is provided.
:type encryption_key_sha256: str
:param encryption_algorithm: The algorithm used to produce the encryption
key hash. Currently, the only accepted value is "AES256". Must be provided
if the x-ms-encryption-key header is provided. Possible values include:
'AES256'
:type encryption_algorithm: str or
~azure.storage.blob.models.EncryptionAlgorithmType
"""
_attribute_map = {
'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}},
'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}},
'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(CpkInfo, self).__init__(**kwargs)
self.encryption_key = kwargs.get('encryption_key', None)
self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None)
self.encryption_algorithm = kwargs.get('encryption_algorithm', None)
class CpkScopeInfo(Model):
"""Additional parameters for a set of operations.
:param encryption_scope: Optional. Version 2019-07-07 and later.
Specifies the name of the encryption scope to use to encrypt the data
provided in the request. If not specified, encryption is performed with
the default account encryption scope. For more information, see
Encryption at Rest for Azure Storage Services.
:type encryption_scope: str
"""
_attribute_map = {
'encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_scope'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(CpkScopeInfo, self).__init__(**kwargs)
self.encryption_scope = kwargs.get('encryption_scope', None)
class DataLakeStorageError(Model):
"""DataLakeStorageError.
:param data_lake_storage_error_details: The service error response object.
:type data_lake_storage_error_details:
~azure.storage.blob.models.DataLakeStorageErrorError
"""
_attribute_map = {
'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(DataLakeStorageError, self).__init__(**kwargs)
self.data_lake_storage_error_details = kwargs.get('data_lake_storage_error_details', None)
class DataLakeStorageErrorException(HttpResponseError):
"""Server responsed with exception of type: 'DataLakeStorageError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, response, deserialize, *args):
model_name = 'DataLakeStorageError'
self.error = deserialize(model_name, response)
if self.error is None:
self.error = deserialize.dependencies[model_name]()
super(DataLakeStorageErrorException, self).__init__(response=response)
class DataLakeStorageErrorError(Model):
"""The service error response object.
:param code: The service error code.
:type code: str
:param message: The service error message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}},
'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(DataLakeStorageErrorError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class DelimitedTextConfiguration(Model):
"""delimited text configuration.
All required parameters must be populated in order to send to Azure.
:param column_separator: Required. column separator
:type column_separator: str
:param field_quote: Required. field quote
:type field_quote: str
:param record_separator: Required. record separator
:type record_separator: str
:param escape_char: Required. escape char
:type escape_char: str
:param headers_present: Required. has headers
:type headers_present: bool
"""
_validation = {
'column_separator': {'required': True},
'field_quote': {'required': True},
'record_separator': {'required': True},
'escape_char': {'required': True},
'headers_present': {'required': True},
}
_attribute_map = {
'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}},
'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}},
'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}},
'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}},
'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}},
}
_xml_map = {
'name': 'DelimitedTextConfiguration'
}
def __init__(self, **kwargs):
super(DelimitedTextConfiguration, self).__init__(**kwargs)
self.column_separator = kwargs.get('column_separator', None)
self.field_quote = kwargs.get('field_quote', None)
self.record_separator = kwargs.get('record_separator', None)
self.escape_char = kwargs.get('escape_char', None)
self.headers_present = kwargs.get('headers_present', None)
class DirectoryHttpHeaders(Model):
"""Additional parameters for a set of operations, such as: Directory_create,
Directory_rename, Blob_rename.
:param cache_control: Cache control for given resource
:type cache_control: str
:param content_type: Content type for given resource
:type content_type: str
:param content_encoding: Content encoding for given resource
:type content_encoding: str
:param content_language: Content language for given resource
:type content_language: str
:param content_disposition: Content disposition for given resource
:type content_disposition: str
"""
_attribute_map = {
'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}},
'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}},
'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}},
'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}},
'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(DirectoryHttpHeaders, self).__init__(**kwargs)
self.cache_control = kwargs.get('cache_control', None)
self.content_type = kwargs.get('content_type', None)
self.content_encoding = kwargs.get('content_encoding', None)
self.content_language = kwargs.get('content_language', None)
self.content_disposition = kwargs.get('content_disposition', None)
class FilterBlobItem(Model):
"""Blob info from a Filter Blobs API call.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param container_name: Required.
:type container_name: str
:param tag_value: Required.
:type tag_value: str
"""
_validation = {
'name': {'required': True},
'container_name': {'required': True},
'tag_value': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName'}},
'tag_value': {'key': 'TagValue', 'type': 'str', 'xml': {'name': 'TagValue'}},
}
_xml_map = {
'name': 'Blob'
}
def __init__(self, **kwargs):
super(FilterBlobItem, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.container_name = kwargs.get('container_name', None)
self.tag_value = kwargs.get('tag_value', None)
class FilterBlobSegment(Model):
"""The result of a Filter Blobs API call.
All required parameters must be populated in order to send to Azure.
:param service_endpoint: Required.
:type service_endpoint: str
:param where: Required.
:type where: str
:param blobs: Required.
:type blobs: list[~azure.storage.blob.models.FilterBlobItem]
:param next_marker:
:type next_marker: str
"""
_validation = {
'service_endpoint': {'required': True},
'where': {'required': True},
'blobs': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
'where': {'key': 'Where', 'type': 'str', 'xml': {'name': 'Where'}},
'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'itemsName': 'Blobs', 'wrapped': True}},
'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(self, **kwargs):
super(FilterBlobSegment, self).__init__(**kwargs)
self.service_endpoint = kwargs.get('service_endpoint', None)
self.where = kwargs.get('where', None)
self.blobs = kwargs.get('blobs', None)
self.next_marker = kwargs.get('next_marker', None)
class GeoReplication(Model):
"""Geo-Replication information for the Secondary Storage Service.
All required parameters must be populated in order to send to Azure.
:param status: Required. The status of the secondary location. Possible
values include: 'live', 'bootstrap', 'unavailable'
:type status: str or ~azure.storage.blob.models.GeoReplicationStatusType
:param last_sync_time: Required. A GMT date/time value, to the second. All
primary writes preceding this value are guaranteed to be available for
read operations at the secondary. Primary writes after this point in time
may or may not be available for reads.
:type last_sync_time: datetime
"""
_validation = {
'status': {'required': True},
'last_sync_time': {'required': True},
}
_attribute_map = {
'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}},
'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(GeoReplication, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.last_sync_time = kwargs.get('last_sync_time', None)
class JsonTextConfiguration(Model):
"""json text configuration.
All required parameters must be populated in order to send to Azure.
:param record_separator: Required. record separator
:type record_separator: str
"""
_validation = {
'record_separator': {'required': True},
}
_attribute_map = {
'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}},
}
_xml_map = {
'name': 'JsonTextConfiguration'
}
def __init__(self, **kwargs):
super(JsonTextConfiguration, self).__init__(**kwargs)
self.record_separator = kwargs.get('record_separator', None)
class KeyInfo(Model):
"""Key information.
All required parameters must be populated in order to send to Azure.
:param start: Required. The date-time the key is active in ISO 8601 UTC
time
:type start: str
:param expiry: Required. The date-time the key expires in ISO 8601 UTC
time
:type expiry: str
"""
_validation = {
'start': {'required': True},
'expiry': {'required': True},
}
_attribute_map = {
'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(KeyInfo, self).__init__(**kwargs)
self.start = kwargs.get('start', None)
self.expiry = kwargs.get('expiry', None)
class LeaseAccessConditions(Model):
"""Additional parameters for a set of operations.
:param lease_id: If specified, the operation only succeeds if the
resource's lease is active and matches this ID.
:type lease_id: str
"""
_attribute_map = {
'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(LeaseAccessConditions, self).__init__(**kwargs)
self.lease_id = kwargs.get('lease_id', None)
class ListBlobsFlatSegmentResponse(Model):
"""An enumeration of blobs.
All required parameters must be populated in order to send to Azure.
:param service_endpoint: Required.
:type service_endpoint: str
:param container_name: Required.
:type container_name: str
:param prefix:
:type prefix: str
:param marker:
:type marker: str
:param max_results:
:type max_results: int
:param segment: Required.
:type segment: ~azure.storage.blob.models.BlobFlatListSegment
:param next_marker:
:type next_marker: str
"""
_validation = {
'service_endpoint': {'required': True},
'container_name': {'required': True},
'segment': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}},
'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}},
'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(self, **kwargs):
super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs)
self.service_endpoint = kwargs.get('service_endpoint', None)
self.container_name = kwargs.get('container_name', None)
self.prefix = kwargs.get('prefix', None)
self.marker = kwargs.get('marker', None)
self.max_results = kwargs.get('max_results', None)
self.segment = kwargs.get('segment', None)
self.next_marker = kwargs.get('next_marker', None)
class ListBlobsHierarchySegmentResponse(Model):
"""An enumeration of blobs.
All required parameters must be populated in order to send to Azure.
:param service_endpoint: Required.
:type service_endpoint: str
:param container_name: Required.
:type container_name: str
:param prefix:
:type prefix: str
:param marker:
:type marker: str
:param max_results:
:type max_results: int
:param delimiter:
:type delimiter: str
:param segment: Required.
:type segment: ~azure.storage.blob.models.BlobHierarchyListSegment
:param next_marker:
:type next_marker: str
"""
_validation = {
'service_endpoint': {'required': True},
'container_name': {'required': True},
'segment': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}},
'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}},
'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}},
'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(self, **kwargs):
super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs)
self.service_endpoint = kwargs.get('service_endpoint', None)
self.container_name = kwargs.get('container_name', None)
self.prefix = kwargs.get('prefix', None)
self.marker = kwargs.get('marker', None)
self.max_results = kwargs.get('max_results', None)
self.delimiter = kwargs.get('delimiter', None)
self.segment = kwargs.get('segment', None)
self.next_marker = kwargs.get('next_marker', None)
class ListContainersSegmentResponse(Model):
"""An enumeration of containers.
All required parameters must be populated in order to send to Azure.
:param service_endpoint: Required.
:type service_endpoint: str
:param prefix:
:type prefix: str
:param marker:
:type marker: str
:param max_results:
:type max_results: int
:param container_items: Required.
:type container_items: list[~azure.storage.blob.models.ContainerItem]
:param next_marker:
:type next_marker: str
"""
_validation = {
'service_endpoint': {'required': True},
'container_items': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}},
'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(self, **kwargs):
super(ListContainersSegmentResponse, self).__init__(**kwargs)
self.service_endpoint = kwargs.get('service_endpoint', None)
self.prefix = kwargs.get('prefix', None)
self.marker = kwargs.get('marker', None)
self.max_results = kwargs.get('max_results', None)
self.container_items = kwargs.get('container_items', None)
self.next_marker = kwargs.get('next_marker', None)
class Logging(Model):
"""Azure Analytics Logging settings.
All required parameters must be populated in order to send to Azure.
:param version: Required. The version of Storage Analytics to configure.
:type version: str
:param delete: Required. Indicates whether all delete requests should be
logged.
:type delete: bool
:param read: Required. Indicates whether all read requests should be
logged.
:type read: bool
:param write: Required. Indicates whether all write requests should be
logged.
:type write: bool
:param retention_policy: Required.
:type retention_policy: ~azure.storage.blob.models.RetentionPolicy
"""
_validation = {
'version': {'required': True},
'delete': {'required': True},
'read': {'required': True},
'write': {'required': True},
'retention_policy': {'required': True},
}
_attribute_map = {
'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}},
'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}},
'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}},
'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(Logging, self).__init__(**kwargs)
self.version = kwargs.get('version', None)
self.delete = kwargs.get('delete', None)
self.read = kwargs.get('read', None)
self.write = kwargs.get('write', None)
self.retention_policy = kwargs.get('retention_policy', None)
class Metrics(Model):
"""a summary of request statistics grouped by API in hour or minute aggregates
for blobs.
All required parameters must be populated in order to send to Azure.
:param version: The version of Storage Analytics to configure.
:type version: str
:param enabled: Required. Indicates whether metrics are enabled for the
Blob service.
:type enabled: bool
:param include_apis: Indicates whether metrics should generate summary
statistics for called API operations.
:type include_apis: bool
:param retention_policy:
:type retention_policy: ~azure.storage.blob.models.RetentionPolicy
"""
_validation = {
'enabled': {'required': True},
}
_attribute_map = {
'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}},
'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(Metrics, self).__init__(**kwargs)
self.version = kwargs.get('version', None)
self.enabled = kwargs.get('enabled', None)
self.include_apis = kwargs.get('include_apis', None)
self.retention_policy = kwargs.get('retention_policy', None)
class ModifiedAccessConditions(Model):
"""Additional parameters for a set of operations.
:param if_modified_since: Specify this header value to operate only on a
blob if it has been modified since the specified date/time.
:type if_modified_since: datetime
:param if_unmodified_since: Specify this header value to operate only on a
blob if it has not been modified since the specified date/time.
:type if_unmodified_since: datetime
:param if_match: Specify an ETag value to operate only on blobs with a
matching value.
:type if_match: str
:param if_none_match: Specify an ETag value to operate only on blobs
without a matching value.
:type if_none_match: str
:param if_tags: Specify a SQL where clause on blob tags to operate only on
blobs with a matching value.
:type if_tags: str
"""
_attribute_map = {
'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}},
'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}},
'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}},
'if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'if_tags'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(ModifiedAccessConditions, self).__init__(**kwargs)
self.if_modified_since = kwargs.get('if_modified_since', None)
self.if_unmodified_since = kwargs.get('if_unmodified_since', None)
self.if_match = kwargs.get('if_match', None)
self.if_none_match = kwargs.get('if_none_match', None)
self.if_tags = kwargs.get('if_tags', None)
class PageList(Model):
"""the list of pages.
:param page_range:
:type page_range: list[~azure.storage.blob.models.PageRange]
:param clear_range:
:type clear_range: list[~azure.storage.blob.models.ClearRange]
"""
_attribute_map = {
'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}},
'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(PageList, self).__init__(**kwargs)
self.page_range = kwargs.get('page_range', None)
self.clear_range = kwargs.get('clear_range', None)
class PageRange(Model):
"""PageRange.
All required parameters must be populated in order to send to Azure.
:param start: Required.
:type start: long
:param end: Required.
:type end: long
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
}
_xml_map = {
'name': 'PageRange'
}
def __init__(self, **kwargs):
super(PageRange, self).__init__(**kwargs)
self.start = kwargs.get('start', None)
self.end = kwargs.get('end', None)
class QueryFormat(Model):
"""QueryFormat.
:param type: Possible values include: 'delimited', 'json'
:type type: str or ~azure.storage.blob.models.QueryFormatType
:param delimited_text_configuration:
:type delimited_text_configuration:
~azure.storage.blob.models.DelimitedTextConfiguration
:param json_text_configuration:
:type json_text_configuration:
~azure.storage.blob.models.JsonTextConfiguration
"""
_attribute_map = {
'type': {'key': 'Type', 'type': 'QueryFormatType', 'xml': {'name': 'Type'}},
'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration', 'xml': {'name': 'DelimitedTextConfiguration'}},
'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration', 'xml': {'name': 'JsonTextConfiguration'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(QueryFormat, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None)
self.json_text_configuration = kwargs.get('json_text_configuration', None)
class QueryRequest(Model):
"""the quick query body.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar query_type: Required. the query type. Default value: "SQL" .
:vartype query_type: str
:param expression: Required. a query statement
:type expression: str
:param input_serialization:
:type input_serialization: ~azure.storage.blob.models.QuerySerialization
:param output_serialization:
:type output_serialization: ~azure.storage.blob.models.QuerySerialization
"""
_validation = {
'query_type': {'required': True, 'constant': True},
'expression': {'required': True},
}
_attribute_map = {
'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}},
'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}},
'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'InputSerialization'}},
'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'OutputSerialization'}},
}
_xml_map = {
'name': 'QueryRequest'
}
query_type = "SQL"
def __init__(self, **kwargs):
super(QueryRequest, self).__init__(**kwargs)
self.expression = kwargs.get('expression', None)
self.input_serialization = kwargs.get('input_serialization', None)
self.output_serialization = kwargs.get('output_serialization', None)
class QuerySerialization(Model):
"""QuerySerialization.
All required parameters must be populated in order to send to Azure.
:param format: Required.
:type format: ~azure.storage.blob.models.QueryFormat
"""
_validation = {
'format': {'required': True},
}
_attribute_map = {
'format': {'key': 'Format', 'type': 'QueryFormat', 'xml': {'name': 'Format'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(QuerySerialization, self).__init__(**kwargs)
self.format = kwargs.get('format', None)
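# Illustrative helper, not part of the generated models: a minimal sketch of how
# QueryFormat, QuerySerialization and QueryRequest compose into a quick-query body.
# The 'delimited' format literal and the SQL expression are example values only.
def _example_quick_query_body():
    csv_serialization = QuerySerialization(format=QueryFormat(type='delimited'))
    return QueryRequest(expression="SELECT * FROM BlobStorage",
                        input_serialization=csv_serialization,
                        output_serialization=csv_serialization)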
class RetentionPolicy(Model):
"""the retention policy which determines how long the associated data should
persist.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Indicates whether a retention policy is enabled
for the storage service
:type enabled: bool
:param days: Indicates the number of days that metrics or logging or
soft-deleted data should be retained. All data older than this value will
be deleted
:type days: int
"""
_validation = {
'enabled': {'required': True},
'days': {'minimum': 1},
}
_attribute_map = {
'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(RetentionPolicy, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.days = kwargs.get('days', None)
class SequenceNumberAccessConditions(Model):
"""Additional parameters for a set of operations, such as:
PageBlob_upload_pages, PageBlob_clear_pages,
PageBlob_upload_pages_from_url.
:param if_sequence_number_less_than_or_equal_to: Specify this header value
to operate only on a blob if it has a sequence number less than or equal
to the specified.
:type if_sequence_number_less_than_or_equal_to: long
:param if_sequence_number_less_than: Specify this header value to operate
only on a blob if it has a sequence number less than the specified.
:type if_sequence_number_less_than: long
:param if_sequence_number_equal_to: Specify this header value to operate
only on a blob if it has the specified sequence number.
:type if_sequence_number_equal_to: long
"""
_attribute_map = {
'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}},
'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}},
'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(SequenceNumberAccessConditions, self).__init__(**kwargs)
self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None)
self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None)
self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None)
class SignedIdentifier(Model):
"""signed identifier.
All required parameters must be populated in order to send to Azure.
:param id: Required. a unique id
:type id: str
:param access_policy:
:type access_policy: ~azure.storage.blob.models.AccessPolicy
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
}
_xml_map = {
'name': 'SignedIdentifier'
}
def __init__(self, **kwargs):
super(SignedIdentifier, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.access_policy = kwargs.get('access_policy', None)
class SourceModifiedAccessConditions(Model):
"""Additional parameters for a set of operations.
:param source_if_modified_since: Specify this header value to operate only
on a blob if it has been modified since the specified date/time.
:type source_if_modified_since: datetime
:param source_if_unmodified_since: Specify this header value to operate
only on a blob if it has not been modified since the specified date/time.
:type source_if_unmodified_since: datetime
:param source_if_match: Specify an ETag value to operate only on blobs
with a matching value.
:type source_if_match: str
:param source_if_none_match: Specify an ETag value to operate only on
blobs without a matching value.
:type source_if_none_match: str
:param source_if_tags: Specify a SQL where clause on blob tags to operate
only on blobs with a matching value.
:type source_if_tags: str
"""
_attribute_map = {
'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}},
'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}},
'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}},
'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}},
'source_if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_tags'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(SourceModifiedAccessConditions, self).__init__(**kwargs)
self.source_if_modified_since = kwargs.get('source_if_modified_since', None)
self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None)
self.source_if_match = kwargs.get('source_if_match', None)
self.source_if_none_match = kwargs.get('source_if_none_match', None)
self.source_if_tags = kwargs.get('source_if_tags', None)
class StaticWebsite(Model):
"""The properties that enable an account to host a static website.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Indicates whether this account is hosting a
static website
:type enabled: bool
:param index_document: The default name of the index page under each
directory
:type index_document: str
:param error_document404_path: The absolute path of the custom 404 page
:type error_document404_path: str
:param default_index_document_path: Absolute path of the default index
page
:type default_index_document_path: str
"""
_validation = {
'enabled': {'required': True},
}
_attribute_map = {
'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}},
'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}},
'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str', 'xml': {'name': 'DefaultIndexDocumentPath'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(StaticWebsite, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.index_document = kwargs.get('index_document', None)
self.error_document404_path = kwargs.get('error_document404_path', None)
self.default_index_document_path = kwargs.get('default_index_document_path', None)
class StorageError(Model):
"""StorageError.
:param message:
:type message: str
"""
_attribute_map = {
'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(StorageError, self).__init__(**kwargs)
self.message = kwargs.get('message', None)
class StorageErrorException(HttpResponseError):
"""Server responsed with exception of type: 'StorageError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, response, deserialize, *args):
model_name = 'StorageError'
self.error = deserialize(model_name, response)
if self.error is None:
self.error = deserialize.dependencies[model_name]()
super(StorageErrorException, self).__init__(response=response)
class StorageServiceProperties(Model):
"""Storage Service Properties.
:param logging:
:type logging: ~azure.storage.blob.models.Logging
:param hour_metrics:
:type hour_metrics: ~azure.storage.blob.models.Metrics
:param minute_metrics:
:type minute_metrics: ~azure.storage.blob.models.Metrics
:param cors: The set of CORS rules.
:type cors: list[~azure.storage.blob.models.CorsRule]
:param default_service_version: The default version to use for requests to
the Blob service if an incoming request's version is not specified.
Possible values include version 2008-10-27 and all more recent versions
:type default_service_version: str
:param delete_retention_policy:
:type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy
:param static_website:
:type static_website: ~azure.storage.blob.models.StaticWebsite
"""
_attribute_map = {
'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}},
'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}},
'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}},
'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(StorageServiceProperties, self).__init__(**kwargs)
self.logging = kwargs.get('logging', None)
self.hour_metrics = kwargs.get('hour_metrics', None)
self.minute_metrics = kwargs.get('minute_metrics', None)
self.cors = kwargs.get('cors', None)
self.default_service_version = kwargs.get('default_service_version', None)
self.delete_retention_policy = kwargs.get('delete_retention_policy', None)
self.static_website = kwargs.get('static_website', None)
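# Illustrative sketch, not part of the generated code: assembling a service
# properties payload from the models above. The 7-day retention window, index
# page name and version string are example values, not service defaults.
def _example_service_properties():
    return StorageServiceProperties(
        delete_retention_policy=RetentionPolicy(enabled=True, days=7),
        static_website=StaticWebsite(enabled=True, index_document='index.html'),
        default_service_version='2020-04-08')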
class StorageServiceStats(Model):
"""Stats for the storage service.
:param geo_replication:
:type geo_replication: ~azure.storage.blob.models.GeoReplication
"""
_attribute_map = {
'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(StorageServiceStats, self).__init__(**kwargs)
self.geo_replication = kwargs.get('geo_replication', None)
class UserDelegationKey(Model):
"""A user delegation key.
All required parameters must be populated in order to send to Azure.
:param signed_oid: Required. The Azure Active Directory object ID in GUID
format.
:type signed_oid: str
:param signed_tid: Required. The Azure Active Directory tenant ID in GUID
format
:type signed_tid: str
:param signed_start: Required. The date-time the key is active
:type signed_start: datetime
:param signed_expiry: Required. The date-time the key expires
:type signed_expiry: datetime
:param signed_service: Required. Abbreviation of the Azure Storage service
that accepts the key
:type signed_service: str
:param signed_version: Required. The service version that created the key
:type signed_version: str
:param value: Required. The key as a base64 string
:type value: str
"""
_validation = {
'signed_oid': {'required': True},
'signed_tid': {'required': True},
'signed_start': {'required': True},
'signed_expiry': {'required': True},
'signed_service': {'required': True},
'signed_version': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}},
'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}},
'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}},
'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}},
'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}},
'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}},
'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}},
}
_xml_map = {
}
def __init__(self, **kwargs):
super(UserDelegationKey, self).__init__(**kwargs)
self.signed_oid = kwargs.get('signed_oid', None)
self.signed_tid = kwargs.get('signed_tid', None)
self.signed_start = kwargs.get('signed_start', None)
self.signed_expiry = kwargs.get('signed_expiry', None)
self.signed_service = kwargs.get('signed_service', None)
self.signed_version = kwargs.get('signed_version', None)
self.value = kwargs.get('value', None)
the-stack_0_6768
import pygame
from pygame.cursors import tri_left
import pygame_gui
import time
import serial.tools.list_ports
import os, sys
import math
from collections import deque
from pygame_gui import UIManager
from pygame_gui.elements import UIButton
from pygame_gui.elements import UITextEntryLine
from pygame_gui.elements import UIDropDownMenu
from pygame_gui.elements import UILabel
from pygame_gui.elements.ui_text_box import UITextBox
from pygame_gui.windows import UIConfirmationDialog
from serial import *
from pathlib import Path
def find_data_file(filename):
if getattr(sys, "frozen", False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(__file__)
return os.path.join(datadir, filename)
try: # Needed for macOS "py2app"
base_path = Path(__file__).parent
image_path = (base_path / "./PTSApp-Icon.png").resolve()
gameIcon = pygame.image.load(image_path)
pygame.display.set_icon(gameIcon)
except: # Needed for Windows "cx_freeze"
imageFile = "PTSApp-Icon.png"
imageFilePath = find_data_file(imageFile)
gameIcon = pygame.image.load(imageFilePath)
pygame.display.set_icon(gameIcon)
pygame.font.init()
myfont = pygame.font.SysFont('Trebuchet MS', 30)
myfontsmall = pygame.font.SysFont('Trebuchet MS', 20)
clk = pygame.time.Clock()
interval = 200
intervalReport = 100
baudRate = 38400 #57600 or 38400
speedFastX = 's20'
speedFastY = 'S20'
speedFastZ = 'X60'
speedSlowX = 's10'
speedSlowY = 'S10'
speedSlowZ = 'X30'
ser = ''
serBuffer = ''
serialText = ''
joystick = ''
joystickName = ''
button0Pressed = False
button1Pressed = False
button2Pressed = False
button3Pressed = False
button4Pressed = False
button5Pressed = False
button6Pressed = False
button7Pressed = False
button8Pressed = False
button9Pressed = False
button10Pressed = False
button11Pressed = False
button12Pressed = False
button13Pressed = False
button14Pressed = False
button15Pressed = False
button16Pressed = False
pos1set = False
pos2set = False
pos3set = False
pos4set = False
pos5set = False
pos6set = False
pos1run = False
pos2run = False
pos3run = False
pos4run = False
pos5run = False
pos6run = False
atPos1 = False
atPos2 = False
atPos3 = False
atPos4 = False
atPos5 = False
atPos6 = False
speedIsFast = True
speedRec = False
blinkSet = False
canSendReport = False
textBoxJoystickNames = None
joyCircle_draging = False
sliderCircle_draging = False
offset_x = 0.0
sliderOffset_x = 0.0
offset_y = 0.0
sliderOffset_y = 0.0
textBoxSerial = None
arr = []
oldAxisX = 0
oldAxisY = 0
oldAxisZ = 0
axisX = 0
axisY = 0
axisZ = 0
data = bytearray(7)
hat = ()
oldHatX = 0
oldHatY = 0
previousTime = time.time()
RED = (255, 0, 0)
GREEN = (0, 255, 0)
OFF = (33, 40, 45)
mouseBorder = 360
radius = 15
mouseMoving = False
joyXreadDOT = 0.0
joyYreadDOT = 0.0
joyZreadDOT = 0.0
panKeyPresseed = False
tiltKeyPresseed = False
sliderKeyPresseed = False
isZooming = False
colour_light = (99,104,107)
colour_dark = (76,80,82)
colour = (255,255,255)
zoomINtext = myfontsmall.render('IN' , True , colour)
zoomOUTtext = myfontsmall.render('OUT' , True , colour)
textsurfaceW = myfont.render('w', False, (89, 89, 89))
textsurfaceA = myfont.render('a', False, (89, 89, 89))
textsurfaceS = myfont.render('s', False, (89, 89, 89))
textsurfaceD = myfont.render('d', False, (89, 89, 89))
textsurfaceLeft = myfont.render(',', False, (89, 89, 89))
textsurfaceRight = myfont.render('.', False, (89, 89, 89))
resolution = (1200, 660)
fullscreen = False
pygame.init()
pygame.display.set_caption("PTSApp")
previousTicks = pygame.time.get_ticks() + interval
previousTicksReport = pygame.time.get_ticks() + intervalReport
def sendUP1():
temp='^T1'
sendSerial(temp)
def sendDOWN1():
temp='^T-1'
sendSerial(temp)
def sendLEFT1():
temp='^P-0.5'
sendSerial(temp)
def sendRIGHT1():
temp='^P0.5'
sendSerial(temp)
def sendUP10():
temp='^T10'
sendSerial(temp)
def sendDOWN10():
temp='^T-10'
sendSerial(temp)
def sendLEFT10():
temp='^P-10'
sendSerial(temp)
def sendRIGHT10():
temp='^P10'
sendSerial(temp)
def sendRESETpos():
temp='^h'
sendSerial(temp)
def sendSR1():
temp='^L10'
sendSerial(temp)
def sendSR10():
temp='^L100'
sendSerial(temp)
def sendSL1():
temp='^L-10'
sendSerial(temp)
def sendSL10():
temp='^L-100'
sendSerial(temp)
def sendZOOMin():
temp='^Z'
sendSerial(temp)
def sendZOOMout():
temp='^z'
sendSerial(temp)
def sendZOOMstop():
temp='^N'
sendSerial(temp)
def sendSET1():
temp='^a'
sendSerial(temp)
def sendSET2():
temp='^b'
sendSerial(temp)
def sendSET3():
temp='^c'
sendSerial(temp)
def sendSET4():
temp='^d'
sendSerial(temp)
def sendSET5():
temp='^e'
sendSerial(temp)
def sendSET6():
temp='^f'
sendSerial(temp)
def sendGO1():
temp='^A'
sendSerial(temp)
def sendGO2():
temp='^B'
sendSerial(temp)
def sendGO3():
temp='^C'
sendSerial(temp)
def sendGO4():
temp='^D'
sendSerial(temp)
def sendGO5():
temp='^E'
sendSerial(temp)
def sendGO6():
temp='^F'
sendSerial(temp)
def sendSPEEDfast():
temp='^V'
sendSerial(temp)
def sendSPEEDslow():
temp='^v'
sendSerial(temp)
def sendREPORTall():
temp='^R'
sendSerial(temp)
def sendREPORTpos():
global canSendReport
global previousTicksReport
temp='^W'
sendSerial(temp)
canSendReport = True
previousTicksReport = pygame.time.get_ticks() + intervalReport
def clearPosConfirm():
message_window = UIConfirmationDialog(pygame.Rect((650, 200), (300, 200)),
ui_manager,
action_long_desc='Clear All Position Data?')
def sendCLEARALLpos():
temp='^Y'
sendSerial(temp)
def sendCLEARtext():
global serialText
serialText = ''
textBoxSerial.kill()
serialPortTextBox()
def serialPort_changed():
global ser
global baudRate
global current_serialPort
global serialText
global drop_down_serial
serialPortSelect = drop_down_serial.selected_option
try:
ser = Serial(serialPortSelect , baudRate, timeout=0, writeTimeout=0)
temp='^W'
sendSerial(temp)
readSerial()
except:
ser = ''
serialNotSel = 'Serial port not available!<br>'
textBoxSerial.kill()
serialText = serialNotSel + serialText
serialPortTextBox()
drop_down_serial.kill()
drop_down_serial = UIDropDownMenu(available_ports, # Recreate serial port drop down list
current_serialPort[0], # Currently selected port
pygame.Rect((620,95),
(250, 30)),
ui_manager)
def tohex(val, nbits):
return hex((val + (1 << nbits)) % (1 << nbits))
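def _tohex_examples():
    # Illustrative only: tohex() returns a two's-complement hex string so negative
    # axis values survive the int(value, 16) round trip inside sendJoystick().
    assert tohex(255, 16) == '0xff'
    assert tohex(-255, 16) == '0xff01'   # 65536 - 255 = 65281
    assert int(tohex(-255, 16), 16) > 257  # lands in sendJoystick()'s "negative" branch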
def sendJoystick(arr):
global ser
global data
sliderInt = int(arr[1], 16)
panInt = int(arr[2], 16)
tiltInt = int(arr[3], 16)
data[0] = 4
if ((sliderInt > 0) and (sliderInt < 256)):
data[1] = 0
data[2] = sliderInt
elif sliderInt > 257:
data[1] = 255
data[2] = (sliderInt-65281)
else:
data[1] = 0
data[2] = 0
if ((panInt > 0) and (panInt < 256)):
data[3] = 0
data[4] = panInt
elif panInt > 257:
data[3] = 255
data[4] = (panInt-65281)
else:
data[3] = 0
data[4] = 0
if ((tiltInt > 0) and (tiltInt < 256)):
data[5] = 0
data[6] = tiltInt
elif tiltInt > 257:
data[5] = 255
data[6] = (tiltInt-65281)
else:
data[5] = 0
data[6] = 0
if ser == '':
pass
else:
ser.write(data)
#print(data)
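# Frame layout as implemented by sendJoystick() above (a reading of this code, not a
# documented protocol): data[0] is the constant marker 4; byte pairs 1-2, 3-4 and 5-6
# carry slider, pan and tilt as a flag byte (0 for >= 0, 255 for negative) plus a value
# byte (the magnitude for positive values, value - 65281 for the negative encoding).
def _example_joystick_frame():
    # Mirrors how the main loop builds arr before calling sendJoystick();
    # slider=0, pan=+128, tilt=-64 are arbitrary example deflections.
    axisZh = tohex(0, 16)     # slider
    axisXh = tohex(128, 16)   # pan
    axisYh = tohex(-64, 16)   # tilt (the main loop negates Y before encoding)
    return [4, axisZh, axisXh, axisYh]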
def serialPortTextBox():
global textBoxSerial
textBoxSerial = UITextBox('<font face=roboto size=5 color=#F0F0F0>' + serialText + '</font>',
pygame.Rect((620, 130), (560, 510)),
ui_manager)
#wrap_to_height=False)
def textBoxJoystickName():
global joystickName
global textBoxJoystickNames
textBoxJoystickNames = UITextBox(joystickName,
pygame.Rect((620, 30), (560, 35)),
ui_manager)
def readSerial():
global ser
global serBuffer
global serialText
global atPos1
global atPos2
global atPos3
global atPos4
global atPos5
global atPos6
global pos1set
global pos2set
global pos3set
global pos4set
global pos5set
global pos6set
global pos1run
global pos2run
global pos3run
global pos4run
global pos5run
global pos6run
global speedIsFast
global speedRec
if (ser == ''):
return
else:
while True:
c = ser.read()
if len(c) == 0:
break
if (c == b'\x04'): # Ignore received joystick commands from other remote
c = ser.read()
c = ser.read()
c = ser.read()
c = ser.read()
c = ser.read()
c = ser.read()
c = ''
elif (c == b'^'):
c = ser.read()
c = ''
elif (c == b'\xb0'): # Change / remove characters that cause error
c = '°'
elif (c == b'\xb2'):
c = '²'
elif (c == b'\xba') or (c == b'\xc2') or (c == b'\xc9'):
c = ''
elif (c == b'\x23'): # c = # Remove HASHTAG commands
c = ser.read()
if c == b'A':
#atPos1 = True
pos1set = True
elif c == b'B':
#atPos2 = True
pos2set = True
elif c == b'C':
#atPos3 = True
pos3set = True
elif c == b'D':
#atPos4 = True
pos4set = True
elif c == b'E':
pos5set = True
#atPos5 = True
elif c == b'F':
#atPos6 = True
pos6set = True
elif c == b'J':
atPos1 = False
atPos2 = False
atPos3 = False
atPos4 = False
atPos5 = False
atPos6 = False
pos1run = True
elif c == b'K':
atPos1 = False
atPos2 = False
atPos3 = False
atPos4 = False
atPos5 = False
atPos6 = False
pos2run = True
elif c == b'L':
atPos1 = False
atPos2 = False
atPos3 = False
atPos4 = False
atPos5 = False
atPos6 = False
pos3run = True
elif c == b'M':
atPos1 = False
atPos2 = False
atPos3 = False
atPos4 = False
atPos5 = False
atPos6 = False
pos4run = True
elif c == b'N':
atPos1 = False
atPos2 = False
atPos3 = False
atPos4 = False
atPos5 = False
atPos6 = False
pos5run = True
elif c == b'O':
atPos1 = False
atPos2 = False
atPos3 = False
atPos4 = False
atPos5 = False
atPos6 = False
pos6run = True
elif c == b'a':
pos1run = False
atPos1 = True
elif c == b'b':
pos2run = False
atPos2 = True
elif c == b'c':
pos3run = False
atPos3 = True
elif c == b'd':
pos4run = False
atPos4 = True
elif c == b'e':
pos5run = False
atPos5 = True
elif c == b'f':
pos6run = False
atPos6 = True
elif c == b'Y':
pos1run = False
pos1set = False
atPos1 = False
pos2run = False
pos2set = False
atPos2 = False
pos3run = False
pos3set = False
atPos3 = False
pos4run = False
pos4set = False
atPos4 = False
pos5run = False
pos5set = False
atPos5 = False
pos6run = False
pos6set = False
atPos6 = False
elif c == b'y':
atPos1 = False
atPos2 = False
atPos3 = False
atPos4 = False
atPos5 = False
atPos6 = False
elif c == b'V':
speedIsFast = True
speedRec = True
elif c == b'v':
speedIsFast = False
speedRec = True
#c = '\n'
c = ''
else:
c = c.decode('ascii')
if (c == '\r'): # check if character is a delimiter
c = '' # don't want returns. chuck it
if (c == '\t'): # check if character is a tab
c = '<br>'#' - ' # adjust
if c == '\n':
serBuffer += '<br>' # replace \n with HTML <br>
#textOUTPUT.insert(END, serBuffer) # code for tkinter
#textOUTPUT.see(END) # code for tkinter
#serialText += serBuffer # code for tkinter
textBoxSerial.kill()
serialText = serBuffer + serialText
serialPortTextBox()
serBuffer = '' # empty the buffer
else:
serBuffer += c # add to the buffer
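# Status characters handled by readSerial() above (inferred from this code, not from a
# firmware specification): after a '#' prefix,
#   'A'-'F' : position 1-6 stored on the controller   -> posNset flags
#   'J'-'O' : a move toward position 1-6 has started  -> posNrun flags
#   'a'-'f' : the head arrived at position 1-6        -> atPosN flags
#   'Y'/'y' : clear all stored positions / clear only the "at position" flags
#   'V'/'v' : speed reported as fast / slow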
def sendSerial(sendValue):
global ser
global serialText
if (ser == ''): # Checks to see if com port has been selected
serialNotSel = 'Serial port not selected!<br>'
textBoxSerial.kill()
serialText = serialNotSel + serialText
serialPortTextBox()
#textOUTPUT.insert(END, 'Serial port not selected!\n') # code for tkinter
#textOUTPUT.see(END) # code for tkinter
else:
ser.write(sendValue.encode()) # Send button value to connected com port
def scale(val, src, dst):
# Scale the given value from the scale of src to the scale of dst.
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
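def _scale_examples():
    # Illustrative only: the mapping used in process_events() to turn raw joystick
    # axes (with a +/-0.2 deadzone) and the on-screen joystick dot into -255..255.
    assert int(scale(1.0, (0.2, 1.0), (0, 255))) == 255      # full deflection
    assert int(scale(-1.0, (-1.0, -0.2), (-255, 0))) == -255
    assert int(scale(195, (30, 360), (-255, 255))) == 0      # dot at its rest position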
def initialiseJoysticks():
global joystick
global joystickName
available_joysticks = [] # for returning
pygame.joystick.init() # Initialise the Joystick sub-module
joystick_count = pygame.joystick.get_count() # Get count of joysticks
for i in range( joystick_count ): # For each joystick:
joystick = pygame.joystick.Joystick( i )
joystick.init()
available_joysticks.append( joystick )
if ( len( available_joysticks ) == 0 ):
joystickName = "No joystick found."
#print( "No joystick found." )
else:
for i,joystk in enumerate( available_joysticks ):
joystickName = joystk.get_name()
#print("Joystick %d is named [%s]" % ( i, joystickName ) )
return available_joysticks
def int_to_bytes(number: int) -> bytes:
return number.to_bytes(length=(8 + (number + (number < 0)).bit_length()) // 8, byteorder='big', signed=True)
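def _int_to_bytes_examples():
    # Illustrative only: int_to_bytes() is a signed, big-endian, minimal-width encoder
    # (currently unused elsewhere in this file).
    assert int_to_bytes(0) == b'\x00'
    assert int_to_bytes(255) == b'\x00\xff'   # leading sign byte keeps the value positive
    assert int_to_bytes(-1) == b'\xff'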
def doRefresh():
global drop_down_serial
global ser
global current_serialPort
global baudRate
usb_port = 'usbserial'
wchusb_port = 'wchusbserial'
current_serialPort = ' - '
drop_down_serial.kill() # Clear serial port drop down box
ports = serial.tools.list_ports.comports() # Search for attached serial ports
available_ports = []
for p in ports:
available_ports.append(p.device) # Append each found serial port to array available_ports
if current_serialPort == ' - ':
if (wchusb_port in '\t'.join(available_ports)):
try:
current_serialPort = [string for string in available_ports if wchusb_port in string]
ser = Serial(current_serialPort[0], baudRate, timeout=0, writeTimeout=0)
temp='^W'
sendSerial(temp)
readSerial()
except:
current_serialPort = [' - ']
elif (usb_port in '\t'.join(available_ports)):
try:
current_serialPort = [string for string in available_ports if usb_port in string]
ser = Serial(current_serialPort[0], baudRate, timeout=0, writeTimeout=0)
temp='^W'
sendSerial(temp)
readSerial()
except:
current_serialPort = [' - ']
else:
current_serialPort = [' - ']
drop_down_serial = UIDropDownMenu(available_ports, # Recreate serial port drop down list
current_serialPort[0], # Currently selected port
pygame.Rect((620,95),
(250, 30)),
ui_manager)
initialiseJoysticks()
textBoxJoystickName()
initialiseJoysticks()
if fullscreen:
window_surface = pygame.display.set_mode(resolution,
pygame.FULLSCREEN)
else:
window_surface = pygame.display.set_mode(resolution)
background_surface = None
try:
base_path = Path(__file__).parent # Needed for macOS "py2app"
file_path = (base_path / "./theme.json").resolve()
ui_manager = UIManager(resolution, file_path)
except:
themeFile = "theme.json"
themeFilePath = find_data_file(themeFile)
ui_manager = UIManager(resolution, themeFilePath)
running = True
clock = pygame.time.Clock()
time_delta_stack = deque([])
button_response_timer = pygame.time.Clock()
ui_manager.set_window_resolution(resolution)
ui_manager.clear_and_reset()
background_surface = pygame.Surface(resolution)
background_surface.fill(ui_manager.get_theme().get_colour('dark_bg'))
rel_button_L1 = UIButton(pygame.Rect((120, 180), (60, 60)), '.5', ui_manager, object_id='#everything_button')
rel_button_L10 = UIButton(pygame.Rect((60, 180), (60, 60)), '10', ui_manager, object_id='#everything_button')
rel_button_R1 = UIButton(pygame.Rect((240, 180), (60, 60)), '.5', ui_manager, object_id='#everything_button')
rel_button_R10 = UIButton(pygame.Rect((300, 180), (60, 60)), '10', ui_manager, object_id='#everything_button')
rel_button_U1 = UIButton(pygame.Rect((180, 120), (60, 60)), '.5', ui_manager, object_id='#everything_button')
rel_button_U10 = UIButton(pygame.Rect((180, 60), (60, 60)), '10', ui_manager, object_id='#everything_button')
rel_button_D1 = UIButton(pygame.Rect((180, 240), (60, 60)), '.5', ui_manager, object_id='#everything_button')
rel_button_D10 = UIButton(pygame.Rect((180, 300), (60, 60)), '10', ui_manager, object_id='#everything_button')
#rel_button_set0 = UIButton(pygame.Rect((190, 190), (40, 40)), '0', ui_manager) # Resets position back to zero
rel_button_SL10 = UIButton(pygame.Rect((120, 400), (60, 60)), '10', ui_manager, object_id='#everything_button')
rel_button_SL100 = UIButton(pygame.Rect((60, 400), (60, 60)), '100', ui_manager, object_id='#everything_button')
rel_button_SR10 = UIButton(pygame.Rect((240, 400), (60, 60)), '10', ui_manager, object_id='#everything_button')
rel_button_SR100 = UIButton(pygame.Rect((300, 400), (60, 60)), '100', ui_manager, object_id='#everything_button')
rel_button_SET1 = UIButton(pygame.Rect((30, 560), (60, 60)), 'SET 1', ui_manager, object_id='#everything_button')
rel_button_SET2 = UIButton(pygame.Rect((90, 560), (60, 60)), 'SET 2', ui_manager, object_id='#everything_button')
rel_button_SET3 = UIButton(pygame.Rect((150, 560), (60, 60)), 'SET 3', ui_manager, object_id='#everything_button')
rel_button_SET4 = UIButton(pygame.Rect((210, 560), (60, 60)), 'SET 4', ui_manager, object_id='#everything_button')
rel_button_SET5 = UIButton(pygame.Rect((270, 560), (60, 60)), 'SET 5', ui_manager, object_id='#everything_button')
rel_button_SET6 = UIButton(pygame.Rect((330, 560), (60, 60)), 'SET 6', ui_manager, object_id='#everything_button')
rel_button_GO1 = UIButton(pygame.Rect((30, 500), (60, 60)), 'GO 1', ui_manager, object_id='#everything_button')
rel_button_GO2 = UIButton(pygame.Rect((90, 500), (60, 60)), 'GO 2', ui_manager, object_id='#everything_button')
rel_button_GO3 = UIButton(pygame.Rect((150, 500), (60, 60)), 'GO 3', ui_manager, object_id='#everything_button')
rel_button_GO4 = UIButton(pygame.Rect((210, 500), (60, 60)), 'GO 4', ui_manager, object_id='#everything_button')
rel_button_GO5 = UIButton(pygame.Rect((270, 500), (60, 60)), 'GO 5', ui_manager, object_id='#everything_button')
rel_button_GO6 = UIButton(pygame.Rect((330, 500), (60, 60)), 'GO 6', ui_manager, object_id='#everything_button')
rel_button_CLEARALL = UIButton(pygame.Rect((390, 545), (100, 30)), 'Clear ALL', ui_manager, object_id='#everything_button')
rel_button_Refresh = UIButton(pygame.Rect((430, 35), (160, 35)), 'Refresh Ports', ui_manager, object_id='#everything_button')
rel_button_FAST = UIButton(pygame.Rect((480, 100), (60, 60)), 'FAST', ui_manager, object_id='#everything_button')
rel_button_SLOW = UIButton(pygame.Rect((480, 160), (60, 60)), 'SLOW', ui_manager, object_id='#everything_button')
rel_button_REPORT = UIButton(pygame.Rect((510, 470), (100, 60)), 'Report All', ui_manager, object_id='#everything_button')
rel_button_REPORTPOS = UIButton(pygame.Rect((510, 530), (100, 60)), 'Report Pos', ui_manager, object_id='#everything_button')
rel_button_CLEARtext = UIButton(pygame.Rect((510, 600), (100, 40)), 'Clear Text', ui_manager, object_id='#everything_button')
joystick_label = UILabel(pygame.Rect(540, 10, 230, 24), "Joystick", ui_manager)#, object_id='#main_text_entry')
serial_text_entry = UITextEntryLine(pygame.Rect((930, 95), (250, 35)), ui_manager, object_id='#main_text_entry')
serial_port_label = UILabel(pygame.Rect(550, 70, 230, 24), "Serial Port", ui_manager)
serial_command_label = UILabel(pygame.Rect(870, 70, 230, 24), "Serial Command", ui_manager)
usb_port = 'usbserial'
wchusb_port = 'wchusbserial'
current_serialPort = ' - '
ports = serial.tools.list_ports.comports() # Search for attached serial ports
available_ports = []
for p in ports:
available_ports.append(p.device) # Append each found serial port to array available_ports
if current_serialPort == ' - ':
if (wchusb_port in '\t'.join(available_ports)):
try:
current_serialPort = [string for string in available_ports if wchusb_port in string]
ser = Serial(current_serialPort[0], baudRate, timeout=0, writeTimeout=0)
temp='^W'
sendSerial(temp)
readSerial()
except:
current_serialPort = [' - ']
elif (usb_port in '\t'.join(available_ports)):
try:
current_serialPort = [string for string in available_ports if usb_port in string]
ser = Serial(current_serialPort[0], baudRate, timeout=0, writeTimeout=0)
temp='^W'
sendSerial(temp)
readSerial()
except:
current_serialPort = [' - ']
else:
current_serialPort = [' - ']
drop_down_serial = UIDropDownMenu(available_ports, # Recreate serial port drop down list
current_serialPort[0], # Currently selected port
pygame.Rect((620,95),
(250, 30)),
ui_manager)
serialPortTextBox()
textBoxJoystickName()
joyCircle = pygame.draw.circle(window_surface, pygame.Color("blue"), (225,225), radius)
joyCircle_draging = False
joyCircle.x = 195
joyCircle.y = 195
# Generate crosshair
crosshair = pygame.surface.Surface((30, 30))
crosshair.fill(pygame.Color("magenta"))
pygame.draw.circle(crosshair, pygame.Color("blue"), (radius,radius), radius)
crosshair.set_colorkey(pygame.Color("magenta"))#, pygame.RLEACCEL)
#crosshair = crosshair.convert()
sliderCircle = pygame.draw.circle(window_surface, pygame.Color("blue"), (225,415), radius)
sliderCircle_draging = False
sliderCircle.x = 195
sliderCircle.y = 415
# Generate crosshair
crosshairSlider = pygame.surface.Surface((30, 30))
crosshairSlider.fill(pygame.Color("magenta"))
pygame.draw.circle(crosshairSlider, pygame.Color("blue"), (radius,radius), radius)
crosshairSlider.set_colorkey(pygame.Color("magenta"))#, pygame.RLEACCEL)
#crosshair = crosshair.convert()
ui_manager.set_focus_set(textBoxSerial) # Sets focus so focus can be tested
def process_events():
global arr
global joystick
global joystickName
global button0Pressed
global button1Pressed
global button2Pressed
global button3Pressed
global button4Pressed
global button5Pressed
global button6Pressed
global button7Pressed
global button8Pressed
global button9Pressed
global button10Pressed
global button11Pressed
global button12Pressed
global button13Pressed
global button14Pressed
global button15Pressed
global button16Pressed
global oldAxisX
global oldAxisY
global oldAxisZ
global oldHatX
global oldHatY
global axisX
global axisY
global axisZ
global previousTime
global mouseMoving
global joyCircle_draging
global sliderCircle_draging
global offset_x
global offset_y
global sliderOffset_x
global sliderOffset_y
global running
global joyXreadDOT
global joyYreadDOT
global joyZreadDOT
global panKeyPresseed
global tiltKeyPresseed
global sliderKeyPresseed
global drop_down_serial
global isZooming
joyPS4 = "Sony"
joyPS4BT = "DUALSHOCK"
joyPS4Win = "PS4"
joy360 = "360"
joyNimbus = "Nimbus"
joySN30 = "SN30"
joySN30BT = "Unknown Wireless Controller"
UITextEntry = "UITextEntryLine"
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN:
if 482 <= mouse[0] <= 482+56 and 262 <= mouse[1] <= 262+56:
isZooming = True
sendZOOMin()
#print("IN pressed")
if 482 <= mouse[0] <= 482+56 and 322 <= mouse[1] <= 322+56:
isZooming = True
sendZOOMout()
#print("OUT pressed")
if event.type == pygame.MOUSEBUTTONUP and isZooming:
isZooming = False
sendZOOMstop()
ui_manager.process_events(event)
deadRangeLow = -0.2
deadRangeHigh = 0.2
whereIsFocus = str(ui_manager.get_focus_set())
if (event.type == pygame.KEYDOWN) and not (UITextEntry in whereIsFocus):
if event.key == ord('a'):
axisX = int(-255)
panKeyPresseed = True
#print('Left')
if event.key == ord('d'):
axisX = int(255)
panKeyPresseed = True
#print('Right')
if event.key == ord('w'):
axisY = int(-255)
tiltKeyPresseed = True
#print('Up')
if event.key == ord('s'):
axisY = int(255)
tiltKeyPresseed = True
#print('Down')
if event.key == ord(','):
axisZ = int(-255)
sliderKeyPresseed = True
#print('Slider Left')
if event.key == ord('.'):
axisZ = int(255)
sliderKeyPresseed = True
#print('Slider Right')
if (event.type == pygame.KEYUP) and not (UITextEntry in whereIsFocus):
if event.key == ord('a'):
axisX = int(0)
panKeyPresseed = False
#print('Left stop')
if event.key == ord('d'):
axisX = int(0)
panKeyPresseed = False
#print('Right stop')
if event.key == ord('w'):
axisY = int(0)
tiltKeyPresseed = False
#print('Up stop')
if event.key == ord('s'):
axisY = int(0)
tiltKeyPresseed = False
#print('Down stop')
if event.key == ord(','):
axisZ = int(0)
sliderKeyPresseed = False
#print('Slider Left stop')
if event.key == ord('.'):
axisZ = int(0)
sliderKeyPresseed = False
#print('Slider Right stop')
# left 1
# right 2
# down 3
# up 4
if joystick == '':
pass
else:
if (joyPS4 in joystickName) or (joyPS4BT in joystickName) or (joyPS4Win in joystickName):
#print ("PS4 Controller Found")
hat = joystick.get_hat(0)
hatX = hat[0]
hatY = hat[1]
if (hatX != oldHatX):
oldHatX = hatX
if hatX == 1: # PS4 RIGHT
sendSET2()
if hatX == -1: # PS4 LEFT
sendSET1()
if (hatY != oldHatY):
oldHatY = hatY
if hatY == 1: # PS4 UP
sendSET4()
if hatY == -1: # PS4 DOWN
sendSET3()
if event.type == pygame.JOYBUTTONDOWN:
if (joystick.get_button(0) and not button0Pressed): # PS4 Square
button0Pressed = True
sendGO1()
#print("0 - Squ")
elif (joystick.get_button(1) and not button1Pressed): # PS4 Cross
button1Pressed = True
sendGO3()
#print("1 - Cro")
elif (joystick.get_button(2) and not button2Pressed): # PS4 Circle
button2Pressed = True
sendGO2()
#print("2" - Cir)
elif (joystick.get_button(3) and not button3Pressed): # PS4 Triangle
button3Pressed = True
sendGO4()
#print("3 - Tri")
elif (joystick.get_button(4) and not button4Pressed): # PS4 L1
button4Pressed = True
sendSPEEDslow()
#print("4 - L1")
elif (joystick.get_button(5) and not button5Pressed): # PS4 R1
button5Pressed = True
sendSPEEDfast()
#print("5 - R1")
elif (joystick.get_button(6) and not button6Pressed): # PS4 L2
button6Pressed = True
sendZOOMout()
isZooming = True
#print("6 - L2")
elif (joystick.get_button(7) and not button7Pressed): # PS4 R2
button7Pressed = True
sendZOOMin()
isZooming = True
#print("7 - R2")
elif (joystick.get_button(8) and not button8Pressed): # PS4 Share
button8Pressed = True
sendREPORTpos()
#print("8 - Sha")
elif (joystick.get_button(9) and not button9Pressed): # PS4 Option
button9Pressed = True
sendREPORTpos()
#print("9 - Opt")
elif (joystick.get_button(10) and not button10Pressed): # PS4 L3
button10Pressed = True
sendREPORTpos()
#print("10 - L3")
elif (joystick.get_button(11) and not button11Pressed): # PS4 R3
button11Pressed = True
sendREPORTpos()
#print("11 - R3")
if event.type == pygame.JOYBUTTONUP:
if button6Pressed and not joystick.get_button(6):
button6Pressed = False
sendZOOMstop()
isZooming = False
elif button7Pressed and not joystick.get_button(7):
button7Pressed = False
sendZOOMstop()
isZooming = False
if not panKeyPresseed and not tiltKeyPresseed and not joyCircle_draging:
joyXread = joystick.get_axis(0)
joyYread = joystick.get_axis(1)
if (joyXread < deadRangeLow):
axisX = int(scale(joyXread, (-1.0,deadRangeLow), (-255,0)))
elif (joyXread > deadRangeHigh):
axisX = int(scale(joyXread, (deadRangeHigh,1.0), (0,255)))
else:
axisX = 0
if (joyYread < deadRangeLow):
axisY = int(scale(joyYread, (-1.0,deadRangeLow), (-255,0)))
elif (joyYread > deadRangeHigh):
axisY = int(scale(joyYread, (deadRangeHigh,1.0), (0,255)))
else:
axisY = 0
if not sliderKeyPresseed and not sliderCircle_draging:
joyZread = joystick.get_axis(2)
if (joyZread < deadRangeLow):
axisZ = int(scale(joyZread, (-1.0,deadRangeLow), (-255,0)))
elif (joyZread > deadRangeHigh):
axisZ = int(scale(joyZread, (deadRangeHigh,1.0), (0,255)))
else:
axisZ = 0
elif joy360 in joystickName:
#print ("360 Controller Found")
if event.type == pygame.JOYBUTTONDOWN:
if (joystick.get_button(0) and not button0Pressed): # 360 - A
button0Pressed = True
sendGO3()
#print("0 - A")
elif (joystick.get_button(1) and not button1Pressed): # 360 - B
button1Pressed = True
sendGO2()
#print("1 - B")
elif (joystick.get_button(2) and not button2Pressed): # 360 - X
button2Pressed = True
sendGO1()
#print("2 - X")
elif (joystick.get_button(3) and not button3Pressed): # 360 - Y
button3Pressed = True
sendGO4()
#print("3 - Y")
elif (joystick.get_button(4) and not button4Pressed): # 360 - L1
button4Pressed = True
sendSPEEDslow()
#print("4 - L1")
elif (joystick.get_button(5) and not button5Pressed): # 360 - R1
button5Pressed = True
sendSPEEDfast()
#print("5 - R1")
elif (joystick.get_button(6) and not button6Pressed): # 360 - L3
button6Pressed = True
sendREPORTall()
#print("6 - L3")
elif (joystick.get_button(7) and not button7Pressed): # 360 - R3
button7Pressed = True
sendREPORTall()
#print("7 - R3")
elif (joystick.get_button(8) and not button8Pressed): # 360 - Start
button8Pressed = True
sendREPORTall()
#print("8 - Start")
elif (joystick.get_button(9) and not button9Pressed): # 360 - Back
button9Pressed = True
sendREPORTall()
#print("9 - Back")
elif (joystick.get_button(10) and not button10Pressed): # 360 - XBOX
button10Pressed = True
sendREPORTall()
#print("10 - XBOX")
elif (joystick.get_button(11) and not button11Pressed): # 360 - Up
button11Pressed = True
sendSET4()
#print("11 - Up")
elif (joystick.get_button(12) and not button12Pressed): # 360 - Down
button12Pressed = True
sendSET3()
#print("12 - Down")
elif (joystick.get_button(13) and not button13Pressed): # 360 - Left
button13Pressed = True
sendSET1()
#print("13 - Left")
elif (joystick.get_button(14) and not button14Pressed): # 360 - Right
button14Pressed = True
sendSET2()
#print("14 - Right")
if not panKeyPresseed and not tiltKeyPresseed and not joyCircle_draging:
joyXread = joystick.get_axis(0)
joyYread = joystick.get_axis(1)
joyL2read = joystick.get_axis(2)
joyR2read = joystick.get_axis(5)
if (joyXread < deadRangeLow):
axisX = int(scale(joyXread, (-1.0,deadRangeLow), (-255,0)))
elif (joyXread > deadRangeHigh):
axisX = int(scale(joyXread, (deadRangeHigh,1.0), (0,255)))
else:
axisX = 0
if (joyYread < deadRangeLow):
axisY = int(scale(joyYread, (-1.0,deadRangeLow), (-255,0)))
elif (joyYread > deadRangeHigh):
axisY = int(scale(joyYread, (deadRangeHigh,1.0), (0,255)))
else:
axisY = 0
if (joyL2read > 0) and not button15Pressed:
isZooming = True
sendZOOMout()
button15Pressed = True
if (joyR2read > 0) and not button16Pressed:
isZooming = True
sendZOOMin()
button16Pressed = True
if (button15Pressed and (joyL2read < 0)):
isZooming = False
sendZOOMstop()
button15Pressed = False
if (button16Pressed and (joyR2read < 0)):
isZooming = False
sendZOOMstop()
button16Pressed = False
if not sliderKeyPresseed and not sliderCircle_draging:
joyZread = joystick.get_axis(3)
if (joyZread < deadRangeLow):
axisZ = int(scale(joyZread, (-1.0,deadRangeLow), (-255,0)))
elif (joyZread > deadRangeHigh):
axisZ = int(scale(joyZread, (deadRangeHigh,1.0), (0,255)))
else:
axisZ = 0
elif joyNimbus in joystickName:
#print ("Nimbus Controller Found")
if event.type == pygame.JOYBUTTONDOWN:
if (joystick.get_button(0) and not button0Pressed): # Nimbus - A
button0Pressed = True
sendGO3()
#print("0 - A")
elif (joystick.get_button(1) and not button1Pressed): # Nimbus - B
button1Pressed = True
sendGO2()
#print("1 - B")
elif (joystick.get_button(2) and not button2Pressed): # Nimbus - X
button2Pressed = True
sendGO1()
#print("2 - X")
elif (joystick.get_button(3) and not button3Pressed): # Nimbus - Y
button3Pressed = True
sendGO4()
#print("3 - Y")
elif (joystick.get_button(4) and not button4Pressed): # Nimbus - L1
button4Pressed = True
sendSPEEDslow()
#print("4 - L1")
elif (joystick.get_button(5) and not button5Pressed): # Nimbus - R1
button5Pressed = True
sendSPEEDfast()
#print("5 - R1")
elif (joystick.get_button(6) and not button6Pressed): # Nimbus - L2
button6Pressed = True
isZooming = True
sendZOOMout()
#print("6 - L2")
elif (joystick.get_button(7) and not button7Pressed): # Nimbus - R2
button7Pressed = True
isZooming = True
sendZOOMin()
#print("7 - R2")
elif (joystick.get_button(8) and not button8Pressed): # Nimbus - Up
button8Pressed = True
sendSET4()
#print("8 - Up")
elif (joystick.get_button(9) and not button9Pressed): # Nimbus - Down
button9Pressed = True
sendSET3()
#print("9 - Down")
elif (joystick.get_button(10) and not button10Pressed): # Nimbus - Right
button10Pressed = True
sendSET2()
#print("10 - Right")
elif (joystick.get_button(11) and not button11Pressed): # Nimbus - Left
button11Pressed = True
sendSET1()
#print("11 - Left")
elif (joystick.get_button(12) and not button12Pressed): # Nimbus - Menu
button12Pressed = True
sendREPORTall()
#print("12 - Menu")
if event.type == pygame.JOYBUTTONUP:
if button6Pressed and not joystick.get_button(6):
button6Pressed = False
sendZOOMstop()
isZooming = False
elif button7Pressed and not joystick.get_button(7):
button7Pressed = False
sendZOOMstop()
isZooming = False
if not panKeyPresseed and not tiltKeyPresseed and not joyCircle_draging:
joyXread = joystick.get_axis(0)
joyYread = -(joystick.get_axis(1))
if (joyXread < deadRangeLow):
axisX = int(scale(joyXread, (-1.0,deadRangeLow), (-255,0)))
elif (joyXread > deadRangeHigh):
axisX = int(scale(joyXread, (deadRangeHigh,1.0), (0,255)))
else:
axisX = 0
if (joyYread < deadRangeLow):
axisY = int(scale(joyYread, (-1.0,deadRangeLow), (-255,0)))
elif (joyYread > deadRangeHigh):
axisY = int(scale(joyYread, (deadRangeHigh,1.0), (0,255)))
else:
axisY = 0
if not sliderKeyPresseed and not sliderCircle_draging:
joyZread = joystick.get_axis(2)
if (joyZread < deadRangeLow):
axisZ = int(scale(joyZread, (-1.0,deadRangeLow), (-255,0)))
elif (joyZread > deadRangeHigh):
axisZ = int(scale(joyZread, (deadRangeHigh,1.0), (0,255)))
else:
axisZ = 0
elif (joySN30 in joystickName) or (joySN30BT in joystickName):
#print ("SN30 Controller Found")
hat = joystick.get_hat(0)
hatX = hat[0]
hatY = hat[1]
if (hatX != oldHatX):
oldHatX = hatX
if hatX == 1: # SN30 RIGHT
sendSET2()
if hatX == -1: # SN30 LEFT
sendSET1()
if (hatY != oldHatY):
oldHatY = hatY
if hatY == 1: # SN30 UP
sendSET4()
if hatY == -1: # SN30 DOWN
sendSET3()
if event.type == pygame.JOYBUTTONDOWN:
if (joystick.get_button(0) and not button0Pressed): # SN30 - B
button0Pressed = True
sendGO3()
#print("0 - B")
elif (joystick.get_button(1) and not button1Pressed): # SN30 - A
button1Pressed = True
sendGO2()
#print("1 - A")
elif (joystick.get_button(2) and not button2Pressed): # SN30 - Heart
button2Pressed = True
sendREPORTall()
#print("2 - Heart")
elif (joystick.get_button(3) and not button3Pressed): # SN30 - X
button3Pressed = True
sendGO4()
#print("3 - X")
elif (joystick.get_button(4) and not button4Pressed): # SN30 - Y
button4Pressed = True
sendGO1()
#print("4 - Y")
#elif (joystick.get_button(5) and not button5Pressed): # SN30 - None
# button5Pressed = True
# sendREPORTall()
#print("5 - None")
elif (joystick.get_button(6) and not button6Pressed): # SN30 - L1
button6Pressed = True
sendSPEEDslow()
#print("6 - L1")
elif (joystick.get_button(7) and not button7Pressed): # SN30 - R1
button7Pressed = True
sendSPEEDfast()
#print("7 - R1")
elif (joystick.get_button(8) and not button8Pressed): # SN30 - L2
button8Pressed = True
isZooming = True
sendZOOMout()
#print("8 - L2")
elif (joystick.get_button(9) and not button9Pressed): # SN30 - R2
button9Pressed = True
isZooming = True
sendZOOMin()
#print("9 - R2")
elif (joystick.get_button(10) and not button10Pressed): # SN30 - Select
button10Pressed = True
sendREPORTall()
#print("10 - Select")
elif (joystick.get_button(11) and not button11Pressed): # SN30 - Start
button11Pressed = True
sendREPORTall()
#print("11 - Start")
#elif (joystick.get_button(12) and not button12Pressed): # SN30 - None
# button10Pressed = True
# sendREPORTall()
#print("12 - None")
elif (joystick.get_button(13) and not button13Pressed): # SN30 - L3
button13Pressed = True
sendREPORTall()
#print("13 - L3")
elif (joystick.get_button(14) and not button14Pressed): # SN30 - R3
button14Pressed = True
sendREPORTall()
#print("14 - R3")
if event.type == pygame.JOYBUTTONUP:
if button8Pressed and not joystick.get_button(8):
button8Pressed = False
sendZOOMstop()
isZooming = False
elif button9Pressed and not joystick.get_button(9):
button9Pressed = False
sendZOOMstop()
isZooming = False
if not panKeyPresseed and not tiltKeyPresseed and not joyCircle_draging:
joyXread = joystick.get_axis(0)
joyYread = joystick.get_axis(1)
if (joyXread < deadRangeLow):
axisX = int(scale(joyXread, (-1.0,deadRangeLow), (-255,0)))
elif (joyXread > deadRangeHigh):
axisX = int(scale(joyXread, (deadRangeHigh,1.0), (0,255)))
else:
axisX = 0
if (joyYread < deadRangeLow):
axisY = int(scale(joyYread, (-1.0,deadRangeLow), (-255,0)))
elif (joyYread > deadRangeHigh):
axisY = int(scale(joyYread, (deadRangeHigh,1.0), (0,255)))
else:
axisY = 0
if not sliderKeyPresseed and not sliderCircle_draging:
joyZread = joystick.get_axis(2)
if (joyZread < deadRangeLow):
axisZ = int(scale(joyZread, (-1.0,deadRangeLow), (-255,0)))
elif (joyZread > deadRangeHigh):
axisZ = int(scale(joyZread, (deadRangeHigh,1.0), (0,255)))
else:
axisZ = 0
else:
#print ("Other Controller Found")
if event.type == pygame.JOYBUTTONDOWN:
if (joystick.get_button(0) and not button0Pressed): # A
button0Pressed = True
sendGO4()
#print("0 - A")
elif (joystick.get_button(1) and not button1Pressed): # B
button1Pressed = True
sendGO1()
#print("1 - B")
elif (joystick.get_button(2) and not button2Pressed): # X
button2Pressed = True
sendGO2()
#print("2 - X")
elif (joystick.get_button(3) and not button3Pressed): # Y
button3Pressed = True
sendGO3()
#print("3 - Y")
elif (joystick.get_button(4) and not button4Pressed): # L1
button4Pressed = True
sendSPEEDslow()
#print("4 - L1")
elif (joystick.get_button(5) and not button5Pressed): # R1
button5Pressed = True
sendSPEEDfast()
#print("5 - R1")
elif (joystick.get_button(6) and not button6Pressed): # L2
button6Pressed = True
isZooming = True
sendZOOMout()
#print("6 - L2")
elif (joystick.get_button(7) and not button7Pressed): # R2
button7Pressed = True
isZooming = True
sendZOOMin()
#print("7 - R2")
elif (joystick.get_button(8) and not button8Pressed): # Up
button8Pressed = True
sendSET3()
#print("8 - Up")
elif (joystick.get_button(9) and not button9Pressed): # Down
button9Pressed = True
sendSET4()
#print("9 - Down")
elif (joystick.get_button(10) and not button10Pressed): # Right
button10Pressed = True
sendSET1()
#print("10 - Right")
elif (joystick.get_button(11) and not button11Pressed): # Left
button11Pressed = True
sendSET2()
#print("11 - Left")
elif (joystick.get_button(12) and not button12Pressed): # Menu
button12Pressed = True
sendREPORTall()
#print("12 - Menu")
if event.type == pygame.JOYBUTTONUP:
if button6Pressed and not joystick.get_button(6):
button6Pressed = False
sendZOOMstop()
isZooming = False
elif button7Pressed and not joystick.get_button(7):
button7Pressed = False
sendZOOMstop()
isZooming = False
if not panKeyPresseed and not tiltKeyPresseed and not joyCircle_draging:
joyXread = joystick.get_axis(0)
joyYread = joystick.get_axis(1)
if (joyXread < deadRangeLow):
axisX = int(scale(joyXread, (-1.0,deadRangeLow), (-255,0)))
elif (joyXread > deadRangeHigh):
axisX = int(scale(joyXread, (deadRangeHigh,1.0), (0,255)))
else:
axisX = 0
if (joyYread < deadRangeLow):
axisY = int(scale(joyYread, (-1.0,deadRangeLow), (-255,0)))
elif (joyYread > deadRangeHigh):
axisY = int(scale(joyYread, (deadRangeHigh,1.0), (0,255)))
else:
axisY = 0
if not sliderKeyPresseed and not sliderCircle_draging:
joyZread = joystick.get_axis(2)
if (joyZread < deadRangeLow):
axisZ = int(scale(joyZread, (-1.0,deadRangeLow), (-255,0)))
elif (joyZread > deadRangeHigh):
axisZ = int(scale(joyZread, (deadRangeHigh,1.0), (0,255)))
else:
axisZ = 0
if event.type == pygame.JOYBUTTONUP:
if (button0Pressed and not joystick.get_button(0)):
button0Pressed = False
elif (button1Pressed and not joystick.get_button(1)):
button1Pressed = False
elif (button2Pressed and not joystick.get_button(2)):
button2Pressed = False
elif (button3Pressed and not joystick.get_button(3)):
button3Pressed = False
elif (button4Pressed and not joystick.get_button(4)):
button4Pressed = False
elif (button5Pressed and not joystick.get_button(5)):
button5Pressed = False
elif (button6Pressed and not joystick.get_button(6)):
button6Pressed = False
elif (button7Pressed and not joystick.get_button(7)):
button7Pressed = False
elif (button8Pressed and not joystick.get_button(8)):
button8Pressed = False
elif (button9Pressed and not joystick.get_button(9)):
button9Pressed = False
elif (button10Pressed and not joystick.get_button(10)):
button10Pressed = False
elif (button11Pressed and not joystick.get_button(11)):
button11Pressed = False
elif (button12Pressed and not joystick.get_button(12)):
button12Pressed = False
elif (button13Pressed and not joystick.get_button(13)):
button13Pressed = False
elif (button14Pressed and not joystick.get_button(14)):
button14Pressed = False
if event.type == pygame.USEREVENT:
if (event.user_type == pygame_gui.UI_TEXT_ENTRY_FINISHED):
sendSerial(event.text)
serial_text_entry.set_text('')
if event.user_type == pygame_gui.UI_CONFIRMATION_DIALOG_CONFIRMED:
sendCLEARALLpos()
if event.user_type == pygame_gui.UI_BUTTON_PRESSED:
if event.ui_element == rel_button_L1:
sendLEFT1()
elif event.ui_element == rel_button_L10:
sendLEFT10()
elif event.ui_element == rel_button_R1:
sendRIGHT1()
elif event.ui_element == rel_button_R10:
sendRIGHT10()
elif event.ui_element == rel_button_U1:
sendUP1()
elif event.ui_element == rel_button_U10:
sendUP10()
elif event.ui_element == rel_button_D1:
sendDOWN1()
elif event.ui_element == rel_button_D10:
sendDOWN10()
#elif event.ui_element == rel_button_set0:
# sendRESETpos()
elif event.ui_element == rel_button_SR10:
sendSR1()
elif event.ui_element == rel_button_SR100:
sendSR10()
elif event.ui_element == rel_button_SL10:
sendSL1()
elif event.ui_element == rel_button_SL100:
sendSL10()
elif event.ui_element == rel_button_SET1:
if not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendSET1()
elif event.ui_element == rel_button_SET2:
if not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendSET2()
elif event.ui_element == rel_button_SET3:
if not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendSET3()
elif event.ui_element == rel_button_SET4:
if not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendSET4()
elif event.ui_element == rel_button_SET5:
if not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendSET5()
elif event.ui_element == rel_button_SET6:
if not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendSET6()
elif event.ui_element == rel_button_GO1:
if pos1set and not atPos1 and not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendGO1()
elif event.ui_element == rel_button_GO2:
if pos2set and not atPos2 and not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendGO2()
elif event.ui_element == rel_button_GO3:
if pos3set and not atPos3 and not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendGO3()
elif event.ui_element == rel_button_GO4:
if pos4set and not atPos4 and not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendGO4()
elif event.ui_element == rel_button_GO5:
if pos5set and not atPos5 and not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendGO5()
elif event.ui_element == rel_button_GO6:
if pos6set and not atPos6 and not (pos1run or pos2run or pos3run or pos4run or pos5run or pos6run):
sendGO6()
elif event.ui_element == rel_button_CLEARALL:
clearPosConfirm()
elif event.ui_element == rel_button_Refresh:
doRefresh()
elif event.ui_element == rel_button_SLOW:
sendSPEEDslow()
elif event.ui_element == rel_button_FAST:
sendSPEEDfast()
elif event.ui_element == rel_button_REPORT:
sendREPORTall()
elif event.ui_element == rel_button_REPORTPOS:
sendREPORTpos()
elif event.ui_element == rel_button_CLEARtext:
sendCLEARtext()
if (event.user_type == pygame_gui.UI_DROP_DOWN_MENU_CHANGED
and event.ui_element == drop_down_serial):
serialPort_changed()
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
if joyCircle.collidepoint(event.pos):
joyCircle_draging = True
mouse_x, mouse_y = event.pos
offset_x = joyCircle.x - mouse_x
offset_y = joyCircle.y - mouse_y
if sliderCircle.collidepoint(event.pos):
sliderCircle_draging = True
mouse_x, mouse_y = event.pos
sliderOffset_x = sliderCircle.x - mouse_x
sliderOffset_y = sliderCircle.y - mouse_y
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
if joyCircle_draging:
joyCircle_draging = False
joyCircle.x = 195
joyCircle.y = 195
axisX = int(0)
axisY = int(0)
if sliderCircle_draging:
sliderCircle_draging = False
sliderCircle.x = 195
sliderCircle.y = 415
axisZ = int(0)
mouseMoving = False
if isZooming:
sendZOOMstop()
isZooming = False
if event.type == pygame.MOUSEMOTION:
if joyCircle_draging:
mouseMoving = True
mouse_x, mouse_y = event.pos
joyCircle.x = mouse_x
joyCircle.y = mouse_y
"""
if ((mouse_x + offset_x) > mouseBorder) and ((mouse_y + offset_y) > mouseBorder): # XY Dot out of box: right & bottom
joyCircle.x = mouseBorder
joyCircle.y = mouseBorder
elif (((mouse_x + offset_x) > mouseBorder) and ((mouse_y + offset_y) < 30)): # XY Dot out of box: right & top
joyCircle.x = mouseBorder
joyCircle.y = 30
elif (((mouse_x + offset_x) < 30) and ((mouse_y + offset_y) > mouseBorder)): # XY Dot out of box: left & bottom
joyCircle.x = 30
joyCircle.y = mouseBorder
elif (((mouse_x + offset_x) < 30) and ((mouse_y + offset_y) < 30)): # XY Dot out of box: left & top
joyCircle.x = 30
joyCircle.y = 30
elif ((mouse_x + offset_x) > (mouseBorder)): # XY Dot out of box: right
joyCircle.x = mouseBorder
joyCircle.y = mouse_y + offset_y
elif ((mouse_x + offset_x) < 30): # XY Dot out of box: left
joyCircle.x = 30
joyCircle.y = mouse_y + offset_y
elif ((mouse_y + offset_y) > (mouseBorder)): # XY Dot out of box: bottom
joyCircle.y = mouseBorder
joyCircle.x = mouse_x + offset_x
elif ((mouse_y + offset_y) < 30): # XY Dot out of box: top
joyCircle.y = 30
joyCircle.x = mouse_x + offset_x
else:
"""
joyCircle.x = mouse_x + offset_x # XY Dot inside box
joyCircle.y = mouse_y + offset_y
axisX = int(scale((joyCircle.x), (30,mouseBorder), (-255,255)))
axisY = int(scale((joyCircle.y), (30,mouseBorder), (-255,255)))
if axisX > 255:
axisX = 255
if axisY > 255:
axisY = 255
if axisX < -255:
axisX = -255
if axisY < -255:
axisY = -255
if sliderCircle_draging:
mouseMoving = True
mouse_x, mouse_y = event.pos
sliderCircle.x = mouse_x
sliderCircle.y = 420
if ((mouse_x + sliderOffset_x) > mouseBorder): # Z Dot out of box: right
sliderCircle.x = mouseBorder
elif ((mouse_x + sliderOffset_x) < 30): # Z Dot out of box: left
sliderCircle.x = 30
else:
sliderCircle.x = mouse_x + sliderOffset_x # Z Dot inside box
axisZ = int(scale((sliderCircle.x), (30,mouseBorder), (-255,255)))
#axisXDOT = scale(axisX, (-255,255), (-1.0,1.0))
#axisYDOT = scale(axisY, (-255,255), (-1.0,1.0))
#axisZDOT = scale(axisZ, (-255,255), (-1.0,1.0))
#joyCircle.x = (axisXDOT*165)+210-radius
#joyCircle.y = (axisYDOT*165)+210-radius
#sliderCircle.x = (axisZDOT*165)+210-radius
while running:
time_delta = clock.tick() / 1000.0
time_delta_stack.append(time_delta)
process_events() # check for input
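    # Send a joystick packet only when an axis value has changed, rate-limited
    # to one update every 100 ms.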
if (((axisX != oldAxisX) or (axisY != oldAxisY) or (axisZ != oldAxisZ)) and ((time.time() - previousTime) > 0.1)):
previousTime = time.time()
oldAxisX = axisX
oldAxisY = axisY
oldAxisZ = axisZ
axisXh = tohex(axisX, 16)
axisYh = tohex(-axisY, 16)
axisZh = tohex(axisZ, 16)
arr = [4, axisZh, axisXh, axisYh]
sendJoystick(arr)
#print(4,' - ', axisZh, ' - ', axisXh, ' - ', axisYh)
try:
readSerial()
except:
ser=''
current_serialPort = [' - ']
serialNotSel = 'Serial port disconnected.<br>'
textBoxSerial.kill()
serialText = serialNotSel + serialText
serialPortTextBox()
speedRec = False
pos1set = False
pos2set = False
pos3set = False
pos4set = False
pos5set = False
pos6set = False
atPos1 = False
atPos2 = False
atPos3 = False
atPos4 = False
atPos5 = False
atPos6 = False
pos1run = False
pos2run = False
pos3run = False
pos4run = False
pos5run = False
pos6run = False
ports = serial.tools.list_ports.comports() # Search for attached serial ports
available_ports = []
for p in ports:
available_ports.append(p.device) # Append each found serial port to array available_ports
drop_down_serial.kill()
drop_down_serial = UIDropDownMenu(available_ports, # Recreate serial port drop down list
current_serialPort[0], # Currently selected port
pygame.Rect((620,95),
(250, 30)),
ui_manager)
ui_manager.update(time_delta) # respond to input
# Clear screen
window_surface.blit(background_surface, (0, 0)) # draw graphics
# Draw position LEDs
if pos1set and not pos1run and not atPos1:
pygame.draw.circle(window_surface, RED, (60, 480), radius/2)
elif pos1set and not pos1run and atPos1:
pygame.draw.circle(window_surface, GREEN, (60, 480), radius/2)
elif pos1set and pos1run and not atPos1:
if blinkSet:
pygame.draw.circle(window_surface, GREEN, (60, 480), radius/2)
else:
pygame.draw.circle(window_surface, OFF, (60, 480), radius/2)
elif not pos1set:
pygame.draw.circle(window_surface, OFF, (60, 480), radius/2)
if pos2set and not pos2run and not atPos2:
pygame.draw.circle(window_surface, RED, (120, 480), radius/2)
elif pos2set and not pos2run and atPos2:
pygame.draw.circle(window_surface, GREEN, (120, 480), radius/2)
elif pos2set and pos2run and not atPos2:
if blinkSet:
pygame.draw.circle(window_surface, GREEN, (120, 480), radius/2)
else:
pygame.draw.circle(window_surface, OFF, (120, 480), radius/2)
elif not pos2set:
pygame.draw.circle(window_surface, OFF, (120, 480), radius/2)
if pos3set and not pos3run and not atPos3:
pygame.draw.circle(window_surface, RED, (180, 480), radius/2)
elif pos3set and not pos3run and atPos3:
pygame.draw.circle(window_surface, GREEN, (180, 480), radius/2)
elif pos3set and pos3run and not atPos3:
if blinkSet:
pygame.draw.circle(window_surface, GREEN, (180, 480), radius/2)
else:
pygame.draw.circle(window_surface, OFF, (180, 480), radius/2)
elif not pos3set:
pygame.draw.circle(window_surface, OFF, (180, 480), radius/2)
if pos4set and not pos4run and not atPos4:
pygame.draw.circle(window_surface, RED, (240, 480), radius/2)
elif pos4set and not pos4run and atPos4:
pygame.draw.circle(window_surface, GREEN, (240, 480), radius/2)
elif pos4set and pos4run and not atPos4:
if blinkSet:
pygame.draw.circle(window_surface, GREEN, (240, 480), radius/2)
else:
pygame.draw.circle(window_surface, OFF, (240, 480), radius/2)
elif not pos4set:
pygame.draw.circle(window_surface, OFF, (240, 480), radius/2)
if pos5set and not pos5run and not atPos5:
pygame.draw.circle(window_surface, RED, (300, 480), radius/2)
elif pos5set and not pos5run and atPos5:
pygame.draw.circle(window_surface, GREEN, (300, 480), radius/2)
elif pos5set and pos5run and not atPos5:
if blinkSet:
pygame.draw.circle(window_surface, GREEN, (300, 480), radius/2)
else:
pygame.draw.circle(window_surface, OFF, (300, 480), radius/2)
elif not pos5set:
pygame.draw.circle(window_surface, OFF, (300, 480), radius/2)
if pos6set and not pos6run and not atPos6:
pygame.draw.circle(window_surface, RED, (360, 480), radius/2)
elif pos6set and not pos6run and atPos6:
pygame.draw.circle(window_surface, GREEN, (360, 480), radius/2)
elif pos6set and pos6run and not atPos6:
if blinkSet:
pygame.draw.circle(window_surface, GREEN, (360, 480), radius/2)
else:
pygame.draw.circle(window_surface, OFF, (360, 480), radius/2)
elif not pos6set:
pygame.draw.circle(window_surface, OFF, (360, 480), radius/2)
# Blink timer for position LEDs
if previousTicks <= pygame.time.get_ticks():
blinkSet = not blinkSet
previousTicks = pygame.time.get_ticks() + interval
# Only enable sending of Report after delay
if canSendReport and (previousTicksReport <= pygame.time.get_ticks()):
canSendReport = False
temp='^r'
sendSerial(temp)
# Speed LEDs
if speedRec and speedIsFast:
pygame.draw.circle(window_surface, GREEN, (460, 130), radius/2)
elif speedRec and not speedIsFast:
pygame.draw.circle(window_surface, GREEN, (460, 190), radius/2)
ui_manager.draw_ui(window_surface) # draw UI
# Draw W A S D Letters
window_surface.blit(textsurfaceW,(198,28)) # W
window_surface.blit(textsurfaceA,(35,190)) # A
window_surface.blit(textsurfaceS,(205,355)) # S
window_surface.blit(textsurfaceD,(365,190)) # D
window_surface.blit(textsurfaceLeft,(35,415)) # ,
window_surface.blit(textsurfaceRight,(375,415)) # .
axisXDOT = scale(axisX, (-255,255), (-1.0,1.0))
axisYDOT = scale(axisY, (-255,255), (-1.0,1.0))
axisZDOT = scale(axisZ, (-255,255), (-1.0,1.0))
axisTestDot = pygame.math.Vector2((axisXDOT*10), (axisYDOT*10))
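    # Map the square [-1, 1] x [-1, 1] axis values onto the unit disc
    # (elliptical grid mapping) so the red dot stays inside the circular border.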
xCircle = axisXDOT * math.sqrt(1 - 0.5*axisYDOT**2)
yCircle = axisYDOT * math.sqrt(1 - 0.5*axisXDOT**2)
#joyCircle.x = (axisXDOT*165)+210-radius
#joyCircle.y = (axisYDOT*165)+210-radius
sliderCircle.x = (axisZDOT*165)+210-radius
joyCircle.x = (xCircle*165)+210-radius
joyCircle.y = (yCircle*165)+210-radius
# Draw draggable red dots
#pygame.draw.circle(window_surface, RED, (joyCircle.x+radius,joyCircle.y+radius), radius)
pygame.draw.circle(window_surface, RED, (axisTestDot), radius)
pygame.draw.circle(window_surface, RED, (sliderCircle.x+radius,430), radius)
# TEST
#pygame.draw.circle(window_surface, GREEN, (axisX+radius,axisY+radius), radius)
# Draw boxes that bound red dots
#pygame.draw.rect(window_surface, [125,0,0], [30,30,360,360],width=3)
pygame.draw.rect(window_surface, [125,0,0], [30,400,360,60],width=3)
pygame.draw.circle(window_surface, [125,0,0], (210,210),180+(radius/2),width=3)
mouse = pygame.mouse.get_pos()
# Zoom In & Out button highlights
if 482 <= mouse[0] <= 482+56 and 262 <= mouse[1] <= 262+56:
pygame.draw.rect(window_surface,colour_light,[482,262,56,56])
else:
pygame.draw.rect(window_surface,colour_dark,[482,262,56,56])
if 482 <= mouse[0] <= 482+56 and 322 <= mouse[1] <= 322+56:
pygame.draw.rect(window_surface,colour_light,[482,322,56,56])
else:
pygame.draw.rect(window_surface,colour_dark,[482,322,56,56])
# Display Zoom In & Zoom Out text inside their buttons
window_surface.blit(zoomINtext, (500, 278))
window_surface.blit(zoomOUTtext, (491, 338))
pygame.display.update()
clk.tick(40) |
the-stack_0_6770 | from IPython.parallel import Client
from random import uniform
from simul import Particle
def scatter_gather(nparticles):
particles = [Particle(uniform(-1.0, 1.0),
uniform(-1.0, 1.0),
uniform(-1.0, 1.0)) for i in range(nparticles)]
rc = Client()
dview = rc[:]
dview.scatter('particle_chunk', particles).get()
dview.execute('from simul import ParticleSimulator')
dview.execute('simulator = ParticleSimulator(particle_chunk)')
dview.execute('simulator.evolve_cython(0.1)')
particles = dview.gather('particle_chunk', block=True)
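# Illustrative usage sketch (not part of the original file): assumes an IPython
# parallel cluster is already running, e.g. started with `ipcluster start -n 4`.
if __name__ == '__main__':
    scatter_gather(1024)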
|
the-stack_0_6771 | #!/usr/bin/python3
# coding=utf-8
# pylint: disable=I0011,E0401,W0702,W0703
# Copyright 2019 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Scanner: safety
"""
import os
import subprocess
from dusty.tools import log
from dusty.models.module import DependentModuleModel
from dusty.models.scanner import ScannerModel
from .parser import parse_findings
class Scanner(DependentModuleModel, ScannerModel):
""" Scanner class """
def __init__(self, context):
""" Initialize scanner instance """
super().__init__()
self.context = context
self.config = \
self.context.config["scanners"][__name__.split(".")[-3]][__name__.split(".")[-2]]
def execute(self):
""" Run the scanner """
targets = self.config.get("requirements", "requirements.txt")
if isinstance(targets, str):
targets = [targets]
options = list()
for target in targets:
options.append("-r")
options.append(target)
task = subprocess.run(
["safety", "check", "--json"] + options,
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
log.log_subprocess_result(task)
parse_findings(task.stdout.decode("utf-8", errors="ignore"), self)
# Save intermediates
self.save_intermediates(task.stdout)
def save_intermediates(self, task_stdout):
""" Save scanner intermediates """
if self.config.get("save_intermediates_to", None):
log.info("Saving intermediates")
base = os.path.join(self.config.get("save_intermediates_to"), __name__.split(".")[-2])
try:
# Make directory for artifacts
os.makedirs(base, mode=0o755, exist_ok=True)
# Save report
with open(os.path.join(base, "report.json"), "w") as report:
report.write(task_stdout.decode("utf-8", errors="ignore"))
except:
log.exception("Failed to save intermediates")
@staticmethod
def fill_config(data_obj):
""" Make sample config """
data_obj.insert(
len(data_obj), "requirements", "requirements.txt",
comment="path to requirements.txt (string or list of strings)"
)
data_obj.insert(
len(data_obj), "save_intermediates_to", "/data/intermediates/dast",
comment="(optional) Save scan intermediates (raw results, logs, ...)"
)
@staticmethod
def validate_config(config):
""" Validate config """
required = []
not_set = [item for item in required if item not in config]
if not_set:
error = f"Required configuration options not set: {', '.join(not_set)}"
log.error(error)
raise ValueError(error)
@staticmethod
def get_name():
""" Module name """
return "safety"
@staticmethod
def get_description():
""" Module description or help message """
return "Python dependency analyzer"
|
the-stack_0_6775 | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["example.com"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicking memcache behavior.
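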
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
# STATIC
# ------------------------
STATICFILES_STORAGE = "config.settings.production.StaticRootS3Boto3Storage"
STATIC_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/static/"
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = "static"
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = "media"
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = "config.settings.production.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="OSchool <[email protected]>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[OSchool]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
"MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ["gunicorn"] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ["collectfast"] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console", "mail_admins"],
"propagate": True,
},
},
}
# Your stuff...
# ------------------------------------------------------------------------------
|
the-stack_0_6776 | #Import required libraries
import os
import cv2
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from random import shuffle
from tensorflow import keras
import matplotlib.pyplot as plt
from tensorflow.keras import models, layers
#Github: https://github.com/sujitmandal
#This program is created by Sujit Mandal
"""
Github: https://github.com/sujitmandal
This program is created by Sujit Mandal
LinkedIn : https://www.linkedin.com/in/sujit-mandal-91215013a/
Facebook : https://www.facebook.com/sujit.mandal.33671748
Twitter : https://twitter.com/mandalsujit37
"""
#Read The Dataset
train_images = np.load('Dataset/64/train_images.npy')
train_labels = np.load('Dataset/64/train_labels.npy')
test_images = np.load('Dataset/64/test_images.npy')
test_labels = np.load('Dataset/64/test_labels.npy')
#Normalize pixel values to the [0, 1] range
train_images = train_images / 255.0
test_images = test_images / 255.0
image_size = 64
#image_size = int(input('Enter The Image Size [32, 64, 128] :'))
EPOCHS = 20
#Convolutional Neural Network(CNN) building
def build_model():
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(image_size, image_size, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
return model
model = build_model()
ch_path = ('save/64/cp.ckpt')
cp_dir = os.path.dirname(ch_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(ch_path,
save_weights_only = True,
verbose = 1)
model = build_model()
#Train the model
history = model.fit(train_images, train_labels, epochs=EPOCHS,
validation_data=(test_images, test_labels), callbacks = [cp_callback])
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('Accuracy: {:5.2f}%'.format(100*test_acc))
model = build_model()
loss, acc = model.evaluate(test_images, test_labels)
print('untrained model, accuracy: {:5.2f}%'.format(100*acc))
model.load_weights(ch_path)
loss, acc = model.evaluate(test_images, test_labels)
print('restored model, accuracy: {:5.2f}%'.format(100*acc))
ch_path_2 = ('save/64/cp-{epoch:04d}.ckpt')
cp_dir_2 = os.path.dirname(ch_path_2)
cp_callback_2 = tf.keras.callbacks.ModelCheckpoint(ch_path_2,
save_weights_only = True,
verbose = 1,
period = 5)
model = build_model()
#Train the model
history_2 = model.fit(train_images, train_labels,
epochs=EPOCHS,
validation_data=(test_images, test_labels),
callbacks = [cp_callback_2],
verbose = 0
)
latest_model = tf.train.latest_checkpoint(cp_dir_2)
#save
model.save_weights('./save/64/my_save')
#restore
model = build_model()
model.load_weights('./save/64/my_save')
loss, acc = model.evaluate(test_images, test_labels)
print('restored model, accuracy: {:5.2f}%'.format(100*acc))
model = build_model()
model.fit(train_images, train_labels, epochs=15)
#save entire model to a HDF5 file
model.save('saved model/64/my_model.h5')
new_model = keras.models.load_model('saved model/64/my_model.h5')
new_model.summary()
loss, acc = new_model.evaluate(test_images, test_labels)
print('restored model, accuracy: {:5.2f}%'.format(100*acc))
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('Final Model, accuracy: {:5.2f}%'.format(100*test_acc)) |
the-stack_0_6779 | from keras.models import model_from_json
from common import *
def save_model(json_file, weights_file, model):
with open(json_file, 'w') as model_file:
model_file.write(model.to_json())
model.save_weights(weights_file)
def load_model(json_file, weights_file):
with open(json_file, 'r') as jfile:
model = model_from_json(jfile.read())
model.load_weights(weights_file)
return model
def load_base_model(cut_index):
return load_model(
base_model_stem(cut_index) + '.json',
base_model_stem(cut_index) + '.h5')
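# Illustrative usage sketch (not part of the original module): round-trip a
# compiled Keras model through the JSON + weights pair. `some_model` and the
# file names below are placeholders, not part of this project.
#
# save_model('model.json', 'model.h5', some_model)
# restored = load_model('model.json', 'model.h5')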
|
the-stack_0_6781 | from datetime import datetime, timezone
from typing import Tuple
from dagster import EventMetadataEntry, Output, OutputDefinition, solid
def binary_search_nearest_left(get_value, start, end, min_target):
mid = (start + end) // 2
while start <= end:
mid = (start + end) // 2
mid_timestamp = get_value(mid)
if mid_timestamp == min_target:
return mid
elif mid_timestamp < min_target:
start = mid + 1
elif mid_timestamp > min_target:
end = mid - 1
if mid == end:
return end + 1
return start
def binary_search_nearest_right(get_value, start, end, max_target):
mid = (start + end) // 2
while start <= end:
mid = (start + end) // 2
mid_timestamp = get_value(mid)
if not mid_timestamp:
end = end - 1
if mid_timestamp == max_target:
return mid
elif mid_timestamp < max_target:
start = mid + 1
elif mid_timestamp > max_target:
end = mid - 1
if end == -1:
return None
if start > end:
return end
return end
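def _demo_binary_search_bounds():
    """Illustrative sketch (not part of the original module): shows how the two
    nearest-neighbour searches bracket a range on a plain sorted list. The
    timestamps below are made up purely for demonstration."""
    timestamps = [10, 20, 30, 40, 50]
    get_value = lambda i: timestamps[i]
    # First index whose value is >= 25 is 2 (value 30).
    assert binary_search_nearest_left(get_value, 0, 4, 25) == 2
    # Last index whose value is <= 45 is 3 (value 40).
    assert binary_search_nearest_right(get_value, 0, 4, 45) == 3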
def _id_range_for_time(start, end, hn_client):
start = datetime.timestamp(
datetime.strptime(start, "%Y-%m-%d %H:%M:%S").replace(tzinfo=timezone.utc)
)
end = datetime.timestamp(
datetime.strptime(end, "%Y-%m-%d %H:%M:%S").replace(tzinfo=timezone.utc)
)
def _get_item_timestamp(item_id):
item = hn_client.fetch_item_by_id(item_id)
return item["time"]
max_item_id = hn_client.fetch_max_item_id()
# declared by resource to allow testability against snapshot
min_item_id = hn_client.min_item_id()
start_id = binary_search_nearest_left(_get_item_timestamp, min_item_id, max_item_id, start)
end_id = binary_search_nearest_right(_get_item_timestamp, min_item_id, max_item_id, end)
start_timestamp = str(datetime.fromtimestamp(_get_item_timestamp(start_id), tz=timezone.utc))
end_timestamp = str(datetime.fromtimestamp(_get_item_timestamp(end_id), tz=timezone.utc))
metadata_entries = [
EventMetadataEntry.int(value=max_item_id, label="max_item_id"),
EventMetadataEntry.int(value=start_id, label="start_id"),
EventMetadataEntry.int(value=end_id, label="end_id"),
EventMetadataEntry.int(value=end_id - start_id, label="items"),
EventMetadataEntry.text(text=start_timestamp, label="start_timestamp"),
EventMetadataEntry.text(text=end_timestamp, label="end_timestamp"),
]
id_range = (start_id, end_id)
return id_range, metadata_entries
@solid(
required_resource_keys={"hn_client", "partition_start", "partition_end"},
output_defs=[
OutputDefinition(
Tuple[int, int],
description="The lower (inclusive) and upper (exclusive) ids that bound the range for the partition",
)
],
)
def id_range_for_time(context):
"""
For the configured time partition, searches for the range of ids that were created in that time.
"""
id_range, metadata_entries = _id_range_for_time(
context.resources.partition_start,
context.resources.partition_end,
context.resources.hn_client,
)
yield Output(id_range, metadata_entries=metadata_entries)
|
the-stack_0_6782 | """Support for the Netatmo binary sensors."""
import logging
from pyatmo import NoDevice
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice
from homeassistant.const import CONF_TIMEOUT
from homeassistant.helpers import config_validation as cv
from .const import DATA_NETATMO_AUTH
from . import CameraData
_LOGGER = logging.getLogger(__name__)
# These are the available sensors mapped to binary_sensor class
WELCOME_SENSOR_TYPES = {
"Someone known": "motion",
"Someone unknown": "motion",
"Motion": "motion",
}
PRESENCE_SENSOR_TYPES = {
"Outdoor motion": "motion",
"Outdoor human": "motion",
"Outdoor animal": "motion",
"Outdoor vehicle": "motion",
}
TAG_SENSOR_TYPES = {"Tag Vibration": "vibration", "Tag Open": "opening"}
CONF_HOME = "home"
CONF_CAMERAS = "cameras"
CONF_WELCOME_SENSORS = "welcome_sensors"
CONF_PRESENCE_SENSORS = "presence_sensors"
CONF_TAG_SENSORS = "tag_sensors"
DEFAULT_TIMEOUT = 90
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_CAMERAS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_HOME): cv.string,
vol.Optional(
CONF_PRESENCE_SENSORS, default=list(PRESENCE_SENSOR_TYPES)
): vol.All(cv.ensure_list, [vol.In(PRESENCE_SENSOR_TYPES)]),
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_WELCOME_SENSORS, default=list(WELCOME_SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(WELCOME_SENSOR_TYPES)]
),
}
)
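# Example configuration.yaml entry (illustrative only; the home and camera
# names are assumptions):
#
# binary_sensor:
#   - platform: netatmo
#     home: MyHome
#     timeout: 90
#     cameras:
#       - MyCamera
#     welcome_sensors:
#       - Motion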
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the access to Netatmo binary sensor."""
home = config.get(CONF_HOME)
timeout = config.get(CONF_TIMEOUT)
if timeout is None:
timeout = DEFAULT_TIMEOUT
module_name = None
auth = hass.data[DATA_NETATMO_AUTH]
try:
data = CameraData(hass, auth, home)
if not data.get_camera_names():
return None
except NoDevice:
return None
welcome_sensors = config.get(CONF_WELCOME_SENSORS, WELCOME_SENSOR_TYPES)
presence_sensors = config.get(CONF_PRESENCE_SENSORS, PRESENCE_SENSOR_TYPES)
tag_sensors = config.get(CONF_TAG_SENSORS, TAG_SENSOR_TYPES)
for camera_name in data.get_camera_names():
camera_type = data.get_camera_type(camera=camera_name, home=home)
if camera_type == "NACamera":
if CONF_CAMERAS in config:
if (
config[CONF_CAMERAS] != []
and camera_name not in config[CONF_CAMERAS]
):
continue
for variable in welcome_sensors:
add_entities(
[
NetatmoBinarySensor(
data,
camera_name,
module_name,
home,
timeout,
camera_type,
variable,
)
],
True,
)
if camera_type == "NOC":
if CONF_CAMERAS in config:
if (
config[CONF_CAMERAS] != []
and camera_name not in config[CONF_CAMERAS]
):
continue
for variable in presence_sensors:
add_entities(
[
NetatmoBinarySensor(
data,
camera_name,
module_name,
home,
timeout,
camera_type,
variable,
)
],
True,
)
for module_name in data.get_module_names(camera_name):
for variable in tag_sensors:
camera_type = None
add_entities(
[
NetatmoBinarySensor(
data,
camera_name,
module_name,
home,
timeout,
camera_type,
variable,
)
],
True,
)
class NetatmoBinarySensor(BinarySensorDevice):
"""Represent a single binary sensor in a Netatmo Camera device."""
def __init__(
self, data, camera_name, module_name, home, timeout, camera_type, sensor
):
"""Set up for access to the Netatmo camera events."""
self._data = data
self._camera_name = camera_name
self._module_name = module_name
self._home = home
self._timeout = timeout
if home:
self._name = f"{home} / {camera_name}"
else:
self._name = camera_name
if module_name:
self._name += " / " + module_name
self._sensor_name = sensor
self._name += " " + sensor
self._cameratype = camera_type
self._state = None
@property
def name(self):
"""Return the name of the Netatmo device and this sensor."""
return self._name
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
if self._cameratype == "NACamera":
return WELCOME_SENSOR_TYPES.get(self._sensor_name)
if self._cameratype == "NOC":
return PRESENCE_SENSOR_TYPES.get(self._sensor_name)
return TAG_SENSOR_TYPES.get(self._sensor_name)
@property
def is_on(self):
"""Return true if binary sensor is on."""
return self._state
def update(self):
"""Request an update from the Netatmo API."""
self._data.update()
self._data.update_event()
if self._cameratype == "NACamera":
if self._sensor_name == "Someone known":
self._state = self._data.camera_data.someoneKnownSeen(
self._home, self._camera_name, self._timeout
)
elif self._sensor_name == "Someone unknown":
self._state = self._data.camera_data.someoneUnknownSeen(
self._home, self._camera_name, self._timeout
)
elif self._sensor_name == "Motion":
self._state = self._data.camera_data.motionDetected(
self._home, self._camera_name, self._timeout
)
elif self._cameratype == "NOC":
if self._sensor_name == "Outdoor motion":
self._state = self._data.camera_data.outdoormotionDetected(
self._home, self._camera_name, self._timeout
)
elif self._sensor_name == "Outdoor human":
self._state = self._data.camera_data.humanDetected(
self._home, self._camera_name, self._timeout
)
elif self._sensor_name == "Outdoor animal":
self._state = self._data.camera_data.animalDetected(
self._home, self._camera_name, self._timeout
)
elif self._sensor_name == "Outdoor vehicle":
self._state = self._data.camera_data.carDetected(
self._home, self._camera_name, self._timeout
)
if self._sensor_name == "Tag Vibration":
self._state = self._data.camera_data.moduleMotionDetected(
self._home, self._module_name, self._camera_name, self._timeout
)
elif self._sensor_name == "Tag Open":
self._state = self._data.camera_data.moduleOpened(
self._home, self._module_name, self._camera_name, self._timeout
)
|
the-stack_0_6783 | from . import shapes, sprite, clock
import time, math
class Sprite:
def __init__(self, window, x=0, y=0, direction=(0, 0), speed=(0, 0), images=[], image_num=0, color_pair=None, group=None):
self.window = window
self.x = x
self.y = y
self.direction = tuple(direction)
self.speed = tuple(speed)
        if color_pair is not None:
            self.color_pair = tuple(color_pair)
        else:
            self.color_pair = color_pair
self.images = images
self.image_num = image_num
self.source = self.images[self.image_num].source
self.width = self.images[self.image_num].width
self.height = self.images[self.image_num].height
self.image = self.images[self.image_num].value
self.animation_clock = clock.Clock()
self.group = group
if type(self.group) == list:
self.group.append(self)
def check_bounds(self):
pass
def unrender(self):
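        # Write the window's background character/colour back into every
        # non-blank cell this sprite covers, flagging cells whose contents
        # actually change so the renderer knows to redraw them.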
for y in range(len(self.image)):
for x in range(len(self.image[y])):
if self.image[y][x] != " " and 0 <= math.floor(self.x) + x <= self.window.width - 2 and 0 <= math.floor(self.y) + y <= self.window.height - 2:
is_changed = not(self.window.screen_array[math.floor(self.y) + y][math.floor(self.x) + x][1:] == [self.window.char, self.window.color_pair])
if not is_changed:
is_changed = self.window.screen_array[math.floor(self.y) + y][math.floor(self.x) + x][0]
self.window.screen_array[math.floor(self.y) + y][math.floor(self.x) + x] = [is_changed, self.window.char, self.window.color_pair]
def render(self):
for y in range(len(self.image)):
for x in range(len(self.image[y])):
if self.image[y][x] != " " and 0 <= math.floor(self.x) + x <= self.window.width - 2 and 0 <= math.floor(self.y) + y <= self.window.height - 2:
is_changed = not(self.window.screen_array[math.floor(self.y) + y][math.floor(self.x) + x][1:] == [self.image[y][x], self.color_pair])
if not is_changed:
is_changed = self.window.screen_array[math.floor(self.y) + y][math.floor(self.x) + x][0]
self.window.screen_array[math.floor(self.y) + y][math.floor(self.x) + x] = [is_changed, self.image[y][x], self.color_pair]
def update(self, dt):
self.unrender()
self.x += self.direction[0] * self.speed[0] * dt
self.y += self.direction[1] * self.speed[1] * dt
self.check_bounds()
def animate(self, loop=True, fps=60):
if self.animation_clock.get_dt() >= 1 / fps:
if self.image_num == len(self.images):
if loop:
self.image_num = 0
else:
self.destroy()
return
self.unrender()
self.source = self.images[self.image_num].source
self.width = self.images[self.image_num].width
self.height = self.images[self.image_num].height
self.image = self.images[self.image_num].value
self.image_num += 1
self.animation_clock.update()
self.render()
def destroy(self):
self.unrender()
if self.group:
self.group.remove(self)
def check_group_collision(self, others):
for obj in others:
collided = self.is_collided_with(obj)
if not(collided is self) and collided:
return collided
def is_collided_with(self, other):
if (self.x < other.x + other.width and self.x + self.width > other.x) and (self.y < other.y + other.height and self.y + self.height > other.y) \
and (isinstance(other, shapes.Rect) or isinstance(other, sprite.Sprite)):
return other |
the-stack_0_6784 | import copy
import logging
import os
from absl import app
from absl import flags
import torch
from torch.nn.functional import cosine_similarity
from torch.optim import AdamW
from torch.utils.tensorboard import SummaryWriter
from torch_geometric.data import DataLoader
from torch_geometric.datasets import PPI
from tqdm import tqdm
from bgrl import *
log = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DEFINE_integer('seed', None, 'Random seed.')
flags.DEFINE_integer('num_workers', 1, 'Number of CPU workers for dataloader.')
# Dataset.
flags.DEFINE_string('dataset_dir', './data', 'Where the dataset resides.')
# Architecture.
flags.DEFINE_integer('predictor_hidden_size', 4096, 'Hidden size of predictor.')
# Training hyperparameters.
flags.DEFINE_integer('steps', 10000, 'The number of training epochs.')
flags.DEFINE_integer('batch_size', 22, 'Number of graphs used in a batch.')
flags.DEFINE_float('lr', 0.02, 'The learning rate for model training.')
flags.DEFINE_float('weight_decay', 5e-4, 'The value of the weight decay.')
flags.DEFINE_float('mm', 0.99, 'The momentum for moving average.')
flags.DEFINE_integer('lr_warmup_steps', 1000, 'Warmup period for learning rate.')
# Augmentations.
flags.DEFINE_float('drop_edge_p_1', 0., 'Probability of edge dropout 1.')
flags.DEFINE_float('drop_feat_p_1', 0., 'Probability of node feature dropout 1.')
flags.DEFINE_float('drop_edge_p_2', 0., 'Probability of edge dropout 2.')
flags.DEFINE_float('drop_feat_p_2', 0., 'Probability of node feature dropout 2.')
# Logging and checkpoint.
flags.DEFINE_string('logdir', None, 'Where the checkpoint and logs are stored.')
flags.DEFINE_integer('log_steps', 10, 'Log information at every log_steps.')
# Evaluation
flags.DEFINE_integer('eval_steps', 2000, 'Evaluate every eval_epochs.')
def main(argv):
# use CUDA_VISIBLE_DEVICES to select gpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
log.info('Using {} for training.'.format(device))
# set random seed
if FLAGS.seed is not None:
log.info('Random seed set to {}.'.format(FLAGS.seed))
set_random_seeds(random_seed=FLAGS.seed)
# create log directory
os.makedirs(FLAGS.logdir, exist_ok=True)
with open(os.path.join(FLAGS.logdir, 'config.cfg'), "w") as file:
file.write(FLAGS.flags_into_string()) # save config file
# setup tensorboard
writer = SummaryWriter(FLAGS.logdir)
# load data
train_dataset = PPI(FLAGS.dataset_dir, split='train')
val_dataset = PPI(FLAGS.dataset_dir, split='val')
test_dataset = PPI(FLAGS.dataset_dir, split='test')
log.info('Dataset {}, graph 0: {}.'.format(train_dataset.__class__.__name__, train_dataset[0]))
# train BGRL using both train and val splits
train_loader = DataLoader(ConcatDataset([train_dataset, val_dataset]), batch_size=FLAGS.batch_size, shuffle=True,
num_workers=FLAGS.num_workers)
# prepare transforms
transform_1 = get_graph_drop_transform(drop_edge_p=FLAGS.drop_edge_p_1, drop_feat_p=FLAGS.drop_feat_p_1)
transform_2 = get_graph_drop_transform(drop_edge_p=FLAGS.drop_edge_p_2, drop_feat_p=FLAGS.drop_feat_p_2)
# build networks
input_size, representation_size = train_dataset.num_node_features, 512
encoder = GraphSAGE_GCN(input_size, 512, 512)
predictor = MLP_Predictor(representation_size, representation_size, hidden_size=FLAGS.predictor_hidden_size)
model = BGRL(encoder, predictor).to(device)
# optimizer
optimizer = AdamW(model.trainable_parameters(), lr=0., weight_decay=FLAGS.weight_decay)
# scheduler
lr_scheduler = CosineDecayScheduler(FLAGS.lr, FLAGS.lr_warmup_steps, FLAGS.steps)
mm_scheduler = CosineDecayScheduler(1 - FLAGS.mm, 0, FLAGS.steps)
def train(data, step):
model.train()
# move data to gpu and transform
data = data.to(device)
x1, x2 = transform_1(data), transform_2(data)
# update learning rate
lr = lr_scheduler.get(step)
for g in optimizer.param_groups:
g['lr'] = lr
# update momentum
mm = 1 - mm_scheduler.get(step)
# forward
optimizer.zero_grad()
q1, y2 = model(x1, x2)
q2, y1 = model(x2, x1)
loss = 2 - cosine_similarity(q1, y2.detach(), dim=-1).mean() - cosine_similarity(q2, y1.detach(), dim=-1).mean()
loss.backward()
# update online network
optimizer.step()
# update target network
model.update_target_network(mm)
# log scalars
writer.add_scalar('params/lr', lr, step)
writer.add_scalar('params/mm', mm, step)
writer.add_scalar('train/loss', loss, step)
def eval(step):
tmp_encoder = copy.deepcopy(model.online_encoder).eval()
train_data = compute_representations(tmp_encoder, train_dataset, device)
val_data = compute_representations(tmp_encoder, val_dataset, device)
test_data = compute_representations(tmp_encoder, test_dataset, device)
val_f1, test_f1 = ppi_train_linear_layer(train_dataset.num_classes, train_data, val_data, test_data, device)
writer.add_scalar('accuracy/val', val_f1, step)
writer.add_scalar('accuracy/test', test_f1, step)
train_iter = iter(train_loader)
for step in tqdm(range(1, FLAGS.steps + 1)):
data = next(train_iter, None)
if data is None:
train_iter = iter(train_loader)
data = next(train_iter, None)
train(data, step)
if step % FLAGS.eval_steps == 0:
eval(step)
# save encoder weights
torch.save({'model': model.online_encoder.state_dict()}, os.path.join(FLAGS.logdir, 'bgrl-wikics.pt'))
if __name__ == "__main__":
log.info('PyTorch version: %s' % torch.__version__)
app.run(main)
|
the-stack_0_6785 | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT: Translate dataset."""
import codecs
import functools
import glob
import gzip
import itertools
import os
import re
import xml.etree.cElementTree as ElementTree
from abc import ABC, abstractmethod
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
Translate dataset based on the data from statmt.org.
Versions exist for the different years using a combination of multiple data
sources. The base `wmt_translate` allows you to create your own config to choose
your own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.
```
config = datasets.wmt.WmtConfig(
version="0.0.1",
language_pair=("fr", "de"),
subsets={
datasets.Split.TRAIN: ["commoncrawl_frde"],
datasets.Split.VALIDATION: ["euelections_dev2019"],
},
)
builder = datasets.builder("wmt_translate", config=config)
```
"""
CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
class SubDataset:
"""Class to keep track of information on a sub-dataset of WMT."""
def __init__(self, name, target, sources, url, path, manual_dl_files=None):
"""Sub-dataset of WMT.
Args:
name: `string`, a unique dataset identifier.
target: `string`, the target language code.
sources: `set<string>`, the set of source language codes.
url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
where to download the raw data from. If two strings are provided, the
first is used for the source language and the second for the target.
Template strings can either contain '{src}' placeholders that will be
filled in with the source language code, '{0}' and '{1}' placeholders
that will be filled in with the source and target language codes in
alphabetical order, or all 3.
path: `string` or `(string, string)`, path(s) or path template(s)
            specifying the path to the raw data relative to the root of the
downloaded archive. If two strings are provided, the dataset is assumed
to be made up of parallel text files, the first being the source and the
second the target. If one string is provided, both languages are assumed
to be stored within the same file and the extension is used to determine
how to parse it. Template strings should be formatted the same as in
`url`.
manual_dl_files: `<list>(string)` (optional), the list of files that must
be manually downloaded to the data directory.
"""
self._paths = (path,) if isinstance(path, str) else path
self._urls = (url,) if isinstance(url, str) else url
self._manual_dl_files = manual_dl_files if manual_dl_files else []
self.name = name
self.target = target
self.sources = set(sources)
def _inject_language(self, src, strings):
"""Injects languages into (potentially) template strings."""
if src not in self.sources:
raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
def _format_string(s):
if "{0}" in s and "{1}" and "{src}" in s:
return s.format(*sorted([src, self.target]), src=src)
elif "{0}" in s and "{1}" in s:
return s.format(*sorted([src, self.target]))
elif "{src}" in s:
return s.format(src=src)
else:
return s
return [_format_string(s) for s in strings]
def get_url(self, src):
return self._inject_language(src, self._urls)
def get_manual_dl_files(self, src):
return self._inject_language(src, self._manual_dl_files)
def get_path(self, src):
return self._inject_language(src, self._paths)
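# Illustrative sketch (not part of the original module): how the placeholder
# injection described in the SubDataset docstring resolves for a hypothetical
# de->en subset. The name, URL and paths below are made up for demonstration.
def _demo_subdataset_templates():
    demo = SubDataset(
        name="demo_subset",
        target="en",
        sources={"de"},
        url="https://example.com/corpus.{0}-{1}.tsv.gz",
        path=("corpus.{src}-en.{src}", "corpus.{src}-en.en"),
    )
    # '{0}' and '{1}' are filled with the language codes in alphabetical order.
    assert demo.get_url("de") == ["https://example.com/corpus.de-en.tsv.gz"]
    # '{src}' is filled with the source language code.
    assert demo.get_path("de") == ["corpus.de-en.de", "corpus.de-en.en"]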
# Subsets used in the training sets for various years of WMT.
_TRAIN_SUBSETS = [
# pylint:disable=line-too-long
SubDataset(
name="commoncrawl",
target="en", # fr-de pair in commoncrawl_frde
sources={"cs", "de", "es", "fr", "ru"},
url="https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-commoncrawl.tgz",
path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
),
SubDataset(
name="commoncrawl_frde",
target="de",
sources={"fr"},
url=(
"https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/commoncrawl.fr.gz",
"https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/commoncrawl.de.gz",
),
path=("", ""),
),
SubDataset(
name="czeng_10",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng10",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="czeng_16pre",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng16pre",
manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"],
path="",
),
SubDataset(
name="czeng_16",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
# This dataset differs from the above in the filtering that is applied
# during parsing.
name="czeng_17",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="dcep_v1",
target="en",
sources={"lv"},
url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/dcep.lv-en.v1.tgz",
path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
),
SubDataset(
name="europarl_v7",
target="en",
sources={"cs", "de", "es", "fr"},
url="https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-europarl-v7.tgz",
path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
),
SubDataset(
name="europarl_v7_frde",
target="de",
sources={"fr"},
url=(
"https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/europarl-v7.fr.gz",
"https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/europarl-v7.de.gz",
),
path=("", ""),
),
SubDataset(
name="europarl_v8_18",
target="en",
sources={"et", "fi"},
url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/translation-task/training-parallel-ep-v8.tgz",
path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v8_16",
target="en",
sources={"fi", "ro"},
url="https://huggingface.co/datasets/wmt/wmt16/resolve/main/translation-task/training-parallel-ep-v8.tgz",
path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v9",
target="en",
sources={"cs", "de", "fi", "lt"},
url="https://huggingface.co/datasets/wmt/europarl/resolve/main/v9/training/europarl-v9.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="gigafren",
target="en",
sources={"fr"},
url="https://huggingface.co/datasets/wmt/wmt10/resolve/main/training-giga-fren.tar",
path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
),
SubDataset(
name="hindencorp_01",
target="en",
sources={"hi"},
url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp",
manual_dl_files=["hindencorp0.1.gz"],
path="",
),
SubDataset(
name="leta_v1",
target="en",
sources={"lv"},
url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/leta.v1.tgz",
path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
),
SubDataset(
name="multiun",
target="en",
sources={"es", "fr"},
url="https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-un.tgz",
path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
),
SubDataset(
name="newscommentary_v9",
target="en",
sources={"cs", "de", "fr", "ru"},
url="https://huggingface.co/datasets/wmt/wmt14/resolve/main/training-parallel-nc-v9.tgz",
path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
),
SubDataset(
name="newscommentary_v10",
target="en",
sources={"cs", "de", "fr", "ru"},
url="https://huggingface.co/datasets/wmt/wmt15/resolve/main/training-parallel-nc-v10.tgz",
path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
),
SubDataset(
name="newscommentary_v11",
target="en",
sources={"cs", "de", "ru"},
url="https://huggingface.co/datasets/wmt/wmt16/resolve/main/translation-task/training-parallel-nc-v11.tgz",
path=(
"training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
"training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v12",
target="en",
sources={"cs", "de", "ru", "zh"},
url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/training-parallel-nc-v12.tgz",
path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
),
SubDataset(
name="newscommentary_v13",
target="en",
sources={"cs", "de", "ru", "zh"},
url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/translation-task/training-parallel-nc-v13.tgz",
path=(
"training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
"training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v14",
target="en", # fr-de pair in newscommentary_v14_frde
sources={"cs", "de", "kk", "ru", "zh"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz",
path="",
),
SubDataset(
name="newscommentary_v14_frde",
target="de",
sources={"fr"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz",
path="",
),
SubDataset(
name="onlinebooks_v1",
target="en",
sources={"lv"},
url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/books.lv-en.v1.tgz",
path=("farewell/farewell.lv", "farewell/farewell.en"),
),
SubDataset(
name="paracrawl_v1",
target="en",
sources={"cs", "de", "et", "fi", "ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v1_ru",
target="en",
sources={"ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
"paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v3",
target="en", # fr-de pair in paracrawl_v3_frde
sources={"cs", "de", "fi", "lt"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz",
path="",
),
SubDataset(
name="paracrawl_v3_frde",
target="de",
sources={"fr"},
url=(
"https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz",
"https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz",
),
path=("", ""),
),
SubDataset(
name="rapid_2016",
target="en",
sources={"de", "et", "fi"},
url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/translation-task/rapid2016.tgz",
path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
),
SubDataset(
name="rapid_2016_ltfi",
target="en",
sources={"fi", "lt"},
url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip",
path="rapid2016.en-{src}.tmx",
),
SubDataset(
name="rapid_2019",
target="en",
sources={"de"},
url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip",
path=("rapid2019.de-en.de", "rapid2019.de-en.en"),
),
SubDataset(
name="setimes_2",
target="en",
sources={"ro", "tr"},
url="https://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz",
path="",
),
SubDataset(
name="uncorpus_v1",
target="en",
sources={"ru", "zh"},
url="https://huggingface.co/datasets/wmt/uncorpus/resolve/main/UNv1.0.en-{src}.tar.gz",
path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
),
SubDataset(
name="wikiheadlines_fi",
target="en",
sources={"fi"},
url="https://huggingface.co/datasets/wmt/wmt15/resolve/main/wiki-titles.tgz",
path="wiki/fi-en/titles.fi-en",
),
SubDataset(
name="wikiheadlines_hi",
target="en",
sources={"hi"},
url="https://huggingface.co/datasets/wmt/wmt14/resolve/main/wiki-titles.tgz",
path="wiki/hi-en/wiki-titles.hi-en",
),
SubDataset(
# Verified that wmt14 and wmt15 files are identical.
name="wikiheadlines_ru",
target="en",
sources={"ru"},
url="https://huggingface.co/datasets/wmt/wmt15/resolve/main/wiki-titles.tgz",
path="wiki/ru-en/wiki.ru-en",
),
SubDataset(
name="wikititles_v1",
target="en",
sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"},
url="https://huggingface.co/datasets/wmt/wikititles/resolve/main/v1/wikititles-v1.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="yandexcorpus",
target="en",
sources={"ru"},
url="https://translate.yandex.ru/corpus?lang=en",
manual_dl_files=["1mcorpus.zip"],
path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"),
),
# pylint:enable=line-too-long
] + [
SubDataset( # pylint:disable=g-complex-comprehension
name=ss,
target="en",
sources={"zh"},
url="ftp://cwmt-wmt:[email protected]/parallel/%s.zip" % ss,
path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
)
for ss in CWMT_SUBSET_NAMES
]
_DEV_SUBSETS = [
SubDataset(
name="euelections_dev2019",
target="de",
sources={"fr"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
),
SubDataset(
name="newsdev2014",
target="en",
sources={"hi"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
),
SubDataset(
name="newsdev2015",
target="en",
sources={"fi"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"ro", "tr"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2016",
target="en",
sources={"ro", "tr"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2017",
target="en",
sources={"lv", "zh"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2018",
target="en",
sources={"et"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2019",
target="en",
sources={"gu", "kk", "lt"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"fr"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newssyscomb2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
),
SubDataset(
name="newstest2008",
target="en",
sources={"cs", "de", "es", "fr", "hu"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
),
SubDataset(
name="newstest2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
),
SubDataset(
name="newstest2010",
target="en",
sources={"cs", "de", "es", "fr"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
),
SubDataset(
name="newstest2011",
target="en",
sources={"cs", "de", "es", "fr"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
),
SubDataset(
name="newstest2012",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
),
SubDataset(
name="newstest2013",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
),
SubDataset(
name="newstest2014",
target="en",
sources={"cs", "de", "es", "fr", "hi", "ru"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2015",
target="en",
sources={"cs", "de", "fi", "ru"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2016",
target="en",
sources={"cs", "de", "fi", "ro", "ru", "tr"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2016",
target="en",
sources={"fi"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
),
SubDataset(
name="newstest2017",
target="en",
sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2017",
target="en",
sources={"fi"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
),
SubDataset(
name="newstest2018",
target="en",
sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
),
]
DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
_CZENG17_FILTER = SubDataset(
name="czeng17_filter",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip",
path="convert_czeng16_to_17.pl",
)
class WmtConfig(datasets.BuilderConfig):
"""BuilderConfig for WMT."""
def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
"""BuilderConfig for WMT.
Args:
url: The reference URL for the dataset.
citation: The paper citation for the dataset.
description: The description of the dataset.
          language_pair: pair of languages that will be used for translation. Should
            contain 2-letter coded strings. For example: ("en", "de").
          subsets: Dict[split, list[str]]. List of the subsets to use for each of the
            splits. Note that WMT subclasses overwrite this parameter.
**kwargs: keyword arguments forwarded to super.
"""
name = "%s-%s" % (language_pair[0], language_pair[1])
if "name" in kwargs: # Add name suffix for custom configs
name += "." + kwargs.pop("name")
super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
self.url = url or "http://www.statmt.org"
self.citation = citation
self.language_pair = language_pair
self.subsets = subsets
# TODO(PVP): remove when manual dir works
# +++++++++++++++++++++
if language_pair[1] in ["cs", "hi", "ru"]:
            raise NotImplementedError(
"The dataset for {}-en is currently not fully supported.".format(language_pair[1])
)
# +++++++++++++++++++++
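# Illustrative sketch (not part of the original script): a WMT builder would
# typically register a config roughly like this; the version string and the
# exact subset choices below are placeholders, not a real configuration.
#
#   WmtConfig(
#       version=datasets.Version("1.0.0"),
#       language_pair=("de", "en"),
#       description="WMT de-en translation task",
#       subsets={
#           datasets.Split.TRAIN: ["europarl_v9", "newscommentary_v14"],
#           datasets.Split.VALIDATION: ["newstest2018"],
#       },
#   )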
class Wmt(ABC, datasets.GeneratorBasedBuilder):
"""WMT translation dataset."""
def __init__(self, *args, **kwargs):
if type(self) == Wmt and "config" not in kwargs: # pylint: disable=unidiomatic-typecheck
raise ValueError(
"The raw `wmt_translate` can only be instantiated with the config "
"kwargs. You may want to use one of the `wmtYY_translate` "
"implementation instead to get the WMT dataset for a specific year."
)
super(Wmt, self).__init__(*args, **kwargs)
@property
@abstractmethod
def _subsets(self):
"""Subsets that make up each split of the dataset."""
        raise NotImplementedError("This is an abstract method")
@property
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
dataset = DATASET_MAP[ss_name]
if dataset.target != target or source not in dataset.sources:
logger.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
else:
filtered_subsets[split].append(ss_name)
logger.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets
def _info(self):
src, target = self.config.language_pair
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{"translation": datasets.features.Translation(languages=self.config.language_pair)}
),
supervised_keys=(src, target),
homepage=self.config.url,
citation=self.config.citation,
)
def _vocab_text_gen(self, split_subsets, extraction_map, language):
for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
yield ex[language]
def _split_generators(self, dl_manager):
source, _ = self.config.language_pair
manual_paths_dict = {}
urls_to_download = {}
for ss_name in itertools.chain.from_iterable(self.subsets.values()):
if ss_name == "czeng_17":
# CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
# the filtering script so we can parse out which blocks need to be
# removed.
urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
# get dataset
dataset = DATASET_MAP[ss_name]
if dataset.get_manual_dl_files(source):
# TODO(PVP): following two lines skip configs that are incomplete for now
# +++++++++++++++++++++
logger.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
manual_dl_files = dataset.get_manual_dl_files(source)
manual_paths = [
os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
for fname in manual_dl_files
]
assert all(
os.path.exists(path) for path in manual_paths
), "For {0}, you must manually download the following file(s) from {1} and place them in {2}: {3}".format(
dataset.name, dataset.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files)
)
# set manual path for correct subset
manual_paths_dict[ss_name] = manual_paths
else:
urls_to_download[ss_name] = dataset.get_url(source)
# Download and extract files from URLs.
downloaded_files = dl_manager.download_and_extract(urls_to_download)
# Extract manually downloaded files.
manual_files = dl_manager.extract(manual_paths_dict)
extraction_map = dict(downloaded_files, **manual_files)
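        # Note: the generators created in the loop below are never consumed, so the
        # loop is effectively a no-op; it appears to be a leftover from an earlier
        # vocabulary-building step.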
for language in self.config.language_pair:
self._vocab_text_gen(self.subsets[datasets.Split.TRAIN], extraction_map, language)
return [
datasets.SplitGenerator( # pylint:disable=g-complex-comprehension
name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
)
for split, split_subsets in self.subsets.items()
]
def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
"""Returns the examples in the raw (text) form."""
source, _ = self.config.language_pair
def _get_local_paths(dataset, extract_dirs):
rel_paths = dataset.get_path(source)
if len(extract_dirs) == 1:
extract_dirs = extract_dirs * len(rel_paths)
return [
os.path.join(ex_dir, rel_path) if rel_path else ex_dir
for ex_dir, rel_path in zip(extract_dirs, rel_paths)
]
for ss_name in split_subsets:
# TODO(PVP) remove following five lines when manual data works
# +++++++++++++++++++++
dataset = DATASET_MAP[ss_name]
source, _ = self.config.language_pair
if dataset.get_manual_dl_files(source):
logger.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
logger.info("Generating examples from: %s", ss_name)
dataset = DATASET_MAP[ss_name]
extract_dirs = extraction_map[ss_name]
files = _get_local_paths(dataset, extract_dirs)
if ss_name.startswith("czeng"):
if ss_name.endswith("16pre"):
sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
elif ss_name.endswith("17"):
filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
else:
sub_generator = _parse_czeng
elif ss_name == "hindencorp_01":
sub_generator = _parse_hindencorp
elif len(files) == 2:
if ss_name.endswith("_frde"):
sub_generator = _parse_frde_bitext
else:
sub_generator = _parse_parallel_sentences
elif len(files) == 1:
fname = files[0]
# Note: Due to formatting used by `download_manager`, the file
# extension may not be at the end of the file path.
if ".tsv" in fname:
sub_generator = _parse_tsv
elif (
ss_name.startswith("newscommentary_v14")
or ss_name.startswith("europarl_v9")
or ss_name.startswith("wikititles_v1")
):
sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
sub_generator = _parse_tmx
elif ss_name.startswith("wikiheadlines"):
sub_generator = _parse_wikiheadlines
else:
raise ValueError("Unsupported file format: %s" % fname)
else:
raise ValueError("Invalid number of files: %d" % len(files))
for sub_key, ex in sub_generator(*files):
if not all(ex.values()):
continue
# TODO(adarob): Add subset feature.
# ex["subset"] = subset
key = "{}/{}".format(ss_name, sub_key)
if with_translation is True:
ex = {"translation": ex}
yield key, ex
def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
return g.read().decode("utf-8").split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with open(path, "rb") as f:
return f.read().decode("utf-8").split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with open(path, encoding="utf-8") as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = sorted(glob.glob(f1))
f2_files = sorted(glob.glob(f2))
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
    assert len(f1_files) == len(f2_files), "Number of files does not match: %d vs %d for %s vs %s." % (
len(f1_files),
len(f2_files),
f1,
f2,
)
for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences),
len(l2_sentences),
f1_i,
f2_i,
)
for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
key = "{}/{}".format(f_id, line_id)
yield key, {l1: s1, l2: s2}
def _parse_frde_bitext(fr_path, de_path):
with open(fr_path, encoding="utf-8") as f:
fr_sentences = f.read().split("\n")
with open(de_path, encoding="utf-8") as f:
de_sentences = f.read().split("\n")
assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(fr_sentences),
len(de_sentences),
fr_path,
de_path,
)
for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
yield line_id, {"fr": s1, "de": s2}
def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with open(path, "rb") as f:
# Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
utf_f = codecs.getreader("utf-8")(f)
for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
if elem.tag == "tu":
yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
elem.clear()
def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with open(path, encoding="utf-8") as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logger.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
continue
s1, s2 = cols
yield j, {l1: s1.strip(), l2: s2.strip()}
def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with open(path, encoding="utf-8") as f:
for line_id, line in enumerate(f):
s1, s2 = line.split("|||")
yield line_id, {l1: s1.strip(), l2: s2.strip()}
def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with open(filter_path, encoding="utf-8") as f:
bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
logger.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
for path in paths:
for gz_path in sorted(glob.glob(path)):
with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
filename = os.path.basename(gz_path)
for line_id, line in enumerate(f):
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
sub_key = "{}/{}".format(filename, line_id)
yield sub_key, {
"cs": cs.strip(),
"en": en.strip(),
}
def _parse_hindencorp(path):
with open(path, encoding="utf-8") as f:
for line_id, line in enumerate(f):
split_line = line.split("\t")
if len(split_line) != 5:
logger.warning("Skipping invalid HindEnCorp line: %s", line)
continue
yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
|
the-stack_0_6786 | import os.path
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='wp_iso3166',
version="0.1",
author="original author, Mike Spindel",
author_email="-",
license="MIT",
keywords="iso 3166-1 country codes WorldPop",
url="https://github.com/vesnikos/wp_iso3166",
description='Self-contained ISO 3166-1 country definitions.',
packages=find_packages(exclude=['ez_setup']),
long_description=read('README.rst'),
zip_safe=False,
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
])
|
the-stack_0_6787 | # Rewritten by RayzoR
import sys
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
from com.l2jfrozen.gameserver.model.base import Race
qn = "236_SeedsOfChaos"
DROP_RATE = 20
#prerequisites:
STAR_OF_DESTINY = 5011
#Quest items
BLACK_ECHO_CRYSTAL = 9745
SHINING_MEDALLION = 9743
#How many of each do you need?
NEEDED = {
BLACK_ECHO_CRYSTAL: 1,
SHINING_MEDALLION: 62
}
SCROLL_ENCHANT_WEAPON_A = 729
#NPCs
KEKROPUS,WIZARD,KATENAR,ROCK,HARKILGAMED,MAO,RODENPICULA,NORNIL = 32138,31522,32235,32238,32334,32190,32237,32239
#Mobs
NEEDLE_STAKATO_DRONE = [21516,21517]
SPLENDOR_MOBS = [21520,21521,21522,21523,21524,21525,21526,21527,21528,21529,21530,21531,21532,21533,21534,21535,21536,21537,21538,21539,21540,21541]
#Mobs, cond, Drop
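#Each entry maps a mob id to [required quest cond, quest item]; the item only
#drops while the player's quest cond equals that value (checked in onKill below).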
DROPLIST = {
#Needle Stakato Drones
21516: [2,BLACK_ECHO_CRYSTAL],
21517: [2,BLACK_ECHO_CRYSTAL],
#Splendor Mobs
21520: [12,SHINING_MEDALLION],
21521: [12,SHINING_MEDALLION],
21522: [12,SHINING_MEDALLION],
21523: [12,SHINING_MEDALLION],
21524: [12,SHINING_MEDALLION],
21525: [12,SHINING_MEDALLION],
21526: [12,SHINING_MEDALLION],
21527: [12,SHINING_MEDALLION],
21528: [12,SHINING_MEDALLION],
21529: [12,SHINING_MEDALLION],
21530: [12,SHINING_MEDALLION],
21531: [12,SHINING_MEDALLION],
21532: [12,SHINING_MEDALLION],
21533: [12,SHINING_MEDALLION],
21534: [12,SHINING_MEDALLION],
21535: [12,SHINING_MEDALLION],
21536: [12,SHINING_MEDALLION],
21537: [12,SHINING_MEDALLION],
21538: [12,SHINING_MEDALLION],
21539: [12,SHINING_MEDALLION],
21540: [12,SHINING_MEDALLION],
21541: [12,SHINING_MEDALLION]
}
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.katenar = self.harkil = 0
self.questItemId = [BLACK_ECHO_CRYSTAL, SHINING_MEDALLION]
def onEvent (self,event,st) :
if event == "1" : #Go talk to the wizard!
st.setState(STARTED)
st.set("cond","1")
st.playSound("ItemSound.quest_accept")
htmltext = "32138_02b.htm"
elif event == "1_yes" : #Ok, know about those Stakato Drones?
htmltext = "31522_01c.htm"
elif event == "1_no" : #You suck. Come back when you want to talk
htmltext = "31522_01no.htm"
elif event == "2" : #Get me the crystal
st.set("cond","2")
htmltext = "31522_02.htm"
elif event == "31522_03b" :
st.takeItems(BLACK_ECHO_CRYSTAL,-1)
htmltext = event + ".htm"
elif event == "4" : #Time to summon this bad boy
st.set("cond","4")
if not self.katenar :
st.addSpawn(KATENAR,120000)
self.katenar = 1
st.startQuestTimer("Despawn_Katenar",120000)
return
elif event == "5" : #gotta go. talk to Harkilgamed
st.set("cond","5")
htmltext = "32235_02.htm"
elif event == "spawn_harkil" : #talk to the rock, this spawns Harkilgamed
if not self.harkil :
st.addSpawn(HARKILGAMED,120000)
                self.harkil = 1
st.startQuestTimer("Despawn_Harkil",120000)
return
elif event == "6" : #now go hunt splendor mobs
st.set("cond","12")
htmltext = "32236_06.htm"
elif event == "8" : #go back to Kekropus
st.set("cond","14")
htmltext = "32236_08.htm"
elif event == "9" : #Go talk to Mao, no not the dictator Mao... the Vice Hierarch Mao. <_<
st.set("cond","15")
htmltext = "32138_09.htm"
elif event == "10" : #This is where you can find Rodenpicula.
st.set("cond","16")
st.getPlayer().teleToLocation(-119534,87176,-12593)
htmltext = "32190_02.htm"
elif event == "11" : #Go talk to Mother Nornil now
st.set("cond","17")
htmltext = "32237_11.htm"
elif event == "12" : #Get Rodenpicula's permission
st.set("cond","18")
htmltext = "32239_12.htm"
elif event == "13" : #LETS DO THIS!!
st.set("cond","19")
htmltext = "32237_13.htm"
elif event == "14" : #LEROOY JENKINS!!!! Finish the quest at Rodenpicula
st.set("cond","20")
htmltext = "32239_14.htm"
elif event == "15" : #done done done!!!
st.giveItems(SCROLL_ENCHANT_WEAPON_A,1)
st.setState(COMPLETED)
htmltext = "32237_15.htm"
elif event == "Despawn_Katenar" :
self.katenar = 0
return
elif event == "Despawn_Harkil" :
self.harkil = 0
return
else :
htmltext = event + ".htm" #this is for having to go through pages upon pages of html text... <_<
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
cond = st.getInt("cond")
if npcId == KEKROPUS :
if id == CREATED :
st.set("cond","0")
if player.getRace() != Race.Kamael :
st.exitQuest(1)
htmltext = "<html><body>I'm sorry, but I can only give this quest to Kamael. Talk to Magister Ladd.</body></html>"
elif player.getLevel() < 75 :
st.exitQuest(1)
htmltext = "32138_01.htm" #not qualified
elif not st.getQuestItemsCount(STAR_OF_DESTINY) :
st.exitQuest(1)
htmltext = "32138_01a.htm" #not qualified
else :
htmltext = "32138_02.htm" # Successful start: Talk to me a few times,
elif id == STARTED :
if cond < 14 :
htmltext = "32138_02c.htm"
elif cond == 14:
htmltext = "32138_08.htm"
else :
htmltext = "<html><body>Kekropus:<br>Go talk to Rodenpicula. Mao can help you get to her.</body></html>"
elif id == COMPLETED :
htmltext = "<html><body>You have already completed this quest.</body></html>"
elif npcId == WIZARD and id == STARTED:
# first time talking to Wizard. Talk a bit
if cond==1 :
htmltext = "31522_01.htm"
            # Why are you back already? You don't have the echo crystal
elif cond==2 :
htmltext = "31522_02a.htm" # you haven't gotten the crystal yet?
# aha! Here is the black echo crystal! Now where's that one chap?
elif cond == 3 or (cond == 4 and not self.katenar) :
htmltext = "31522_03.htm" # ah yes. Now you get to talk to this guy that I will soon summon
else :
htmltext = "31522_04.htm" #shouldn't you be talking to Katenar?
elif npcId == KATENAR and id == STARTED:
if cond == 4:
htmltext = "32235_01.htm"
elif cond >= 5:
htmltext = "32235_02.htm"
elif npcId == ROCK and id == STARTED:
if cond == 5 or cond == 13:
htmltext = "32238.htm" #click the link to spawn Harkilgamed
else:
htmltext = "<html><body>A strange rock...</body></html>"
elif npcId == HARKILGAMED and id == STARTED:
if cond == 5:
htmltext = "32236_05.htm" #First time talking to Harkilgamed
elif cond == 12:
htmltext = "32236_06.htm" #Kill the Splendor mobs, bring back 62 Shining Medallions
elif cond == 13:
st.takeItems(SHINING_MEDALLION,-1)
htmltext = "32236_07.htm"
elif cond > 13:
htmltext = "<html><body>Harkilgamed:<br><br>Go talk to Kekropus already.</body></html>"
elif npcId == MAO and id == STARTED: #Ok. The deal with Mao is that he's supposed to port you to Mother Nornil, but since she's not yet in the spawnlist, he's just gonna tell ya where to find her.
#THIS MEANS: WHOEVER SPAWNS NORNIL AND RODENPICULA MUST WRITE THE FOLLOWING .htm FILE ACCORDINGLY
if cond == 15 or cond == 16:
htmltext = "32190_01.htm"
elif npcId == RODENPICULA and id==STARTED:
if cond == 16:
htmltext = "32237_10.htm" #heys. long talk, figure stuff out
elif cond == 17:
htmltext = "32237_11.htm" #talk to nornil already
elif cond == 18:
htmltext = "32237_12.htm" #you want approval
elif cond == 19:
htmltext = "32237_13.htm" #here's approval, talk to her
elif cond == 20:
htmltext = "32237_14.htm" #congrats. here's a scroll
elif npcId == NORNIL and id==STARTED:
if cond == 17:
htmltext = "32239_11.htm" #yo. get rodenpicula's approval
elif cond == 18:
htmltext = "32239_12.htm" #i need rodenpicula's approval
elif cond == 19:
htmltext = "32239_13.htm" #lets get it over with
elif cond == 20:
htmltext = "32239_14.htm" #you're good. talk to roden one more time
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
#The following algorithm should work for both quest mobs and drops for this quest.... hopefully.
npcId = npc.getNpcId()
dropcond, item = DROPLIST[npcId]
drop = st.getRandom(100)
cond = st.getInt("cond")
if drop < DROP_RATE and cond == dropcond :
if st.getQuestItemsCount(item) < NEEDED[item] :
st.giveItems(item,1)
st.playSound("ItemSound.quest_itemget")
if st.getQuestItemsCount(item) == NEEDED[item]:
st.set("cond",str(cond+1))
return
QUEST = Quest(236,qn,"Seeds of Chaos")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(KEKROPUS)
QUEST.addTalkId(KEKROPUS)
QUEST.addTalkId(WIZARD)
QUEST.addTalkId(KATENAR)
QUEST.addTalkId(ROCK)
QUEST.addTalkId(HARKILGAMED)
QUEST.addTalkId(MAO)
QUEST.addTalkId(RODENPICULA)
QUEST.addTalkId(NORNIL)
for i in DROPLIST.keys():
QUEST.addKillId(i)
|
the-stack_0_6788 | # -*- coding: utf-8 -*-
from os.path import join
from os.path import dirname
from os.path import isfile
class Template(object):
SUPPORTED_METHODS = {}
TEMPLATES = {}
def __init__(self, estimator, target_language='java',
target_method='predict', **kwargs):
# pylint: disable=unused-argument
self.target_language = str(target_language)
self.target_method = str(target_method)
# Default settings:
self.class_name = 'Brain'
self.method_name = 'predict'
self._num_format = lambda x: str(x)
self.use_file = False
def indent(self, text, n_indents=1, skipping=False):
"""
        Indent text with the language-specific indentation (single spaces by default).
        Parameters
        ----------
        :param text : string
            The text which gets a specific indentation.
:param n_indents : int, default: 1
The number of indentations.
:param skipping : boolean, default: False
Whether to skip the initial indentation.
Returns
-------
return : string
The indented text.
"""
lines = text.splitlines()
space = self.TEMPLATES.get(self.target_language).get('indent', ' ')
# Single line:
if len(lines) == 1:
if skipping:
return text.strip()
return n_indents * space + text.strip()
# Multiple lines:
indented_lines = []
for idx, line in enumerate(lines):
            if skipping and idx == 0:
indented_lines.append(line)
else:
line = n_indents * space + line
indented_lines.append(line)
indented_text = '\n'.join(indented_lines)
return indented_text
def temp(self, name, templates=None, n_indents=None, skipping=False):
"""
        Get the specific template of the chosen
        programming language.
        Parameters
        ----------
        :param name : string
            The key name of the template.
        :param templates : dict, default: None
            The templates to search in; defaults to the language-specific TEMPLATES.
        :param n_indents : int, default: None
            The number of indentations.
        :param skipping : bool, default: False
            Whether to skip the initial indentation.
Returns
-------
return : string
The wanted template string.
"""
if templates is None:
templates = self.TEMPLATES.get(self.target_language)
keys = name.split('.')
key = keys.pop(0).lower()
template = templates.get(key, None)
if template is not None:
if isinstance(template, str):
if n_indents is not None:
template = self.indent(template, n_indents, skipping)
return template
else:
keys = '.'.join(keys)
return self.temp(keys, template, skipping=False)
else:
class_name = self.__class__.__name__
estimator_type = getattr(self, 'estimator_type') if \
hasattr(self, 'estimator_type') else 'classifier'
path = join(dirname(__file__), 'estimator',
estimator_type, class_name, 'templates',
self.target_language, name + '.txt')
if isfile(path):
with open(path, 'r') as file_:
template = file_.read()
if n_indents is not None:
template = self.indent(template, n_indents, skipping)
return template
else:
err = "Template '{}' wasn't found.".format(name)
raise AttributeError(err)
def repr(self, value):
return self._num_format(value)
def data(self, dict_):
copy = self.__dict__.copy()
copy.update(dict_) # update and extend dictionary
return copy
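# Illustrative usage sketch (not part of the original module): a minimal subclass
# showing how TEMPLATES drives indent() and temp(); the template strings below are
# made up for demonstration purposes.
if __name__ == '__main__':
    class _DemoTemplate(Template):
        TEMPLATES = {
            'java': {
                'indent': '    ',
                'init': '{type} {name} = {value};',
            }
        }
    demo = _DemoTemplate(estimator=None, target_language='java')
    # temp() resolves the key against TEMPLATES and applies the indentation:
    print(demo.temp('init', n_indents=1))    # '    {type} {name} = {value};'
    # indent() prefixes every line with the language-specific indent unit:
    print(demo.indent('a\nb', n_indents=2))  # '        a' / '        b'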
|
the-stack_0_6790 | #! /usr/bin/env python
import numpy as np
import sys
sys.path.append("spnet/")
sys.path.append("../spnet/")
from diagnostics import compute_iou
def test_compute_iou():
# make up two ellipes
Y_true = (100, 140, 120, 60, 90, 0, 10.3) # ellipse a
Y_pred = (120, 123, 120, 60, 149.97, 0, 7.8) # ellips b
#iou = evaluate_spnet.compute_iou(Y_true, Y_pred)
iou = compute_iou(Y_true, Y_pred)
np.testing.assert_equal(iou, 0.44227983107795693) # force an assertion for the test
return iou
if __name__ == '__main__':
# current setup: testing a couple pre-defined ellipses
iou = test_compute_iou()
print("IOU score = ",iou)
|
the-stack_0_6794 | from app.helpers.cache import CacheExpiresAfter
from app.helpers.units import Units
from configparser import ConfigParser
config = ConfigParser()
config.read('config.ini')
def handle_cache_expires_after():
    raw_value = config["cache"]["cache_expires_after"]
    try:
        if CacheExpiresAfter(raw_value) is CacheExpiresAfter.DISABLE:
            return CacheExpiresAfter.DISABLE
        if CacheExpiresAfter(raw_value) is CacheExpiresAfter.TODAY:
            return CacheExpiresAfter.TODAY
    except ValueError:
        # Any other value is interpreted as a number of seconds.
        return int(raw_value)
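# Illustrative mapping (the left-hand sides are assumed config.ini values; the
# exact strings depend on how CacheExpiresAfter is defined):
#   cache_expires_after = disable -> CacheExpiresAfter.DISABLE
#   cache_expires_after = today   -> CacheExpiresAfter.TODAY
#   cache_expires_after = 3600    -> 3600 (seconds)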
class BaseConfigError(Exception):
pass
class BaseConfig:
OPEN_WEATHER_MAP_API_KEY = config["api"]["open_weather_map_key"]
LATITUDE = config["general"]["latitude"]
LONGITUDE = config["general"]["longitude"]
CACHE_EXPIRES_AFTER = handle_cache_expires_after()
DEFAULT_BASE_UNITS = Units.METRIC
    BASE_UNITS = DEFAULT_BASE_UNITS if config["general"].get("base_units") is None else Units(
        config["general"].get("base_units"))
    DEFAULT_LANGUAGE = "en"
    LANGUAGE = DEFAULT_LANGUAGE if config["general"].get("language") is None else config["general"].get("language")
MEMCACHED_SERVER = config["cache"]["memcached"]
if BaseConfig.OPEN_WEATHER_MAP_API_KEY is None:
raise BaseConfigError("OPEN_WEATHER_MAP_API_KEY is missing")
|
the-stack_0_6795 | """Tests for the main tournament class."""
import csv
import logging
from multiprocessing import Queue, cpu_count
import unittest
import warnings
from hypothesis import given, example, settings
from hypothesis.strategies import integers, floats
from axelrod.tests.property import (tournaments,
prob_end_tournaments,
spatial_tournaments,
strategy_lists)
import axelrod
try:
# Python 3
from unittest.mock import MagicMock
except ImportError:
# Python 2
from mock import MagicMock
test_strategies = [axelrod.Cooperator,
axelrod.TitForTat,
axelrod.Defector,
axelrod.Grudger,
axelrod.GoByMajority]
test_repetitions = 5
test_turns = 100
test_prob_end = .5
test_edges = [(0, 1), (1, 2), (3, 4)]
deterministic_strategies = [s for s in axelrod.strategies
if not s().classifier['stochastic']]
class TestTournament(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.game = axelrod.Game()
cls.players = [s() for s in test_strategies]
cls.test_name = 'test'
cls.test_repetitions = test_repetitions
cls.test_turns = test_turns
cls.expected_payoff = [
[600, 600, 0, 600, 600],
[600, 600, 199, 600, 600],
[1000, 204, 200, 204, 204],
[600, 600, 199, 600, 600],
[600, 600, 199, 600, 600]]
cls.expected_cooperation = [
[200, 200, 200, 200, 200],
[200, 200, 1, 200, 200],
[0, 0, 0, 0, 0],
[200, 200, 1, 200, 200],
[200, 200, 1, 200, 200]]
cls.filename = "test_outputs/test_tournament.csv"
def test_init(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=self.test_turns,
noise=0.2)
self.assertEqual(len(tournament.players), len(test_strategies))
self.assertIsInstance(
tournament.players[0].match_attributes['game'], axelrod.Game
)
self.assertEqual(tournament.game.score(('C', 'C')), (3, 3))
self.assertEqual(tournament.turns, self.test_turns)
self.assertEqual(tournament.repetitions, 10)
self.assertEqual(tournament.name, 'test')
self.assertTrue(tournament._with_morality)
self.assertIsInstance(tournament._logger, logging.Logger)
self.assertEqual(tournament.noise, 0.2)
anonymous_tournament = axelrod.Tournament(players=self.players)
self.assertEqual(anonymous_tournament.name, 'axelrod')
def test_warning(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=10,
repetitions=1)
with warnings.catch_warnings(record=True) as w:
# Check that a warning is raised if no results set is built and no
# filename given
results = tournament.play(build_results=False, progress_bar=False)
self.assertEqual(len(w), 1)
with warnings.catch_warnings(record=True) as w:
            # Check that no warning is raised if no results set is built and a
            # filename is given
tournament.play(build_results=False,
filename=self.filename, progress_bar=False)
self.assertEqual(len(w), 0)
def test_serial_play(self):
# Test that we get an instance of ResultSet
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
# Test that _run_serial_repetitions is called with empty matches list
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
results = tournament.play(progress_bar=False)
self.assertEqual(tournament.num_interactions, 75)
def test_serial_play_with_different_game(self):
# Test that a non default game is passed to the result set
game = axelrod.Game(p=-1, r=-1, s=-1, t=-1)
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=game,
turns=1,
repetitions=1)
results = tournament.play(progress_bar=False)
self.assertEqual(results.game.RPST(), (-1, -1, -1, -1))
def test_no_progress_bar_play(self):
"""Test that progress bar is not created for progress_bar=False"""
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
# Test with build results
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
# Check that no progress bar was created
call_progress_bar = lambda: tournament.progress_bar.total
self.assertRaises(AttributeError, call_progress_bar)
# Test without build results
results = tournament.play(progress_bar=False, build_results=False,
filename=self.filename)
self.assertIsNone(results)
results = axelrod.ResultSetFromFile(self.filename)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertRaises(AttributeError, call_progress_bar)
def test_progress_bar_play(self):
"""Test that progress bar is created by default and with True argument"""
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
results = tournament.play()
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(tournament.progress_bar.total, 15)
results = tournament.play(progress_bar=True)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(tournament.progress_bar.total, 15)
# Test without build results
results = tournament.play(progress_bar=True, build_results=False,
filename=self.filename)
self.assertIsNone(results)
results = axelrod.ResultSetFromFile(self.filename)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(tournament.progress_bar.total, 15)
@unittest.skipIf(axelrod.on_windows,
"Parallel processing not supported on Windows")
def test_progress_bar_play_parallel(self):
"""Test that tournament plays when asking for progress bar for parallel
tournament"""
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
results = tournament.play(processes=2)
self.assertIsInstance(results, axelrod.ResultSet)
results = tournament.play(progress_bar=True)
self.assertIsInstance(results, axelrod.ResultSet)
@given(tournament=tournaments(min_size=2, max_size=5, min_turns=2,
max_turns=50, min_repetitions=2,
max_repetitions=4))
@settings(max_examples=50, timeout=0)
@example(tournament=axelrod.Tournament(players=[s() for s in
test_strategies], turns=test_turns, repetitions=test_repetitions)
)
# These two examples are to make sure #465 is fixed.
# As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465,
# these two examples were identified by hypothesis.
@example(tournament=
axelrod.Tournament(players=[axelrod.BackStabber(),
axelrod.MindReader()],
turns=2, repetitions=1),
)
@example(tournament=
axelrod.Tournament(players=[axelrod.BackStabber(),
axelrod.ThueMorse()],
turns=2, repetitions=1),
)
def test_property_serial_play(self, tournament):
"""Test serial play using hypothesis"""
# Test that we get an instance of ResultSet
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(results.nplayers, len(tournament.players))
self.assertEqual(results.players, [str(p) for p in tournament.players])
@unittest.skipIf(axelrod.on_windows,
"Parallel processing not supported on Windows")
def test_parallel_play(self):
# Test that we get an instance of ResultSet
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
results = tournament.play(processes=2, progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(tournament.num_interactions, 75)
# The following relates to #516
players = [axelrod.Cooperator(), axelrod.Defector(),
axelrod.BackStabber(), axelrod.PSOGambler(),
axelrod.ThueMorse(), axelrod.DoubleCrosser()]
tournament = axelrod.Tournament(
name=self.test_name,
players=players,
game=self.game,
turns=20,
repetitions=self.test_repetitions)
scores = tournament.play(processes=2, progress_bar=False).scores
self.assertEqual(len(scores), len(players))
def test_run_serial(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
tournament._write_interactions = MagicMock(
name='_write_interactions')
self.assertTrue(tournament._run_serial())
# Get the calls made to write_interactions
calls = tournament._write_interactions.call_args_list
self.assertEqual(len(calls), 15)
@unittest.skipIf(axelrod.on_windows,
"Parallel processing not supported on Windows")
def test_run_parallel(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
tournament._write_interactions = MagicMock(
name='_write_interactions')
self.assertTrue(tournament._run_parallel())
# Get the calls made to write_interactions
calls = tournament._write_interactions.call_args_list
self.assertEqual(len(calls), 15)
@unittest.skipIf(axelrod.on_windows,
"Parallel processing not supported on Windows")
def test_n_workers(self):
max_processes = cpu_count()
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
self.assertEqual(tournament._n_workers(processes=1), max_processes)
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
self.assertEqual(tournament._n_workers(processes=max_processes+2),
max_processes)
@unittest.skipIf(axelrod.on_windows,
"Parallel processing not supported on Windows")
@unittest.skipIf(
cpu_count() < 2,
"not supported on single processor machines")
def test_2_workers(self):
# This is a separate test with a skip condition because we
# cannot guarantee that the tests will always run on a machine
# with more than one processor
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions,)
self.assertEqual(tournament._n_workers(processes=2), 2)
@unittest.skipIf(axelrod.on_windows,
"Parallel processing not supported on Windows")
def test_start_workers(self):
workers = 2
work_queue = Queue()
done_queue = Queue()
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
chunks = tournament.match_generator.build_match_chunks()
for chunk in chunks:
work_queue.put(chunk)
tournament._start_workers(workers, work_queue, done_queue)
stops = 0
while stops < workers:
payoffs = done_queue.get()
if payoffs == 'STOP':
stops += 1
self.assertEqual(stops, workers)
@unittest.skipIf(axelrod.on_windows,
"Parallel processing not supported on Windows")
def test_worker(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
work_queue = Queue()
chunks = tournament.match_generator.build_match_chunks()
count = 0
for chunk in chunks:
work_queue.put(chunk)
count += 1
work_queue.put('STOP')
done_queue = Queue()
tournament._worker(work_queue, done_queue)
for r in range(count):
new_matches = done_queue.get()
for index_pair, matches in new_matches.items():
self.assertIsInstance(index_pair, tuple)
self.assertEqual(len(matches), self.test_repetitions)
queue_stop = done_queue.get()
self.assertEqual(queue_stop, 'STOP')
def test_build_result_set(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
# Test in memory
results = tournament.play(progress_bar=False, in_memory=True)
self.assertIsInstance(results, axelrod.ResultSet)
def test_no_build_result_set(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
results = tournament.play(build_results=False, filename=self.filename,
progress_bar=False)
self.assertIsNone(results)
# Checking that results were written properly
results = axelrod.ResultSetFromFile(self.filename)
self.assertIsInstance(results, axelrod.ResultSet)
@given(turns=integers(min_value=1, max_value=200))
@example(turns=3)
@example(turns=200)
def test_play_matches(self, turns):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
repetitions=self.test_repetitions)
def make_chunk_generator():
for player1_index in range(len(self.players)):
for player2_index in range(player1_index, len(self.players)):
index_pair = (player1_index, player2_index)
match_params = (turns, self.game, None, 0)
yield (index_pair, match_params, self.test_repetitions)
chunk_generator = make_chunk_generator()
interactions = {}
for chunk in chunk_generator:
result = tournament._play_matches(chunk)
for index_pair, inters in result.items():
try:
interactions[index_pair].append(inters)
except KeyError:
interactions[index_pair] = [inters]
self.assertEqual(len(interactions), 15)
for index_pair, inter in interactions.items():
self.assertEqual(len(index_pair), 2)
for plays in inter:
# Check that have the expected number of repetitions
self.assertEqual(len(plays), self.test_repetitions)
for repetition in plays:
# Check that have the correct length for each rep
self.assertEqual(len(repetition), turns)
# Check that matches no longer exist
self.assertEqual((len(list(chunk_generator))), 0)
def test_write_interactions(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=2,
repetitions=2)
tournament._write_interactions = MagicMock(
name='_write_interactions')
tournament._build_result_set = MagicMock(
name='_build_result_set') # Mocking this as it is called by play
self.assertTrue(tournament.play(filename=self.filename,
progress_bar=False))
tournament.outputfile.close() # This is normally closed by `build_result_set`
# Get the calls made to write_interactions
calls = tournament._write_interactions.call_args_list
self.assertEqual(len(calls), 15)
# Test when runnning in memory
tournament._write_interactions = MagicMock(
name='_write_interactions')
self.assertTrue(tournament.play(filename=self.filename,
progress_bar=False,
in_memory=True))
# Get the calls made to write_interactions
calls = tournament._write_interactions.call_args_list
self.assertEqual(len(calls), 15)
def test_write_to_csv(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=2,
repetitions=2)
tournament.play(filename=self.filename, progress_bar=False)
with open(self.filename, 'r') as f:
written_data = [[int(r[0]), int(r[1])] + r[2:] for r in csv.reader(f)]
expected_data = [[0, 1, 'Cooperator', 'Tit For Tat', 'CC', 'CC'],
[0, 1, 'Cooperator', 'Tit For Tat', 'CC', 'CC'],
[1, 2, 'Tit For Tat', 'Defector', 'CD', 'DD'],
[1, 2, 'Tit For Tat', 'Defector', 'CD', 'DD'],
[0, 0, 'Cooperator', 'Cooperator', 'CC', 'CC'],
[0, 0, 'Cooperator', 'Cooperator', 'CC', 'CC'],
[3, 3, 'Grudger', 'Grudger', 'CC', 'CC'],
[3, 3, 'Grudger', 'Grudger', 'CC', 'CC'],
[2, 2, 'Defector', 'Defector', 'DD', 'DD'],
[2, 2, 'Defector', 'Defector', 'DD', 'DD'],
[4, 4, 'Soft Go By Majority', 'Soft Go By Majority', 'CC', 'CC'],
[4, 4, 'Soft Go By Majority', 'Soft Go By Majority', 'CC', 'CC'],
[1, 4, 'Tit For Tat', 'Soft Go By Majority', 'CC', 'CC'],
[1, 4, 'Tit For Tat', 'Soft Go By Majority', 'CC', 'CC'],
[1, 1, 'Tit For Tat', 'Tit For Tat', 'CC', 'CC'],
[1, 1, 'Tit For Tat', 'Tit For Tat', 'CC', 'CC'],
[1, 3, 'Tit For Tat', 'Grudger', 'CC', 'CC'],
[1, 3, 'Tit For Tat', 'Grudger', 'CC', 'CC'],
[2, 3, 'Defector', 'Grudger', 'DD', 'CD'],
[2, 3, 'Defector', 'Grudger', 'DD', 'CD'],
[0, 4, 'Cooperator', 'Soft Go By Majority', 'CC', 'CC'],
[0, 4, 'Cooperator', 'Soft Go By Majority', 'CC', 'CC'],
[2, 4, 'Defector', 'Soft Go By Majority', 'DD', 'CD'],
[2, 4, 'Defector', 'Soft Go By Majority', 'DD', 'CD'],
[0, 3, 'Cooperator', 'Grudger', 'CC', 'CC'],
[0, 3, 'Cooperator', 'Grudger', 'CC', 'CC'],
[3, 4, 'Grudger', 'Soft Go By Majority', 'CC', 'CC'],
[3, 4, 'Grudger', 'Soft Go By Majority', 'CC', 'CC'],
[0, 2, 'Cooperator', 'Defector', 'CC', 'DD'],
[0, 2, 'Cooperator', 'Defector', 'CC', 'DD']]
self.assertEqual(sorted(written_data), sorted(expected_data))
class TestProbEndTournament(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.game = axelrod.Game()
cls.players = [s() for s in test_strategies]
cls.test_name = 'test'
cls.test_repetitions = test_repetitions
cls.test_prob_end = test_prob_end
def test_init(self):
tournament = axelrod.ProbEndTournament(
name=self.test_name,
players=self.players,
game=self.game,
prob_end=self.test_prob_end,
noise=0.2)
self.assertEqual(tournament.match_generator.prob_end, tournament.prob_end)
self.assertEqual(len(tournament.players), len(test_strategies))
self.assertEqual(tournament.game.score(('C', 'C')), (3, 3))
self.assertEqual(tournament.turns, float("inf"))
self.assertEqual(tournament.repetitions, 10)
self.assertEqual(tournament.name, 'test')
self.assertTrue(tournament._with_morality)
self.assertIsInstance(tournament._logger, logging.Logger)
self.assertEqual(tournament.noise, 0.2)
anonymous_tournament = axelrod.Tournament(players=self.players)
self.assertEqual(anonymous_tournament.name, 'axelrod')
@given(tournament=prob_end_tournaments(min_size=2, max_size=5,
min_prob_end=.1,
max_prob_end=.9,
min_repetitions=2,
max_repetitions=4))
@settings(max_examples=50, timeout=0)
@example(tournament=
axelrod.ProbEndTournament(players=[s() for s in test_strategies],
prob_end=.2, repetitions=test_repetitions)
)
# These two examples are to make sure #465 is fixed.
# As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465,
# these two examples were identified by hypothesis.
@example(tournament=
axelrod.ProbEndTournament(players=[axelrod.BackStabber(),
axelrod.MindReader()],
prob_end=.2, repetitions=1))
@example(tournament=
axelrod.ProbEndTournament(players=[axelrod.ThueMorse(),
axelrod.MindReader()],
prob_end=.2, repetitions=1))
def test_property_serial_play(self, tournament):
"""Test serial play using hypothesis"""
# Test that we get an instance of ResultSet
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(results.nplayers, len(tournament.players))
self.assertEqual(results.players, [str(p) for p in tournament.players])
class TestSpatialTournament(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.game = axelrod.Game()
cls.players = [s() for s in test_strategies]
cls.test_name = 'test'
cls.test_repetitions = test_repetitions
cls.test_turns = test_turns
cls.test_edges = test_edges
def test_init(self):
tournament = axelrod.SpatialTournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=self.test_turns,
edges=self.test_edges,
noise=0.2)
self.assertEqual(tournament.match_generator.edges, tournament.edges)
self.assertEqual(len(tournament.players), len(test_strategies))
self.assertEqual(tournament.game.score(('C', 'C')), (3, 3))
self.assertEqual(tournament.turns, 100)
self.assertEqual(tournament.repetitions, 10)
self.assertEqual(tournament.name, 'test')
self.assertTrue(tournament._with_morality)
self.assertIsInstance(tournament._logger, logging.Logger)
self.assertEqual(tournament.noise, 0.2)
self.assertEqual(tournament.match_generator.noise, 0.2)
anonymous_tournament = axelrod.Tournament(players=self.players)
self.assertEqual(anonymous_tournament.name, 'axelrod')
@given(strategies=strategy_lists(strategies=deterministic_strategies,
min_size=2, max_size=2),
turns=integers(min_value=1, max_value=20),
repetitions=integers(min_value=1, max_value=5),
noise=floats(min_value=0, max_value=1),
seed=integers(min_value=0, max_value=4294967295))
@settings(max_examples=50, timeout=0)
def test_complete_tournament(self, strategies, turns, repetitions,
noise, seed):
"""
A test to check that a spatial tournament on the complete multigraph
gives the same results as the round robin.
"""
players = [s() for s in strategies]
# edges
edges = []
for i in range(0, len(players)):
for j in range(i, len(players)):
edges.append((i, j))
# create a round robin tournament
tournament = axelrod.Tournament(players, repetitions=repetitions,
turns=turns, noise=noise)
# create a complete spatial tournament
spatial_tournament = axelrod.SpatialTournament(players,
repetitions=repetitions,
turns=turns,
noise=noise,
edges=edges)
axelrod.seed(seed)
results = tournament.play(progress_bar=False)
axelrod.seed(seed)
spatial_results = spatial_tournament.play(progress_bar=False)
self.assertEqual(results.ranked_names, spatial_results.ranked_names)
self.assertEqual(results.nplayers, spatial_results.nplayers)
self.assertEqual(results.nrepetitions, spatial_results.nrepetitions)
self.assertEqual(results.payoff_diffs_means,
spatial_results.payoff_diffs_means)
self.assertEqual(results.payoff_matrix, spatial_results.payoff_matrix)
self.assertEqual(results.payoff_stddevs, spatial_results.payoff_stddevs)
self.assertEqual(results.payoffs, spatial_results.payoffs)
self.assertEqual(results.cooperating_rating,
spatial_results.cooperating_rating)
self.assertEqual(results.cooperation, spatial_results.cooperation)
self.assertEqual(results.normalised_cooperation,
spatial_results.normalised_cooperation)
self.assertEqual(results.normalised_scores,
spatial_results.normalised_scores)
self.assertEqual(results.good_partner_matrix,
spatial_results.good_partner_matrix)
self.assertEqual(results.good_partner_rating,
spatial_results.good_partner_rating)
def test_particular_tournament(self):
"""A test for a tournament that has caused failures during some bug
fixing"""
players = [axelrod.Cooperator(), axelrod.Defector(),
axelrod.TitForTat(), axelrod.Grudger()]
edges = [(0, 2), (0, 3), (1, 2), (1, 3)]
tournament = axelrod.SpatialTournament(players, edges=edges)
results = tournament.play(progress_bar=False)
expected_ranked_names = ['Cooperator', 'Tit For Tat',
'Grudger', 'Defector']
self.assertEqual(results.ranked_names, expected_ranked_names)
# Check that this tournament runs with noise
tournament = axelrod.SpatialTournament(players, edges=edges, noise=.5)
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
class TestProbEndingSpatialTournament(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.game = axelrod.Game()
cls.players = [s() for s in test_strategies]
cls.test_name = 'test'
cls.test_repetitions = test_repetitions
cls.test_prob_end = test_prob_end
cls.test_edges = test_edges
def test_init(self):
tournament = axelrod.ProbEndSpatialTournament(
name=self.test_name,
players=self.players,
game=self.game,
prob_end=self.test_prob_end,
edges=self.test_edges,
noise=0.2)
self.assertEqual(tournament.match_generator.edges, tournament.edges)
self.assertEqual(len(tournament.players), len(test_strategies))
self.assertEqual(tournament.game.score(('C', 'C')), (3, 3))
self.assertEqual(tournament.turns, float("inf"))
self.assertEqual(tournament.repetitions, 10)
self.assertEqual(tournament.name, 'test')
self.assertTrue(tournament._with_morality)
self.assertIsInstance(tournament._logger, logging.Logger)
self.assertEqual(tournament.noise, 0.2)
self.assertEqual(tournament.match_generator.noise, 0.2)
self.assertEqual(tournament.prob_end, self.test_prob_end)
anonymous_tournament = axelrod.Tournament(players=self.players)
self.assertEqual(anonymous_tournament.name, 'axelrod')
@given(strategies=strategy_lists(strategies=deterministic_strategies,
min_size=2, max_size=2),
prob_end=floats(min_value=.1, max_value=.9),
reps=integers(min_value=1, max_value=3),
seed=integers(min_value=0, max_value=4294967295))
@settings(max_examples=50, timeout=0)
def test_complete_tournament(self, strategies, prob_end,
seed, reps):
"""
A test to check that a spatial tournament on the complete graph
gives the same results as the round robin.
"""
players = [s() for s in strategies]
# edges
edges = [(i, j) for i in range(len(players))
for j in range(i, len(players))]
# create a prob end round robin tournament
axelrod.seed(seed)
tournament = axelrod.ProbEndTournament(players, prob_end=prob_end,
repetitions=reps)
results = tournament.play(progress_bar=False)
# create a complete spatial tournament
axelrod.seed(seed)
spatial_tournament = axelrod.ProbEndSpatialTournament(players,
prob_end=prob_end,
repetitions=reps,
edges=edges)
spatial_results = spatial_tournament.play(progress_bar=False)
self.assertEqual(results.match_lengths, spatial_results.match_lengths)
self.assertEqual(results.ranked_names, spatial_results.ranked_names)
self.assertEqual(results.wins, spatial_results.wins)
self.assertEqual(results.scores, spatial_results.scores)
self.assertEqual(results.cooperation,
spatial_results.cooperation)
@given(tournament=spatial_tournaments(strategies=axelrod.basic_strategies,
max_turns=1, max_noise=0,
max_repetitions=3),
seed=integers(min_value=0, max_value=4294967295))
@settings(max_examples=50, timeout=0)
def test_one_turn_tournament(self, tournament, seed):
"""
        Tests that a prob_end=1 spatial tournament gives the same results as
        the corresponding one-turn spatial tournament
"""
prob_end_tour = axelrod.ProbEndSpatialTournament(tournament.players,
prob_end=1,
edges=tournament.edges,
repetitions=tournament.repetitions)
axelrod.seed(seed)
prob_end_results = prob_end_tour.play(progress_bar=False)
axelrod.seed(seed)
one_turn_results = tournament.play(progress_bar=False)
self.assertEqual(prob_end_results.scores,
one_turn_results.scores)
self.assertEqual(prob_end_results.wins,
one_turn_results.wins)
self.assertEqual(prob_end_results.cooperation,
one_turn_results.cooperation)
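# Standalone sketch (added for illustration; not part of the original test
# suite). It mirrors the equivalence the tests above assert: with the same
# seed, a ProbEndSpatialTournament on the complete graph (self matches
# included) reproduces a plain ProbEndTournament. The players, prob_end and
# repetition count below are arbitrary placeholder choices.
if __name__ == "__main__":
    demo_players = [axelrod.Cooperator(), axelrod.TitForTat()]
    demo_edges = [(i, j) for i in range(len(demo_players))
                  for j in range(i, len(demo_players))]
    axelrod.seed(0)
    plain_results = axelrod.ProbEndTournament(
        demo_players, prob_end=0.5, repetitions=2).play(progress_bar=False)
    axelrod.seed(0)
    spatial_results = axelrod.ProbEndSpatialTournament(
        demo_players, prob_end=0.5, repetitions=2,
        edges=demo_edges).play(progress_bar=False)
    print(plain_results.ranked_names == spatial_results.ranked_names)  # expected: True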
|
the-stack_0_6797 | # *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
def peerCorrelation(client, symbol, range="6m"):
"""This will return a dataframe of peer correlations for the given symbol across
the given range
Args:
client (pyEX.Client): Client
symbol (string): Ticker
range (string): range to use, for pyEX.chart
Returns:
DataFrame: result
"""
peers = client.peers(symbol)
rets = client.batchDF(peers + [symbol], "chart", range)["chart"]
ret = rets.pivot(columns="symbol", values="changePercent").corr()
ret.index.name = "symbol"
ret.columns = ret.columns.tolist()
return ret
def peerCorrelationPlot(client, symbol, range="6m"):
"""This will plot a dataframe of peer correlations for the given symbol across
the given range
Note: this function requires the use of `seaborn.heatmap`
Args:
client (pyEX.Client): Client
symbol (string): Ticker
range (string): range to use, for pyEX.chart
Returns:
        Axes: seaborn heatmap of the peer correlations
"""
import seaborn as sns
    return sns.heatmap(peerCorrelation(client, symbol, range))
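# Hedged usage sketch (added; not part of the original module). pyEX.Client
# needs a real IEX Cloud API token, so the token and ticker below are
# placeholders, and peerCorrelationPlot additionally requires seaborn.
if __name__ == "__main__":
    import pyEX
    client = pyEX.Client("YOUR_IEX_TOKEN")  # hypothetical token
    print(peerCorrelation(client, "AAPL", range="6m"))    # correlation DataFrame
    ax = peerCorrelationPlot(client, "AAPL", range="6m")  # seaborn heatmap Axes
    ax.figure.savefig("aapl_peer_correlation.png")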
|
the-stack_0_6798 | from os.path import dirname, join
import pandas as pd
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource, CustomJS
from bokeh.models.widgets import RangeSlider, Button, DataTable, TableColumn, NumberFormatter
from bokeh.io import curdoc
df = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))
source = ColumnDataSource(data=dict())
def update():
current = df[(df['salary'] >= slider.value[0]) & (df['salary'] <= slider.value[1])].dropna()
source.data = {
'name' : current.name,
'salary' : current.salary,
'years_experience' : current.years_experience,
}
slider = RangeSlider(title="Max Salary", start=10000, end=110000, value=(10000, 50000), step=1000, format="0,0")
slider.on_change('value', lambda attr, old, new: update())
button = Button(label="Download", button_type="success")
button.js_on_click(CustomJS(args=dict(source=source),
code=open(join(dirname(__file__), "download.js")).read()))
columns = [
TableColumn(field="name", title="Employee Name"),
TableColumn(field="salary", title="Income", formatter=NumberFormatter(format="$0,0.00")),
TableColumn(field="years_experience", title="Experience (years)")
]
data_table = DataTable(source=source, columns=columns, width=800)
controls = column(slider, button)
curdoc().add_root(row(controls, data_table))
curdoc().title = "Export CSV"
update()
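# Note (added): this script is a Bokeh server application (it registers its
# layout through curdoc()), so it is normally launched with the Bokeh CLI,
# for example:
#     bokeh serve --show <directory_containing_this_file>
# It also assumes `salary_data.csv` and `download.js` sit next to this file.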
|
the-stack_0_6799 | from Logger import log
import numpy as np
# from sklearn.metrics import confusion_matrix
def get_TP(target, prediction, threshold):
'''
    compute the number of true positives
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
threshold: float
'''
assert (target.shape == prediction.shape)
target = 1 - np.clip(target, threshold, 0) / threshold
prediction = 1 - np.clip(prediction, threshold, 0) / threshold
tp_array = np.logical_and(target, prediction) * 1.0
tp = np.sum(tp_array)
return tp
def get_FP(target, prediction, threshold):
'''
    compute the number of false positives
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
threshold: float
'''
assert (target.shape == prediction.shape)
target = np.clip(target, threshold, 0) / threshold
prediction = 1 - np.clip(prediction, threshold, 0) / threshold
fp_array = np.logical_and(target, prediction) * 1.0
fp = np.sum(fp_array)
return fp
def get_FN(target, prediction, threshold):
'''
    compute the number of false negatives
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
threshold: float
'''
assert (target.shape == prediction.shape)
target = 1 - np.clip(target, threshold, 0) / threshold
prediction = np.clip(prediction, threshold, 0) / threshold
fn_array = np.logical_and(target, prediction) * 1.0
fn = np.sum(fn_array)
return fn
def get_TN(target, prediction, threshold):
'''
    compute the number of true negatives
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
threshold: float
'''
assert (target.shape == prediction.shape)
target = np.clip(target, threshold, 0) / threshold
prediction = np.clip(prediction, threshold, 0) / threshold
tn_array = np.logical_and(target, prediction) * 1.0
tn = np.sum(tn_array)
return tn
def get_recall(target, prediction, threshold):
'''
compute the recall rate
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
threshold: float
'''
tp = get_TP(target, prediction, threshold)
fn = get_FN(target, prediction, threshold)
log('tp={0}'.format(tp))
log('fn={0}'.format(fn))
if tp + fn <= 0.0:
recall = tp / (tp + fn + 1e-9)
else:
recall = tp / (tp + fn)
return recall
def get_precision(target, prediction, threshold):
'''
compute the precision rate
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
threshold: float
'''
tp = get_TP(target, prediction, threshold)
fp = get_FP(target, prediction, threshold)
log('tp={0}'.format(tp))
log('fp={0}'.format(fp))
if tp + fp <= 0.0:
precision = tp / (tp + fp + 1e-9)
else:
precision = tp / (tp + fp)
return precision
def get_F1(target, prediction, threshold):
'''
compute the F1 score
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
threshold: float
'''
recall = get_recall(target, prediction, threshold)
log(recall)
precision = get_precision(target, prediction, threshold)
log(precision)
if precision == 0.0 or recall == 0.0:
f1 = 0.0
else:
f1 = 2 * precision * recall / (precision + recall)
return f1
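def _demo_classification_metrics():
    """Added usage sketch (not part of the original module): run the
    event-detection metrics above on a toy ground-truth/prediction pair.
    The 15 W threshold and the traces are arbitrary placeholder values."""
    gt = np.array([0.0, 100.0, 100.0, 0.0, 100.0])
    pred = np.array([0.0, 100.0, 0.0, 100.0, 100.0])
    threshold = 15.0
    print('recall    =', get_recall(gt, pred, threshold))
    print('precision =', get_precision(gt, pred, threshold))
    print('F1        =', get_F1(gt, pred, threshold))
    print('accuracy  =', get_accuracy(gt, pred, threshold))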
def get_accuracy(target, prediction, threshold):
'''
compute the accuracy rate
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
threshold: float
'''
tp = get_TP(target, prediction, threshold)
tn = get_TN(target, prediction, threshold)
accuracy = (tp + tn) / target.size
return accuracy
def get_relative_error(target, prediction):
'''
compute the relative_error
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
'''
assert (target.shape == prediction.shape)
return np.mean(np.nan_to_num(np.abs(target - prediction) / np.maximum(target, prediction)))
def get_abs_error(target, prediction):
'''
compute the absolute_error
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
'''
assert (target.shape == prediction.shape)
data = np.abs(target - prediction)
mean, std, min_v, max_v, quartile1, median, quartile2 = get_statistics(data)
return mean, std, min_v, max_v, quartile1, median, quartile2, data
def get_nde(target, prediction):
'''
compute the normalized disaggregation error
Parameters:
----------------
    target: the ground truth, np.array
prediction: the prediction, np.array
'''
return np.sum((target - prediction) ** 2) / np.sum((target ** 2))
def get_sae(target, prediction, sample_second):
'''
compute the signal aggregate error
    sae = |\hat{r} - r| / r, where r is the ground-truth total energy and
    \hat{r} is the predicted total energy.
'''
r = np.sum(target * sample_second * 1.0 / 3600.0)
rhat = np.sum(prediction * sample_second * 1.0 / 3600.0)
sae = np.abs(r - rhat) / np.abs(r)
return sae
def get_Epd(target, prediction, sample_second):
'''
Energy per day
- calculate energy of a day for both ground truth and prediction
- sum all the energies
- divide by the number of days
'''
day = int(24.0 * 3600 / sample_second) #how many rows per day
gt_en_days = []
pred_en_days = []
for start in range(0, int(len(target)-day), int(day)):
gt_en_days.append(np.sum(target[start:start+day]*sample_second)/3600)
pred_en_days.append(np.sum(prediction[start:start+day]*sample_second)/3600)
Epd = np.sum(np.abs(np.array(gt_en_days)-np.array(pred_en_days)))/(len(target)/day)
return Epd
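def _demo_energy_metrics():
    """Added usage sketch (not part of the original module): evaluate the
    energy-based errors above on a synthetic constant load. The 1 Hz sampling,
    100 W / 90 W levels and two-day duration are arbitrary placeholder values."""
    sample_second = 1
    gt = np.full(2 * 24 * 3600, 100.0)   # two days of a constant 100 W load
    pred = np.full(2 * 24 * 3600, 90.0)  # a constant 10% under-estimate
    print('relative error =', get_relative_error(gt, pred))      # 0.1
    print('nde            =', get_nde(gt, pred))                 # 0.01
    print('sae            =', get_sae(gt, pred, sample_second))  # 0.1
    print('Epd (Wh/day)   =', get_Epd(gt, pred, sample_second))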
def get_statistics(data):
mean = np.mean(data)
std = np.std(data)
    min_v = np.min(data)
    max_v = np.max(data)
quartile1 = np.percentile(data, 25)
median = np.percentile(data, 50)
quartile2 = np.percentile(data, 75)
return mean, std, min_v, max_v, quartile1, median, quartile2 |