ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | b417d5fbf1ec60ff44b7715d51fcc2cd0efe2e0f | from setuptools import setup, find_packages
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
'Programming Language :: Python :: 3'
]
setup(
name='jsonfiles',
version='0.1',
description='Use json objects with a bit of sugar.',
long_description='Use json objects with a bit of sugar.',
url='https://github.com/anthony16t/jsonfiles',
author='anthony16t',
author_email='[email protected]',
license='MIT',
classifiers=classifiers,
keywords=['json','objects'],
packages=find_packages()
) |
py | b417d6129fe7eb6d8db025f6ebb78d04f2780e0a | from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
"""Create a new user in the system"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""Create a new auth tokn for user"""
serializer_class = AuthTokenSerializer
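# Overriding renderer_classes with the project defaults makes this endpoint usable from DRF's browsable API (ObtainAuthToken otherwise renders JSON only).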
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
"""Manage the authenticated user"""
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
"""Retrieve or return authenticated user"""
return self.request.user
|
py | b417d695c63545d88cea95dd4d72d9add11121ac | import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dinesafelysite.settings")
django.setup()
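# django.setup() must run before importing any models, hence the late import below (noqa: E402).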
from restaurant.models import Restaurant, InspectionRecords # noqa: E402
def update_covid_compliant_status():
restaurants = Restaurant.objects.all()
for r in restaurants:
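# Fetch this restaurant's inspection records, newest first; the most recent one (if any) determines its compliance status.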
inspection_records = InspectionRecords.objects.filter(
restaurant_name=r.restaurant_name,
business_address=r.business_address,
postcode=r.postcode,
).order_by("-inspected_on")
if len(inspection_records) >= 1:
latest_compliant_status = inspection_records[0].is_roadway_compliant
r.compliant_status = latest_compliant_status
r.save()
if __name__ == "__main__":
update_covid_compliant_status()
|
py | b417d72ce9bf3412c24274edd0928de3c10be8de | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Train mobilenetV2 on ImageNet."""
import os
import time
import random
import numpy as np
from mindspore import Tensor
from mindspore.nn import WithLossCell, TrainOneStepCell
from mindspore.nn.optim.momentum import Momentum
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.common import dtype as mstype
from mindspore.train.model import Model
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.serialization import save_checkpoint
from mindspore.common import set_seed
from src.dataset import create_dataset, extract_features
from src.lr_generator import get_lr
from src.config import set_config
from src.args import train_parse_args
from src.utils import context_device_init, switch_precision, config_ckpoint
from src.models import CrossEntropyWithLabelSmooth, define_net
set_seed(1)
if __name__ == '__main__':
args_opt = train_parse_args()
config = set_config(args_opt)
start = time.time()
print(f"train args: {args_opt}\ncfg: {config}")
#set context and device init
context_device_init(config)
# define network
backbone_net, head_net, net = define_net(args_opt, config)
# CPU only supports "incremental_learn"
if args_opt.train_method == "incremental_learn":
step_size = extract_features(backbone_net, args_opt.dataset_path, config)
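# extract_features is assumed to cache backbone outputs under "<dataset_path>_features" (the features_path used below) and to return the number of cached batches, which is used as step_size.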
net = head_net
elif args_opt.train_method in ("train", "fine_tune"):
if args_opt.platform == "CPU":
raise ValueError("Currently, CPU only support \"incremental_learn\", not \"fine_tune\" or \"train\".")
dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=True, config=config)
step_size = dataset.get_dataset_size()
# Currently, only Ascend supports precision switching.
switch_precision(net, mstype.float16, config)
# define loss
if config.label_smooth > 0:
loss = CrossEntropyWithLabelSmooth(
smooth_factor=config.label_smooth, num_classes=config.num_classes)
else:
loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
epoch_size = config.epoch_size
# get learning rate
lr = Tensor(get_lr(global_step=0,
lr_init=0,
lr_end=config.lr_end,
lr_max=config.lr_max,
warmup_epochs=config.warmup_epochs,
total_epochs=epoch_size,
steps_per_epoch=step_size))
if args_opt.train_method == "incremental_learn":
opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, config.momentum, config.weight_decay)
network = WithLossCell(net, loss)
network = TrainOneStepCell(network, opt)
network.set_train()
features_path = args_opt.dataset_path + '_features'
idx_list = list(range(step_size))
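# If a checkpoint directory already exists, rename it with a timestamp suffix so earlier head checkpoints are kept, then create a fresh directory.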
if os.path.isdir(config.save_checkpoint_path):
os.rename(config.save_checkpoint_path, "{}_{}".format(config.save_checkpoint_path, time.time()))
os.mkdir(config.save_checkpoint_path)
for epoch in range(epoch_size):
random.shuffle(idx_list)
epoch_start = time.time()
losses = []
for j in idx_list:
feature = Tensor(np.load(os.path.join(features_path, f"feature_{j}.npy")))
label = Tensor(np.load(os.path.join(features_path, f"label_{j}.npy")))
losses.append(network(feature, label).asnumpy())
epoch_mseconds = (time.time()-epoch_start) * 1000
per_step_mseconds = epoch_mseconds / step_size
# Printing the current lr would require PyNative mode, which CPU doesn't support, so the lr value is omitted from the log below.
# print("\r epoch[{}], iter[{}] cost: {:5.3f}, per step time: {:5.3f}, avg loss: {:5.3f}, lr: {}"\
# .format(epoch + 1, step_step, epoch_mseconds, per_step_mseconds, np.mean(np.array(losses)), \
# lr[(epoch+1)*step_size - 1]), end="")
print("\r epoch[{}], iter[{}] cost: {:5.3f}, per step time: {:5.3f}, avg loss: {:5.3f}"\
.format(epoch + 1, step_size, epoch_mseconds, per_step_mseconds, np.mean(np.array(losses))), \
end="")
if (epoch + 1) % config.save_checkpoint_epochs == 0:
save_checkpoint(network, os.path.join(config.save_checkpoint_path, \
f"mobilenetv2_head_{epoch+1}.ckpt"))
print("total cost {:5.4f} s".format(time.time() - start))
elif args_opt.train_method in ("train", "fine_tune"):
loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, config.momentum, \
config.weight_decay, config.loss_scale)
model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale)
cb = config_ckpoint(config, lr, step_size)
print("============== Starting Training ==============")
model.train(epoch_size, dataset, callbacks=cb)
print("============== End Training ==============")
|
py | b417d77ca0984cad073727ff5ed451dea06a8eb5 | def <caret>f(param1, param2):
"""
Parameters
----------
param2 : object
""" |
py | b417d788c310ede3359623dd9dca1be5efb7a4f2 | # pylint: disable = too-many-lines
import itertools
import logging
from copy import deepcopy
from functools import reduce
from itertools import combinations, product
import numpy as np
from scipy import linalg
from formulae.utils import get_interaction_matrix
from formulae.contrasts import pick_contrasts
from formulae.terms.call import Call
from formulae.terms.variable import Variable
_log = logging.getLogger("formulae")
class Intercept:
"""Internal representation of a model intercept."""
def __init__(self):
self.name = "Intercept"
self._type = "Intercept"
self.data = None
self.len = None
self.metadata = {"type": "intercept"}
def __eq__(self, other):
return isinstance(other, type(self))
def __hash__(self):
return hash(self._type)
def __add__(self, other):
"""Addition operator.
Generally this operator is used to explicitly add an intercept to a model. However, there
may be cases where the result is not a ``Model``, or does not contain an intercept.
* ``"1 + 0"`` and ``"1 + (-1)"`` return an empty model.
* ``"1 + 1"`` returns an intercept.
* ``"1 + x"`` and ``"1 + (x|g)"`` returns a model with both the term and the intercept.
* ``"1 + (x + y)"`` adds an intercept to the model given by ``x`` and ``y``.
"""
if isinstance(other, NegatedIntercept):
return Model()
elif isinstance(other, type(self)):
return self
elif isinstance(other, (Term, GroupSpecificTerm)):
return Model(self, other)
elif isinstance(other, Model):
return Model(self) + other
else:
return NotImplemented
def __sub__(self, other):
"""Subtraction operator.
This operator removes an intercept from a model if the given model has an intercept.
* ``"1 - 1"`` returns an empty model.
* ``"1 - 0"`` and ``"1 - (-1)"`` return an intercept.
* ``"1 - (x + y)"`` returns the model given by ``x`` and ``y`` unchanged.
* ``"1 - (1 + x + y)"`` returns the model given by ``x`` and ``y``, removing the intercept.
"""
if isinstance(other, type(self)):
return Model()
elif isinstance(other, NegatedIntercept):
return self
elif isinstance(other, Model):
if self in other.common_terms:
return Model()
else:
return self
else:
return NotImplemented
def __or__(self, other):
"""Group-specific operator. Creates group-specific intercept.
This operation is usually surrounded by parentheses. They are not strictly required, but
they are always used because ``|`` has lower precedence than the other common operators.
This operator is distributed over the right-hand side, which means ``(1|g + h)`` is
equivalent to ``(1|g) + (1|h)``.
"""
if isinstance(other, Term):
return GroupSpecificTerm(self, other)
elif isinstance(other, Model):
products = product([self], other.common_terms)
terms = [GroupSpecificTerm(p[0], p[1]) for p in products]
return Model(*terms)
else:
return NotImplemented
def __repr__(self):
return self.__str__()
def __str__(self):
return f"{self.__class__.__name__}()"
@property
def var_names(self):
"""Returns empty set, no variables are used in the intercept."""
return set()
def set_type(self, data, env): # pylint: disable = unused-argument
"""Sets length of the intercept."""
# Nothing goes here as the type is given by the class.
# Only works with DataFrames or Series so far
self.len = data.shape[0]
def set_data(self, encoding): # pylint: disable = unused-argument
"""Creates data for the intercept.
It sets ``self.data`` equal to a numpy array of ones of length ``(self.len, 1)``.
"""
self.data = np.ones((self.len, 1))
def eval_new_data(self, data):
"""Returns data for a new intercept.
The length of the new intercept is given by the number of rows in ``data``.
"""
# it assumes data is a pandas DataFrame now
return np.ones((data.shape[0], 1))
class NegatedIntercept:
"""Internal representation of the opposite of a model intercept.
This object is created whenever we use ``"0"`` or ``"-1"`` in a model formula. It is not
expected to appear in a final model. It's here to help us perform operations involving the
``Intercept`` and decide when to keep it and when to drop it.
"""
def __init__(self):
self.name = "NegatedIntercept"
self._type = "Intercept"
def __add__(self, other):
"""Addition operator.
Generally this operator is used to explicitly remove an intercept from a model.
* ``"0 + 1"`` returns an empty model.
* ``"0 + 0"`` returns a negated intercept.
* ``"0 + x"`` returns a model that includes the negated intercept.
* ``"0 + (x + y)"`` adds the negated intercept to the model given by ``x`` and ``y``.
Even if the final result contains the negated intercept, for example in something like
``"y ~ 0 + x + y + 0"``, the ``Model`` that is obtained removes any negated intercepts
that may have been left. They just don't make sense in a model.
"""
if isinstance(other, type(self)):
return self
elif isinstance(other, Intercept):
return Model()
elif isinstance(other, (Term, GroupSpecificTerm)):
return Model(self, other)
elif isinstance(other, Model):
return Model(self) + other
else:
return NotImplemented
def __eq__(self, other):
return isinstance(other, type(self))
def __or__(self, other):
raise ValueError("At least include an intercept in '|' operation")
def __repr__(self):
return self.__str__()
def __str__(self):
return f"{self.__class__.__name__}()"
@property
def var_names(self):
# This method should never be called. Leaving a set() to avoid harmless error.
return set()
def set_type(self, *args, **kwargs):
# This method should never be called. Leaving a pass to avoid harmless error.
pass
def set_data(self, *args, **kwargs):
# This method should never be called. Leaving a pass to avoid harmless error.
pass
class Term:
"""Representation of a model term.
Terms are made of one or more components. Components are instances of :class:`.Variable` or
:class:`.Call`. Terms with only one component are known as main effects and terms with more than
one component are known as interaction effects. The order of the interaction is given by the
number of components in the term.
Parameters
----------
components: :class:`.Variable` or :class:`.Call`
Atomic components of a term.
Attributes
----------
data: dict
The values associated with the term as they go into the design matrix.
metadata: dict
Metadata associated with the term. If ``"numeric"`` or ``"categoric"`` it holds additional
information in the component ``.data`` attribute. If ``"interaction"``, the keys are
the name of the components and the values are dictionaries holding the metadata.
_type: string
Indicates the type of the term. Can be one of ``"numeric"``, ``"categoric"``, or
``"interaction"``.
name: string
The name of the term as it was originally written in the model formula.
"""
def __init__(self, *components):
self.data = None
self.metadata = {}
self._type = None
self.components = []
self.component_types = None
for component in components:
if component not in self.components:
self.components.append(component)
self.name = ":".join([str(c.name) for c in self.components])
def __hash__(self):
return hash(tuple(self.components))
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
else:
return self.components == other.components
def __add__(self, other):
"""Addition operator. Analogous to set union.
* ``"x + x"`` is equal to just ``"x"``
* ``"x + y"`` is equal to a model with both ``x`` and ``y``.
* ``"x + (y + z)"`` adds ``x`` to model already containing ``y`` and ``z``.
"""
# x + x -> x
# x + y -> x + y
# x:y + u -> x:y + u
# x:y + u:v -> x:y + u:v
# x:y + (u + v) -> x:y + u + v
# f(x) + y -> f(x) + y
# f(x) + (y + z) -> f(x) + y + z
if self == other:
return self
elif isinstance(other, type(self)):
return Model(self, other)
elif isinstance(other, Model):
return Model(self) + other
else:
return NotImplemented
def __sub__(self, other):
"""Subtraction operator. Analogous to set difference.
* ``"x - x"`` returns empty model.
* ``"x - y"`` returns the term ``"x"``.
* ``"x - (y + z)"`` returns the term ``"x"``.
"""
# x:y - u -> x:y
# x:y - u:v -> x:y
# x:y - (u + v) -> x:y
# f(x) - y -> f(x)
# f(x) - (y + z) -> f(x)
if isinstance(other, type(self)):
if self.components == other.components:
return Model()
else:
return self
elif isinstance(other, Model):
if self in other.terms:
return Model()
else:
return self
else:
return NotImplemented
def __mul__(self, other):
"""Full interaction operator.
This operator includes both the interaction as well as the main effects involved in the
interaction. It is a shortcut for ``x + y + x:y``.
* ``"x * x"`` equals to ``"x"``
* ``"x * y"`` equals to``"x + y + x:y"``
* ``"x:y * u"`` equals to ``"x:y + u + x:y:u"``
* ``"x:y * u:v"`` equals to ``"x:y + u:v + x:y:u:v"``
* ``"x:y * (u + v)"`` equals to ``"x:y + u + v + x:y:u + x:y:v"``
"""
if self == other:
return self
elif isinstance(other, type(self)):
if len(other.components) == 1 and isinstance(other.components[0].name, (int, float)):
raise TypeError("Interaction with numeric does not make sense.")
return Model(self, other, Term(*deepcopy(self.components), *deepcopy(other.components)))
elif isinstance(other, Model):
products = product([self], other.common_terms)
terms = [self] + other.common_terms
iterms = [
Term(*deepcopy(p[0].components), *deepcopy(p[1].components)) for p in products
]
return Model(*terms) + Model(*iterms)
else:
return NotImplemented
def __matmul__(self, other):
"""Simple interaction operator.
This operator is actually invoked as ``:`` but internally passed as ``@`` because there
is no ``:`` operator in Python.
* ``"x : x"`` equals to ``"x"``
* ``"x : y"`` is the interaction between ``"x"`` and ``"y"``
* ``x:(y:z)"`` equals to just ``"x:y:z"``
* ``(x:y):u"`` equals to just ``"x:y:u"``
* ``"(x:y):(u + v)"`` equals to ``"x:y:u + x:y:v"``
"""
if self == other:
return self
elif isinstance(other, type(self)):
if len(other.components) == 1 and isinstance(other.components[0].name, (int, float)):
raise TypeError("Interaction with numeric does not make sense.")
return Term(*self.components, *other.components)
elif isinstance(other, Model):
products = product([self], other.common_terms)
iterms = [Term(*p[0].components, *p[1].components) for p in products]
return Model(*iterms)
else:
return NotImplemented
def __truediv__(self, other):
"""Division interaction operator.
* ``"x / x"`` equals to just ``"x"``
* ``"x / y"`` equals to ``"x + x:y"``
* ``"x / z:y"`` equals to ``"x + x:z:y"``
* ``"x / (z + y)"`` equals to ``"x + x:z + x:y"``
* ``"x:y / u:v"`` equals to ``"x:y + x:y:u:v"``
* ``"x:y / (u + v)"`` equals to ``"x:y + x:y:u + x:y:v"``
"""
if self == other:
return self
elif isinstance(other, type(self)):
if len(other.components) == 1 and isinstance(other.components[0].name, (int, float)):
raise TypeError("Interaction with numeric does not make sense.")
return Model(self, Term(*self.components, *other.components))
elif isinstance(other, Model):
products = product([self], other.common_terms)
iterms = [Term(*p[0].components, *p[1].components) for p in products]
return self + Model(*iterms)
else:
return NotImplemented
def __or__(self, other):
"""Group-specific operator. Creates group-specific intercept.
Intercepts are implicitly added.
* ``"x|g"`` equals to ``"(1|g) + (x|g)"``
Distributive over right hand side
* ``"(x|g + h)"`` equals to ``"(1|g) + (1|h) + (x|g) + (x|h)"``
"""
if isinstance(other, Term):
# Only accepts terms, call terms and interactions.
# Adds implicit intercept.
terms = [GroupSpecificTerm(Intercept(), other), GroupSpecificTerm(self, other)]
return Model(*terms)
elif isinstance(other, Model):
intercepts = [
GroupSpecificTerm(Intercept(), p[1]) for p in product([self], other.common_terms)
]
slopes = [GroupSpecificTerm(p[0], p[1]) for p in product([self], other.common_terms)]
return Model(*intercepts, *slopes)
else:
return NotImplemented
def __pow__(self, other):
"""Power operator.
It leaves the term as it is. For a power in the math sense do ``I(x ** n)`` or ``{x ** n}``.
"""
c = other.components
if len(c) == 1 and isinstance(c[0].name, int) and c[0].name >= 1:
_log.warning(
"Exponentiation on an individual variable returns the variable as it is.\n"
"Use {%s**%s} or I(%s**%s) to compute the math power.",
self.name,
c[0].name,
self.name,
c[0].name,
)
return self
else:
return NotImplemented
def __repr__(self):
return self.__str__()
def __str__(self):
string = "[" + ", ".join([str(component) for component in self.components]) + "]"
return f"{self.__class__.__name__}({string})"
def set_type(self, data, env):
"""Set type of the components in the term.
Calls ``.set_type()`` method on each component in the term. For those components of class
:class:`.Variable` it only passes the data mask. For :class:`.Call` objects it also passes
the evaluation environment.
Parameters
----------
data: pd.DataFrame
The data frame where variables are taken from
env: Environment
The environment where values and functions are taken from.
"""
# Set the type of the components by calling their set_type method.
for component in self.components:
if isinstance(component, Variable):
component.set_type(data)
elif isinstance(component, Call):
component.set_type(data, env)
else:
raise ValueError(
"Can't set type on Term because at least one of the components "
f"is of the unexpected type {type(component)}."
)
# Store the type of the components
self.component_types = {
component.name: component._type # pylint: disable = protected-access
for component in self.components
}
# Determine whether this term is numeric, categoric, or an interaction.
if len(self.components) > 1:
self._type = "interaction" # pylint: disable = protected-access
else:
self._type = self.components[0]._type # pylint: disable = protected-access
def set_data(self, encoding):
"""Obtains and stores the final data object related to this term.
Calls ``.set_data()`` method on each component in the term. Then, it uses the ``.data``
attribute on each of them to build ``self.data`` and ``self.metadata``.
Parameters
----------
encoding: list or dict
Indicates if it uses full or reduced encoding when the type of the variable is
categoric.
"""
if isinstance(encoding, list) and len(encoding) == 1:
encoding = encoding[0]
elif isinstance(encoding, list) and len(encoding) > 1:
raise ValueError("encoding is a list of len > 1")
for component in self.components:
encoding_ = False
if isinstance(encoding, dict):
encoding_ = encoding.get(component.name, False)
elif isinstance(encoding, bool):
encoding_ = encoding
component.set_data(encoding_)
if self._type == "interaction":
self.data = reduce(get_interaction_matrix, [c.data["value"] for c in self.components])
self.metadata["type"] = "interaction"
self.metadata["terms"] = {
c.name: {k: v for k, v in c.data.items() if k != "value"} for c in self.components
}
else:
component = self.components[0]
self.data = component.data["value"]
self.metadata = {k: v for k, v in component.data.items() if k != "value"}
def eval_new_data(self, data):
"""Evaluates the term with new data.
Calls ``.eval_new_data()`` method on each component in the term and combines the results
appropriately.
Parameters
----------
data: pd.DataFrame
The data frame where variables are taken from
Returns
----------
result: np.array
The values resulting from evaluating this term using the new data.
"""
if self._type == "interaction":
result = reduce(
get_interaction_matrix, [c.eval_new_data(data) for c in self.components]
)
else:
result = self.components[0].eval_new_data(data)
return result
@property
def var_names(self):
"""Returns the name of the variables in the term as a set.
Loops through each component and updates the set with the ``.var_names`` of each component.
Returns
----------
var_names: set
The names of the variables involved in the term.
"""
var_names = set()
for component in self.components:
var_names.update(component.var_names)
return var_names
def get_component(self, name): # pylint: disable = inconsistent-return-statements
"""Returns a component by name.
Parameters
----------
name: string
The name of the component to return.
Returns
-------
component: :class:`.Variable` or :class:`.Call`
The component with name ``name``.
"""
for component in self.components:
if component.name == name:
return component
class GroupSpecificTerm:
"""Representation of a group specific term.
Group specific terms are of the form ``(expr | factor)``. The expression ``expr`` is evaluated
as a model formula with only common effects and produces a model matrix following the rules
for common terms. ``factor`` is inspired by factors in R, but here it is evaluated as an ordered
pandas.CategoricalDtype object.
The operator ``|`` works as in R package lme4. As its authors say: "One way to think about the
vertical bar operator is as a special kind of interaction between the model matrix and the
grouping factor. This interaction ensures that the columns of the model matrix have different
effects for each level of the grouping factor"
Parameters
----------
expr: :class:`.Intercept` or :class:`.Term`
The term for which we want to have a group specific term.
factor: :class:`.Term`
The factor that determines the groups in the group specific term.
Attributes
----------
factor_type: pandas.core.dtypes.dtypes.CategoricalDtype
The type assigned to the grouping factor ``factor``. This is useful when we need to
create a design matrix for a new data set.
"""
def __init__(self, expr, factor):
self.expr = expr
self.factor = factor
self.groups = None
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return self.expr == other.expr and self.factor == other.factor
def __hash__(self):
return hash((self.expr, self.factor))
def __repr__(self):
return self.__str__()
def __str__(self):
strlist = [
f"expr= {' '.join(str(self.expr).splitlines(True))}",
f"factor= {' '.join(str(self.factor).splitlines(True))}",
]
return self.__class__.__name__ + "(\n " + ",\n ".join(strlist) + "\n)"
def eval(self, data, env, encoding):
"""Evaluates term.
First, it evaluates the variable in ``self.factor``, creates an ordered categorical data
type using its levels, and stores it in ``self.factor_type``. Then, it obtains the
design matrix for ``self.expr`` to finally produce the matrix for the group specific
effect.
The output contains the following information
* ``"type"``: The type of the ``expr`` term.
* ``"Xi"``: The design matrix for the ``expr`` term.
* ``"Ji"``: The design matrix for the ``factor`` term.
* ``"Zi"``: The design matrix for the group specific term.
* ``"groups"``: The groups present in ``factor``.
If ``"type"`` is ``"categoric"``, the output dictionary also contains
* ``"levels"``: Levels of the term in ``expr``.
* ``"reference"``: The level taken as baseline.
* ``"encoding"``: The encoding of the term, either ``"full"`` or ``"reduced"``
If ``"type"`` is ``"interaction"``, the output dictionary also contains
* ``"terms"``: Metadata for each of the components in the interaction in ``expr``.
Parameters
----------
data: pandas.DataFrame
The data frame where variables are taken from.
env: Environment
The environment where values and functions are taken from.
encoding: bool
Whether to use full or reduced rank encoding when ``expr`` is categoric.
Returns
-------
out: dict
See above.
"""
# Factor must be considered categorical, and with full encoding. We set type and obtain
# data for the factor term manually.
# Set type on each component to check data is behaved as expected and then
# manually set type of the components to categoric.
for comp in self.factor.components:
if isinstance(comp, Variable):
comp.set_type(data)
elif isinstance(comp, Call):
comp.set_type(data, env)
else:
raise ValueError(
"Can't set type on Term because at least one of the components "
f"is of the unexpected type {type(comp)}."
)
comp._type = "categoric" # pylint: disable = protected-access
# Store the type of the components.
# We know they are categoric.
self.factor.component_types = {comp.name: "categoric" for comp in self.factor.components}
if len(self.factor.components) > 1:
self.factor._type = "interaction" # pylint: disable = protected-access
else:
self.factor._type = "categoric" # pylint: disable = protected-access
# Pass encoding=True when setting data.
self.factor.set_data(True)
# Obtain group names
groups = []
for comp in self.factor.components:
# We're certain they are all categoric with full encoding.
groups.append([str(lvl) for lvl in comp.data["levels"]])
self.groups = [":".join(s) for s in list(itertools.product(*groups))]
self.expr.set_type(data, env)
self.expr.set_data(encoding)
Xi = self.expr.data
Ji = self.factor.data
Zi = linalg.khatri_rao(Ji.T, Xi.T).T
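# scipy's khatri_rao is a column-wise Kronecker product: with Xi of shape (n, p) and Ji of shape (n, g), Zi has shape (n, p * g), i.e. every column of the expr design matrix crossed with every group indicator column.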
out = {
"type": self.expr.metadata["type"],
"Xi": Xi,
"Ji": Ji,
"Zi": Zi,
"groups": self.groups,
}
if self.expr._type == "categoric": # pylint: disable = protected-access
out["levels"] = self.expr.metadata["levels"]
out["reference"] = self.expr.metadata["reference"]
out["encoding"] = self.expr.metadata["encoding"]
elif self.expr._type == "interaction": # pylint: disable = protected-access
out["terms"] = self.expr.metadata["terms"]
return out
def eval_new_data(self, data):
"""Evaluates the term with new data.
Converts the variable in ``factor`` to the type remembered from the first evaluation and
produces the design matrix for this grouping, calls ``.eval_new_data()`` on ``self.expr``
to obtain the design matrix for the ``expr`` side, then computes the design matrix
corresponding to the group specific effect.
Parameters
----------
data: pd.DataFrame
The data frame where variables are taken from.
Returns
----------
out: dict
Same rules as in :meth:`eval <GroupSpecificTerm.eval>`.
"""
Xi = self.expr.eval_new_data(data)
Ji = self.factor.eval_new_data(data)
Zi = linalg.khatri_rao(Ji.T, Xi.T).T
out = {
"type": self.expr.metadata["type"],
"Xi": Xi,
"Ji": Ji,
"Zi": Zi,
"groups": self.groups,
}
if self.expr._type == "categoric": # pylint: disable = protected-access
out["levels"] = self.expr.metadata["levels"]
out["reference"] = self.expr.metadata["reference"]
out["encoding"] = self.expr.metadata["encoding"]
elif self.expr._type == "interaction": # pylint: disable = protected-access
out["terms"] = self.expr.metadata["terms"]
return out
@property
def var_names(self):
"""Returns the name of the variables in the term as a set.
Obtains both the variables in the ``expr`` as well as the variables in ``factor``.
Returns
----------
var_names: set
The names of the variables involved in the term.
"""
expr_names = self.expr.var_names.copy()
factor_names = self.factor.var_names.copy()
return expr_names.union(factor_names)
def get_name(self):
"""Obtain string representation of the name of the term.
Returns
----------
name: str
The name of the term, such as ``1|g`` or ``var|g``.
"""
name = ""
if isinstance(self.expr, Intercept):
name += "1|"
elif isinstance(self.expr, Term):
name += f"{self.expr.name}|"
else:
raise ValueError("Invalid LHS expression for group specific term")
if isinstance(self.factor, Term):
name += self.factor.name
else:
raise ValueError("Invalid RHS expression for group specific term")
return name
class Response:
"""Representation of a response term.
It is mostly a wrapper around :class:`.Term`.
Parameters
----------
term: :class:`.Term`
The term we want to take as response in the model. Must contain only one component.
"""
def __init__(self, term):
if isinstance(term, Term):
n = len(term.components)
if n == 1:
self.term = term
self.term.components[0].is_response = True
else:
raise ValueError(f"The response term must contain only one component, not {n}.")
else:
raise ValueError(f"The response term must be of class Term, not {type(term)}.")
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
else:
return self.term == other.term
def __add__(self, other):
"""Modelled as operator.
The operator is ``~``, but since it is not an operator in Python, we internally replace it
with ``+``. It means the LHS is taken as the response, and the RHS as the predictor.
"""
if isinstance(other, (Term, GroupSpecificTerm, Intercept)):
return Model(other, response=self)
elif isinstance(other, Model):
return other.add_response(self)
else:
return NotImplemented
def __repr__(self):
return self.__str__()
def __str__(self):
return f"{self.__class__.__name__}({self.term})"
@property
def var_names(self):
"""Returns the name of the variables in the response as a set."""
return self.term.var_names
def set_type(self, data, env):
"""Set type of the response term."""
self.term.set_type(data, env)
def set_data(self, encoding=False):
"""Set data of the response term."""
self.term.set_data(encoding)
ACCEPTED_TERMS = (Term, GroupSpecificTerm, Intercept, NegatedIntercept)
class Model:
"""Representation of a model.
Parameters
----------
terms: :class:`.Term`
This object can be instantiated with one or many terms.
response::class:`.Response`
The response term. Defaults to ``None`` which means there is no response.
"""
def __init__(self, *terms, response=None):
if isinstance(response, Response) or response is None:
self.response = response
else:
raise ValueError("Response must be of class Response.")
if all(isinstance(term, ACCEPTED_TERMS) for term in terms):
self.common_terms = [term for term in terms if not isinstance(term, GroupSpecificTerm)]
self.group_terms = [term for term in terms if isinstance(term, GroupSpecificTerm)]
else:
raise ValueError("There is a least one term of an unexpected class.")
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
equal_terms = set(self.terms) == set(other.terms)
equal_response = self.response == other.response
return equal_terms and equal_response
def __add__(self, other):
"""Addition operator. Analogous to set union.
Adds terms to the model and returns the model.
Returns
-------
self: :class:`.Model`
The same model object with the added term(s).
"""
if isinstance(other, NegatedIntercept):
return self - Intercept()
elif isinstance(other, (Term, GroupSpecificTerm, Intercept)):
return self.add_term(other)
elif isinstance(other, type(self)):
for term in other.terms:
self.add_term(term)
return self
else:
return NotImplemented
def __sub__(self, other):
"""Subtraction operator. Analogous to set difference.
* ``"(x + y) - (x + u)"`` equals to ``"y + u"``..
* ``"(x + y) - x"`` equals to ``"y"``.
* ``"(x + y + (1 | g)) - (1 | g)"`` equals to ``"x + y"``.
Returns
-------
self: :class:`.Model`
The same model object with the removed term(s).
"""
if isinstance(other, type(self)):
for term in other.terms:
if term in self.common_terms:
self.common_terms.remove(term)
if term in self.group_terms:
self.group_terms.remove(term)
return self
elif isinstance(other, (Term, Intercept)):
if other in self.common_terms:
self.common_terms.remove(other)
return self
elif isinstance(other, GroupSpecificTerm):
if other in self.group_terms:
self.group_terms.remove(other)
return self
else:
return NotImplemented
def __matmul__(self, other):
"""Simple interaction operator.
* ``"(x + y) : (u + v)"`` equals to ``"x:u + x:v + y:u + y:v"``.
* ``"(x + y) : u"`` equals to ``"x:u + y:u"``.
* ``"(x + y) : f(u)"`` equals to ``"x:f(u) + y:f(u)"``.
Returns
-------
model: :class:`.Model`
A new instance of the model with all the interaction terms computed.
"""
if isinstance(other, type(self)):
products = product(self.common_terms, other.common_terms)
iterms = [Term(*p[0].components, *p[1].components) for p in products]
return Model(*iterms)
elif isinstance(other, Term):
products = product(self.common_terms, [other])
iterms = [Term(*p[0].components, *p[1].components) for p in products]
return Model(*iterms)
else:
return NotImplemented
def __mul__(self, other):
"""Full interaction operator.
* ``"(x + y) * (u + v)"`` equals to ``"x + y + u + v + x:u + x:v + y:u + y:v"``.
* ``"(x + y) * u"`` equals to ``"x + y + u + x:u + y:u"``.
Returns
-------
model: :class:`.Model`
A new instance of the model with all the interaction terms computed.
"""
if self == other:
return self
elif isinstance(other, type(self)):
if len(other.common_terms) == 1:
components = other.common_terms[0].components
if len(components) == 1 and isinstance(components[0].name, (int, float)):
raise TypeError("Interaction with numeric does not make sense.")
products = product(self.common_terms, other.common_terms)
terms = self.common_terms + other.common_terms
iterms = [Term(*p[0].components, *p[1].components) for p in products]
return Model(*terms) + Model(*iterms)
elif isinstance(other, Term):
if len(other.components) == 1 and isinstance(other.components[0].name, (int, float)):
raise TypeError("Interaction with numeric does not make sense.")
products = product(self.common_terms, [other])
terms = self.common_terms + [other]
iterms = [Term(*p[0].components, *p[1].components) for p in products]
return Model(*terms) + Model(*iterms)
else:
return NotImplemented
def __pow__(self, other):
"""Power of a set made of :class:`.Term`
Computes all interactions up to order ``n`` between the terms in the set.
* ``"(x + y + z) ** 2"`` equals to ``"x + y + z + x:y + x:z + y:z"``.
Returns
-------
model: :class:`.Model`
A new instance of the model with all the terms computed.
"""
if isinstance(other, Term) and len(other.components) == 1:
value = other.components[0].name
if isinstance(value, int) and value >= 1:
comb = [
list(p) for i in range(2, value + 1) for p in combinations(self.common_terms, i)
]
iterms = [Term(*[comp for term in terms for comp in term.components]) for terms in comb]
return self + Model(*iterms)
else:
raise ValueError("Power must be a positive integer.")
def __truediv__(self, other):
"""Division interaction operator.
* ``"(x + y) / z"`` equals to ``"x + y + x:y:z"``.
* ``"(x + y) / (u + v)"`` equals to ``"x + y + x:y:u + x:y:v"``.
Returns
-------
model: :class:`.Model`
A new instance of the model with all the terms computed.
"""
if isinstance(other, Term):
return self.add_term(Term(*self.common_components + other.components))
elif isinstance(other, Model):
iterms = [Term(*self.common_components, comp) for comp in other.common_components]
return self + Model(*iterms)
else:
return NotImplemented
def __or__(self, other):
"""Group specific term operator.
Only _models_ ``"0 + x"`` arrive here.
* ``"(0 + x | g)"`` equals to ``"(x|g)"``.
* ``"(0 + x | g + y)"`` equals to ``"(x|g) + (x|y)"``.
There are several edge cases to handle here. See in-line comments.
Returns
-------
model: :class:`.Model`
A new instance of the model with all the terms computed.
"""
# If only one term in the expr, resolve according to the type of the term.
if len(self.common_terms) == 1:
return self.common_terms[0] | other
# Handle intercept
if Intercept() in self.common_terms and NegatedIntercept() in self.common_terms:
# Explicit addition and negation -> remove both -> no intercept
self.common_terms.remove(Intercept())
self.common_terms.remove(NegatedIntercept())
elif NegatedIntercept() in self.common_terms:
# Negation -> remove negation and do not add intercept
self.common_terms.remove(NegatedIntercept())
elif Intercept() not in self.common_terms:
# No negation and no explicit intercept -> implicit intercept
self.common_terms.insert(0, Intercept())
if isinstance(other, Term):
products = product(self.common_terms, [other])
terms = [GroupSpecificTerm(p[0], p[1]) for p in products]
return Model(*terms)
elif isinstance(other, type(self)):
products = product(self.common_terms, other.common_terms)
terms = [GroupSpecificTerm(p[0], p[1]) for p in products]
return Model(*terms)
else:
return NotImplemented
def __repr__(self):
return self.__str__()
def __str__(self):
terms = [str(term) for term in self.common_terms]
if self.response is not None:
terms.insert(0, str(self.response))
string = ",\n ".join([str(term) for term in terms])
if self.group_terms:
group_terms = ",\n".join([str(term) for term in self.group_terms])
if len(string) > 0:
string += ",\n "
string += " ".join(group_terms.splitlines(True))
return f"{self.__class__.__name__}(\n {string}\n)"
def add_response(self, term):
"""Add response term to model description.
This method is called when something like ``"y ~ x + z"`` appears in a model formula.
This method is called via special methods such as :meth:`Response.__add__`.
Returns
-------
self: :class:`.Model`
The same model object but now with a response term.
"""
if isinstance(term, Response):
self.response = term
return self
else:
raise ValueError("not Response")
def add_term(self, term):
"""Add term to model description.
The term added can be of class :class:`.Intercept` :class:`.Term`, or
:class:`.GroupSpecificTerm`. It appends the new term object to the list of common terms or
group specific terms as appropriate.
This method is called via special methods such as :meth:`__add__`.
Returns
-------
self: :class:`.Model`
The same model object but now containing the new term.
"""
if isinstance(term, GroupSpecificTerm):
if term not in self.group_terms:
self.group_terms.append(term)
return self
elif isinstance(term, (Term, Intercept)):
if term not in self.common_terms:
self.common_terms.append(term)
return self
else:
raise ValueError(f"Can't add an object of class {type(term)} to Model.")
@property
def terms(self):
"""Terms in the model.
Returns
-------
terms: list
A list containing both common and group specific terms.
"""
return self.common_terms + self.group_terms
@property
def common_components(self):
"""Components in common terms in the model.
Returns
-------
components: list
A list containing all components from common terms in the model.
"""
# Note: Check whether this method is really necessary.
return [
comp for term in self.common_terms if isinstance(term, Term) for comp in term.components
]
@property
def var_names(self):
"""Get the name of the variables in the model.
Returns
-------
var_names: set
The names of all variables in the model.
"""
var_names = set()
for term in self.terms:
var_names.update(term.var_names)
if self.response is not None:
var_names.update(self.response.var_names)
return var_names
def set_types(self, data, env):
"""Set the type of the common terms in the model.
Calls ``.set_type()`` method on term in the model.
Parameters
----------
data: pd.DataFrame
The data frame where variables are taken from
env: Environment
The environment where values and functions are taken from.
"""
for term in self.common_terms:
term.set_type(data, env)
def _encoding_groups(self):
components = {}
for term in self.common_terms:
if term._type == "interaction": # pylint: disable = protected-access
components[term.name] = {
c.name: c._type for c in term.components # pylint: disable = protected-access
}
else:
components[term.name] = term._type # pylint: disable = protected-access
# First, group with only categoric terms
categoric_group = {}
for k, v in components.items():
if v == "categoric":
categoric_group[k] = [k]
elif v == "Intercept":
categoric_group[k] = []
elif isinstance(v, dict): # interaction
# If all categoric terms in the interaction
if all(v_ == "categoric" for v_ in v.values()):
categoric_group[k] = list(v.keys())
# Determine groups of numerics
numeric_group_sets = []
numeric_groups = []
for k, v in components.items():
# v is dict when interaction, otherwise is string.
if isinstance(v, dict):
categoric = [k_ for k_, v_ in v.items() if v_ == "categoric"]
numeric = [k_ for k_, v_ in v.items() if v_ == "numeric"]
# if it is an interaction with both categoric and numeric terms
if categoric and numeric:
numeric_set = set(numeric)
numeric_part = ":".join(numeric)
if numeric_set not in numeric_group_sets:
numeric_group_sets.append(numeric_set)
numeric_groups.append({})
idx = numeric_group_sets.index(numeric_set)
# Prevent full encoding when numeric part is present outside
# this numeric-categoric interaction
if numeric_part in components.keys():
numeric_groups[idx][numeric_part] = []
numeric_groups[idx][k] = categoric
return [categoric_group] + numeric_groups
def _encoding_bools(self):
"""Determine encodings for terms containing at least one categorical variable.
This method returns dictionaries with ``True``/``False`` values.
``True`` means the categorical variable uses 'levels' dummies.
``False`` means the categorical variable uses 'levels - 1' dummies.
"""
groups = self._encoding_groups()
l = [pick_contrasts(group) for group in groups]
result = {}
for d in l:
result.update(d)
return result
def eval(self, data, env):
"""Evaluates terms in the model.
Only common effects are evaluated here. Group specific terms are evaluated individually
in :class:`GroupEffectsMatrix <formulae.matrices.GroupEffectsMatrix>`.
Parameters
----------
data: pd.DataFrame
The data frame where variables are taken from
env: Environment
The environment where values and functions are taken from.
Returns
-------
result: dict
A dictionary where keys are the name of the terms and the values are their ``.data``
attribute.
"""
self.set_types(data, env)
encodings = self._encoding_bools()
result = {}
# First, we have to add terms if the encoding implies so.
# Group specific effects aren't evaluated here -- this may change
common_terms = self.common_terms.copy()
for term in common_terms:
term_encoding = False
if term.name in encodings.keys():
term_encoding = encodings[term.name]
if hasattr(term_encoding, "__len__") and len(term_encoding) > 1:
# we're in an interaction that added terms.
# we need to create and evaluate these extra terms.
# i.e. "y ~ g1:g2", both g1 and g2 categoric, is equivalent to "y ~ g2 + g1:g2"
# Possibly an interaction adds LOWER order terms, but NEVER HIGHER order terms.
for (idx, encoding) in enumerate(term_encoding):
# Last term never adds any new term, it corresponds to the outer `term`.
if idx == len(term_encoding) - 1:
term.set_data(encoding)
result[term.name] = term.data
else:
extra_term = _create_and_eval_extra_term(term, encoding, data, env)
result[extra_term.name] = extra_term.data
# Finally, add term to self.common_terms object, right before the term
# that causes its addition.
self.common_terms.insert(self.common_terms.index(term), extra_term)
else:
# This term does not add any lower order term, so we just evaluate it as it is.
term.set_data(term_encoding)
result[term.name] = term.data
return result
def _create_and_eval_extra_term(term, encoding, data, env):
if len(encoding) == 1:
component_name = list(encoding.keys())[0]
encoding_ = list(encoding.values())[0]
component = term.get_component(component_name)
extra_term = Term(component)
else:
component_names = [c.name for c in term.components]
encoding_ = encoding
components = [
term.get_component(name) for name in component_names if name in encoding.keys()
]
extra_term = Term(*components)
extra_term.set_type(data, env)
extra_term.set_data(encoding_)
return extra_term
|
py | b417d7a23fb5144e5775b3f62185cce42a6417fe | import discord #Imports==
import unicodedata #==
#Set client
client = discord.Client()
#function
def is_japanese(string):
for ch in string: #loop strings
name = unicodedata.name(ch) #Str to Uni
if "CJK UNIFIED" in name \ #Kanji check
or "HIRAGANA" in name \ #Hiragana check
or "KATAKANA" in name: #Katakana check
return True #If include
return False #Not include
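# Example: is_japanese("こんにちは world") returns True, is_japanese("hello") returns False.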
# Notify in the console when the bot has started
@client.event
async def on_ready():
print('ログインしました')
#Discord Message recieve event
@client.event
async def on_message(message):
if client.user != message.author:
if message.channel.id == "550280477295247362": #Set channel ID
if is_japanese(message.content):
reply = f"{message.author.mention} ```Hey you! This message contains JAPANESE!! TakeCare.. \n おいお前! このメッセージには日本語が含まれてるぞ!! 気をつけろよ!!```" # 返信文の作成
await client.send_message(message.channel, reply)
if client.user.id in message.content: # check whether the bot was mentioned
reply = 'Hello, how u doing?' # build the reply message
await client.send_message(message.channel, reply)
# Connect and start the bot
# (put your bot account's access token in place of TOKEN HERE)
client.run('TOKEN HERE')
|
py | b417d96e8967efa610d4a00147e985efd1788175 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import re
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'hetzner-fix-report'
copyright = '2019, Felix'
author = 'Felix'
# The full version, including alpha/beta/rc tags
release = re.sub('^v', '', os.popen('git describe --tags').read().strip())
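# The release string comes from the latest git tag, with a leading "v" stripped (e.g. "v1.2.0" becomes "1.2.0").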
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
py | b417d9f7c010f1aab1cf7f8ac71384ab8c7dd7c1 | import os
from flask import Flask, session, render_template, request, flash, redirect, url_for, jsonify
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess secure key'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# setup SQLAlchemy
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
db = SQLAlchemy(app)
# define database tables
class Artist(db.Model):
__tablename__ = 'artists'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
about = db.Column(db.Text)
songs = db.relationship('Song', backref='artist', cascade="delete")
class Song(db.Model):
__tablename__ = 'songs'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(256))
year = db.Column(db.Integer)
lyrics = db.Column(db.Text)
artist_id = db.Column(db.Integer, db.ForeignKey('artists.id'))
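# Deleting an Artist also deletes its Songs via the cascade="delete" option on Artist.songs above.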
@app.route('/')
def index():
# return HTML
# return "<h1>this is the index page!<h1>"
return render_template('index.html')
@app.route('/artists')
def show_all_artists():
artists = Artist.query.all()
return render_template('artist-all.html', artists=artists)
@app.route('/artist/add', methods=['GET', 'POST'])
def add_artists():
if request.method == 'GET':
return render_template('artist-add.html')
if request.method == 'POST':
# get data from the form
name = request.form['name']
about = request.form['about']
# insert the data into the database
artist = Artist(name=name, about=about)
db.session.add(artist)
db.session.commit()
return redirect(url_for('show_all_artists'))
@app.route('/api/artist/add', methods=['POST'])
def add_ajax_artists():
# get data from the form
name = request.form['name']
about = request.form['about']
# insert the data into the database
artist = Artist(name=name, about=about)
db.session.add(artist)
db.session.commit()
# flash message type: success, info, warning, and danger from bootstrap
flash('Artist Inserted', 'success')
return jsonify({"id": str(artist.id), "name": artist.name})
@app.route('/artist/edit/<int:id>', methods=['GET', 'POST'])
def edit_artist(id):
artist = Artist.query.filter_by(id=id).first()
if request.method == 'GET':
return render_template('artist-edit.html', artist=artist)
if request.method == 'POST':
# update data based on the form data
artist.name = request.form['name']
artist.about = request.form['about']
# update the database
db.session.commit()
return redirect(url_for('show_all_artists'))
@app.route('/artist/delete/<int:id>', methods=['GET', 'POST'])
def delete_artist(id):
artist = Artist.query.filter_by(id=id).first()
if request.method == 'GET':
return render_template('artist-delete.html', artist=artist)
if request.method == 'POST':
# delete the artist by id
# all related songs are deleted as well
db.session.delete(artist)
db.session.commit()
return redirect(url_for('show_all_artists'))
@app.route('/api/artist/<int:id>', methods=['DELETE'])
def delete_ajax_artist(id):
artist = Artist.query.get_or_404(id)
db.session.delete(artist)
db.session.commit()
return jsonify({"id": str(artist.id), "name": artist.name})
# song-all.html adds song id to the edit button using a hidden input
@app.route('/songs')
def show_all_songs():
songs = Song.query.all()
return render_template('song-all.html', songs=songs)
@app.route('/song/add', methods=['GET', 'POST'])
def add_songs():
if request.method == 'GET':
artists = Artist.query.all()
return render_template('song-add.html', artists=artists)
if request.method == 'POST':
# get data from the form
name = request.form['name']
year = request.form['year']
lyrics = request.form['lyrics']
artist_name = request.form['artist']
artist = Artist.query.filter_by(name=artist_name).first()
song = Song(name=name, year=year, lyrics=lyrics, artist=artist)
# insert the data into the database
db.session.add(song)
db.session.commit()
return redirect(url_for('show_all_songs'))
@app.route('/song/edit/<int:id>', methods=['GET', 'POST'])
def edit_song(id):
song = Song.query.filter_by(id=id).first()
artists = Artist.query.all()
if request.method == 'GET':
return render_template('song-edit.html', song=song, artists=artists)
if request.method == 'POST':
# update data based on the form data
song.name = request.form['name']
song.year = request.form['year']
song.lyrics = request.form['lyrics']
artist_name = request.form['artist']
artist = Artist.query.filter_by(name=artist_name).first()
song.artist = artist
# update the database
db.session.commit()
return redirect(url_for('show_all_songs'))
@app.route('/song/delete/<int:id>', methods=['GET', 'POST'])
def delete_song(id):
song = Song.query.filter_by(id=id).first()
artists = Artist.query.all()
if request.method == 'GET':
return render_template('song-delete.html', song=song, artists=artists)
if request.method == 'POST':
# use the id to delete the song
# song.query.filter_by(id=id).delete()
db.session.delete(song)
db.session.commit()
return redirect(url_for('show_all_songs'))
@app.route('/api/song/<int:id>', methods=['DELETE'])
def delete_ajax_song(id):
song = Song.query.get_or_404(id)
db.session.delete(song)
db.session.commit()
return jsonify({"id": str(song.id), "name": song.name})
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/users')
def show_all_users():
return render_template('user-all.html')
@app.route('/form-demo', methods=['GET', 'POST'])
def form_demo():
# how to get form data is different for GET vs. POST
if request.method == 'GET':
first_name = request.args.get('first_name')
if first_name:
return render_template('form-demo.html', first_name=first_name)
else:
return render_template('form-demo.html', first_name=session.get('first_name'))
if request.method == 'POST':
session['first_name'] = request.form['first_name']
# return render_template('form-demo.html', first_name=first_name)
return redirect(url_for('form_demo'))
@app.route('/user/<string:name>/')
def get_user_name(name):
# return "hello " + name
# return "Hello %s, this is %s" % (name, 'administrator')
return render_template('user.html', name=name)
@app.route('/song/<int:id>/')
def get_song_id(id):
# return "This song's ID is " + str(id)
return "Hi, this is %s and the song's id is %d" % ('administrator', id)
# https://goo.gl/Pc39w8 explains the following line
if __name__ == '__main__':
# activates the debugger and the reloader during development
# app.run(debug=True)
app.run()
# make the server publicly available on port 80
# note that Ports below 1024 can be opened only by root
# you need to use sudo for the following command
# app.run(host='0.0.0.0', port=80)
|
py | b417dc3f49c4c109c7c6f37eddddf5cea945df4e | # 2019-11-13 21:06:16(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# from scipy.misc import comb # float
# import numpy as np
def main():
n, k, *v = [int(x) for x in sys.stdin.read().split()]
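# Brute force: take a jewels from the left end and b from the right end (a + b <= min(n, k)), then spend any leftover operations putting back the most negative jewels taken.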
ma = 0
for a in range(min(n, k)+1):
for b in range(min(n, k)-a+1):
take = v[:a] + v[-b:] if b != 0 else v[:a]
take.sort()
if a + b == 0:
optimum_sum = 0
else:
for i in range(min(k-(a+b), a+b)-1, -1, -1):
if take[i] > 0:
continue
else:
optimum_sum = sum(take[i+1:])
break
else:
optimum_sum = sum(take)
ma = max(ma, optimum_sum)
print(ma)
if __name__ == "__main__":
main()
|
py | b417dcee7b2640a9297ebb641ee4c27f8f3fa246 | import os
class Cert(object):
def __init__(self, name, buff):
self.name = name
self.len = len(buff)
self.buff = buff
pass
def __str__(self):
out_str = ['\0']*32
for i in range(len(self.name)):
out_str[i] = self.name[i]
out_str = "".join(out_str)
out_str += str(chr(self.len & 0xFF))
out_str += str(chr((self.len & 0xFF00) >> 8))
out_str += self.buff
return out_str
pass
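# Record layout produced by Cert.__str__ above:
#   bytes 0-31  : certificate name, NUL-padded to 32 bytes
#   bytes 32-33 : certificate length, little-endian uint16
#   bytes 34-.. : raw certificate contents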
def main():
cert_list = []
file_list = os.listdir(os.getcwd())
cert_file_list = []
for _file in file_list:
pos = _file.find(".cer")
if pos != -1:
cert_file_list.append(_file[:pos])
for cert_file in cert_file_list:
with open(cert_file+".cer", 'rb') as f:
buff = f.read()
cert_list.append(Cert(cert_file, buff))
with open('esp_ca_cert.bin', 'wb+') as f:
for _cert in cert_list:
f.write("%s" % _cert)
pass
if __name__ == '__main__':
main()
|
py | b417dd4199687da825854971ed1ac3c3285e421a | # -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import)
import logging
from optparse import make_option
import six
from django.core.management.base import BaseCommand, CommandError
from django import db
from tracker import models
logger = logging.getLogger(__name__)
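# Usage sketch (the command name is this module's file name, which is not shown here):
#   ./manage.py <command_name> 2014 2015 --cache
# recomputes the per-year Rank rows and, with --cache, re-ranks the stored positions.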
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--cache',
action='store_true',
dest='cache',
default=False,
),
)
def handle(self, *args, **options):
years = list(map(int, args))
for year in years:
self.empty_ranking(year)
for year in years:
start, end = models.Rank.get_period_for_year(year)
qs = (models.Profile.objects
.popular()
.filter(
alias__player__game__date_finished__gte=start,
alias__player__game__date_finished__lte=end
)
.distinct('pk')
)
for profile in qs:
print('Checking %s, %s' % (profile.name, profile.pk))
aggregated = profile.aggregate_mode_stats(models.Profile.SET_STATS_ALL, start, end)
models.Rank.objects.store_many(aggregated, year, profile)
if options['cache']:
for year in years:
models.Rank.objects.rank(year)
@staticmethod
def empty_ranking(year):
cursor = db.connection.cursor()
try:
cursor.execute(
'DELETE FROM {table} WHERE year=%s'.format(table=models.Rank._meta.db_table),
[year]
)
finally:
cursor.close() |
py | b417ddb12612fc3332540ee4b53fe4d7ad1f6f8e | import pandas as pd
from Linux_Android_Classification.Src.data_process import get_firstN_rows_opcodes, data_process_opcodes, get_data_Dataframe
from Linux_Android_Classification.Src.common_libs import *
from sklearn.svm import LinearSVC
from sklearn.feature_selection import RFE
def write2file(list_data, filepath):
with open(filepath, "w") as f:
for i in list_data:
f.write("%s\n" % i)
filepath = r"../../../Files/opcodes624_fcns249-malicious.csv"
# data, label = data_process_opcodes(filepath)
csv_data, firstN = get_data_Dataframe(filepath)
# all
data = csv_data.iloc[:, :-2]
hash_values = csv_data["hash_value"]
label = csv_data["android"]
# first 5000 data
# data = firstN.iloc[:, :-2]
# hash_values = firstN["hash_value"]
# label = firstN["android"]
# print(data)
# data, label = get_firstN_rows_opcodes(filepath)
"""
"""
# selector = selector.fit(data_train, label_train)
# selector_score = selector.score(data_test, label_test)
# print(selector_score)
# print(selector.ranking_)
# rfeData = pd.DataFrame(selector.transform(data_train), columns=data_train.columns.values[selector.support_])
# print(rfeData.columns.tolist())
##########################################
# print(data, label)
result_params = []
lsvc_results = []
lsvc_ranking = []
lsvc_columns = []
importances = []
# note: this hold-out split is immediately superseded inside the StratifiedKFold loop below
data_train, data_test, label_train, label_test = train_test_split(data, label, test_size=0.1, shuffle=True)
skf = StratifiedKFold(n_splits=10)
skf.get_n_splits(data, label)
for train_index, test_index in skf.split(data, label):
data_train, data_test = data.iloc[train_index], data.iloc[test_index]
label_train, label_test = label[train_index], label[test_index]
print("TRAIN: ", train_index, "TEST: ", test_index)
seed = 1
clf = LinearSVC(random_state=seed)
selector = RFE(estimator=clf)
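    # GridSearchCV tunes the LinearSVC wrapped inside the RFE selector, hence the
    # 'estimator__' prefix on every hyperparameter name below.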
params = [{'estimator__penalty': ['l2'], 'estimator__loss': ['squared_hinge', 'hinge'],
'estimator__C': [1, 10], 'estimator__max_iter': [10000]}]
grid = GridSearchCV(selector, params, cv=10, verbose=10)
grid.fit(data_train, label_train)
best_params = grid.best_params_
result_params.append(best_params)
print("best params: " + str(best_params))
print("best scores: " + str(grid.best_score_))
clf_lsvc = LinearSVC(penalty=best_params['estimator__penalty'], loss=best_params['estimator__loss'],
C=best_params['estimator__C'], max_iter=best_params['estimator__max_iter'])
rfe_lsvc = RFE(clf_lsvc)
rfe_lsvc.fit(data_train, label_train)
rfeData_train = pd.DataFrame(rfe_lsvc.transform(data_train), columns=data_train.columns.values[rfe_lsvc.support_])
for i in rfeData_train.columns.tolist():
lsvc_columns.append(i)
lsvc_ranking.append(rfe_lsvc.ranking_)
    # clf_lsvc itself is never fitted here; predict through the fitted RFE wrapper instead
    score = roc_auc_score(label_test, rfe_lsvc.predict(data_test))
lsvc_results.append(score)
print(score)
write2file(result_params, r"results_parames.txt")
write2file(lsvc_results, r"lsvc_results.txt")
write2file(lsvc_ranking, r'lsvc_ranking_.txt')
write2file(lsvc_columns, r"lsvc_columns.txt")
|
py | b417de2dcf0b99001085939cba0a057e0d7d6d48 | import numpy as np
from visdom import Visdom
viz = Visdom()
win = None
def update_viz(ep, ep_reward, algo):
global win
if win is None:
win = viz.line(
X=np.array([ep]),
Y=np.array([ep_reward]),
win=algo,
opts=dict(
title=algo,
xlabel='Timesteps',
ylabel='Episodic Reward',
fillarea=False,
markers=True,
markersize=4,
dash=np.array(['dot', 'dot', 'dot','dot']),
opacity=.25
)
)
else:
viz.line(
X=np.array([ep]),
Y=np.array([ep_reward]),
win=win,
# name='all',
update='append'
)
viz.line(
X=np.array([ep]),
Y=np.array([ep_reward.mean()]),
win=win,
name='mean',
update='append',
opts=dict(
fillarea=False,
markers=True,
markersize=7
)
)
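# Usage sketch (assumes ep_reward is a numpy array of per-run rewards):
#   update_viz(10, np.array([1.0, 2.5, 0.7]), 'DDPG')
# creates the 'DDPG' window on the first call, then appends the rewards and
# their mean (the 'mean' trace) on subsequent calls.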
|
py | b417dee23220843253c3080c86408a3a73dd982c | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bootstraps gn.
It is done by first building it manually in a temporary directory, then building
it with its own BUILD.gn to the final destination.
"""
import contextlib
import errno
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
BOOTSTRAP_DIR = os.path.dirname(os.path.abspath(__file__))
GN_ROOT = os.path.dirname(BOOTSTRAP_DIR)
SRC_ROOT = os.path.dirname(os.path.dirname(GN_ROOT))
is_linux = sys.platform.startswith('linux')
is_mac = sys.platform.startswith('darwin')
is_posix = is_linux or is_mac
def check_call(cmd, **kwargs):
logging.debug('Running: %s', ' '.join(cmd))
subprocess.check_call(cmd, cwd=GN_ROOT, **kwargs)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
@contextlib.contextmanager
def scoped_tempdir():
path = tempfile.mkdtemp()
try:
yield path
finally:
shutil.rmtree(path)
def main(argv):
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option('-d', '--debug', action='store_true',
help='Do a debug build. Defaults to release build.')
parser.add_option('-o', '--output',
help='place output in PATH', metavar='PATH')
parser.add_option('-s', '--no-rebuild', action='store_true',
help='Do not rebuild GN with GN.')
parser.add_option('-v', '--verbose', action='store_true',
help='Log more details')
options, args = parser.parse_args(argv)
if args:
parser.error('Unrecognized command line arguments: %s.' % ', '.join(args))
logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR)
if options.debug:
build_rel = os.path.join('out', 'Debug')
else:
build_rel = os.path.join('out', 'Release')
build_root = os.path.join(SRC_ROOT, build_rel)
try:
with scoped_tempdir() as tempdir:
print 'Building gn manually in a temporary directory for bootstrapping...'
build_gn_with_ninja_manually(tempdir, options)
temp_gn = os.path.join(tempdir, 'gn')
out_gn = os.path.join(build_root, 'gn')
if options.no_rebuild:
mkdir_p(build_root)
shutil.copy2(temp_gn, out_gn)
else:
print 'Building gn using itself to %s...' % build_rel
build_gn_with_gn(temp_gn, build_rel, options)
if options.output:
# Preserve the executable permission bit.
shutil.copy2(out_gn, options.output)
except subprocess.CalledProcessError as e:
print >> sys.stderr, str(e)
return 1
return 0
def build_gn_with_ninja_manually(tempdir, options):
write_ninja(os.path.join(tempdir, 'build.ninja'), options)
cmd = ['ninja', '-C', tempdir]
if options.verbose:
cmd.append('-v')
cmd.append('gn')
check_call(cmd)
def write_ninja(path, options):
cc = os.environ.get('CC', '')
cxx = os.environ.get('CXX', '')
cflags = os.environ.get('CFLAGS', '').split()
cflags_cc = os.environ.get('CXXFLAGS', '').split()
ld = os.environ.get('LD', cxx)
ldflags = os.environ.get('LDFLAGS', '').split()
include_dirs = [SRC_ROOT]
libs = []
if is_posix:
if options.debug:
cflags.extend(['-O0', '-g'])
else:
cflags.extend(['-O2', '-g0'])
cflags.extend(['-D_FILE_OFFSET_BITS=64', '-pthread', '-pipe'])
cflags_cc.extend(['-std=gnu++11', '-Wno-c++11-narrowing'])
static_libraries = {
'base': {'sources': [], 'tool': 'cxx'},
'dynamic_annotations': {'sources': [], 'tool': 'cc'},
'gn': {'sources': [], 'tool': 'cxx'},
}
for name in os.listdir(GN_ROOT):
if not name.endswith('.cc'):
continue
if name.endswith('_unittest.cc'):
continue
if name in ['generate_test_gn_data.cc', 'run_all_unittests.cc']:
continue
full_path = os.path.join(GN_ROOT, name)
static_libraries['gn']['sources'].append(
os.path.relpath(full_path, SRC_ROOT))
static_libraries['dynamic_annotations']['sources'].extend([
'base/third_party/dynamic_annotations/dynamic_annotations.c',
])
static_libraries['base']['sources'].extend([
'base/at_exit.cc',
'base/atomicops_internals_x86_gcc.cc',
'base/base_paths.cc',
'base/base_switches.cc',
'base/callback_internal.cc',
'base/command_line.cc',
'base/debug/alias.cc',
'base/debug/stack_trace.cc',
'base/debug/task_annotator.cc',
'base/environment.cc',
'base/files/file.cc',
'base/files/file_enumerator.cc',
'base/files/file_path.cc',
'base/files/file_path_constants.cc',
'base/files/file_util.cc',
'base/files/scoped_file.cc',
'base/json/json_parser.cc',
'base/json/json_reader.cc',
'base/json/json_string_value_serializer.cc',
'base/json/json_writer.cc',
'base/json/string_escape.cc',
'base/lazy_instance.cc',
'base/location.cc',
'base/logging.cc',
'base/memory/ref_counted.cc',
'base/memory/ref_counted_memory.cc',
'base/memory/singleton.cc',
'base/memory/weak_ptr.cc',
'base/message_loop/incoming_task_queue.cc',
'base/message_loop/message_loop.cc',
'base/message_loop/message_loop_proxy.cc',
'base/message_loop/message_loop_proxy_impl.cc',
'base/message_loop/message_pump.cc',
'base/message_loop/message_pump_default.cc',
'base/metrics/bucket_ranges.cc',
'base/metrics/histogram.cc',
'base/metrics/histogram_base.cc',
'base/metrics/histogram_samples.cc',
'base/metrics/sample_map.cc',
'base/metrics/sample_vector.cc',
'base/metrics/sparse_histogram.cc',
'base/metrics/statistics_recorder.cc',
'base/path_service.cc',
'base/pending_task.cc',
'base/pickle.cc',
'base/process/kill.cc',
'base/process/process_iterator.cc',
'base/process/process_metrics.cc',
'base/profiler/alternate_timer.cc',
'base/profiler/tracked_time.cc',
'base/run_loop.cc',
'base/sequence_checker_impl.cc',
'base/sequenced_task_runner.cc',
'base/strings/string16.cc',
'base/strings/string_number_conversions.cc',
'base/strings/string_piece.cc',
'base/strings/string_split.cc',
'base/strings/string_util.cc',
'base/strings/string_util_constants.cc',
'base/strings/stringprintf.cc',
'base/strings/utf_string_conversion_utils.cc',
'base/strings/utf_string_conversions.cc',
'base/synchronization/cancellation_flag.cc',
'base/synchronization/lock.cc',
'base/sys_info.cc',
'base/task_runner.cc',
'base/third_party/dmg_fp/dtoa_wrapper.cc',
'base/third_party/dmg_fp/g_fmt.cc',
'base/third_party/icu/icu_utf.cc',
'base/third_party/nspr/prtime.cc',
'base/thread_task_runner_handle.cc',
'base/threading/non_thread_safe_impl.cc',
'base/threading/post_task_and_reply_impl.cc',
'base/threading/sequenced_worker_pool.cc',
'base/threading/simple_thread.cc',
'base/threading/thread_checker_impl.cc',
'base/threading/thread_collision_warner.cc',
'base/threading/thread_id_name_manager.cc',
'base/threading/thread_local_storage.cc',
'base/threading/thread_restrictions.cc',
'base/threading/worker_pool.cc',
'base/time/time.cc',
'base/timer/elapsed_timer.cc',
'base/timer/timer.cc',
'base/trace_event/trace_event_impl.cc',
'base/trace_event/trace_event_impl_constants.cc',
'base/trace_event/trace_event_memory.cc',
'base/trace_event/trace_event_synthetic_delay.cc',
'base/tracked_objects.cc',
'base/tracking_info.cc',
'base/values.cc',
'base/vlog.cc',
])
if is_posix:
static_libraries['base']['sources'].extend([
'base/base_paths_posix.cc',
'base/debug/debugger_posix.cc',
'base/debug/stack_trace_posix.cc',
'base/files/file_enumerator_posix.cc',
'base/files/file_posix.cc',
'base/files/file_util_posix.cc',
'base/message_loop/message_pump_libevent.cc',
'base/posix/file_descriptor_shuffle.cc',
'base/process/kill_posix.cc',
'base/process/process_handle_posix.cc',
'base/process/process_metrics_posix.cc',
'base/process/process_posix.cc',
'base/safe_strerror_posix.cc',
'base/synchronization/condition_variable_posix.cc',
'base/synchronization/lock_impl_posix.cc',
'base/synchronization/waitable_event_posix.cc',
'base/sys_info_posix.cc',
'base/threading/platform_thread_internal_posix.cc',
'base/threading/platform_thread_posix.cc',
'base/threading/thread_local_posix.cc',
'base/threading/thread_local_storage_posix.cc',
'base/threading/worker_pool_posix.cc',
'base/time/time_posix.cc',
])
static_libraries['libevent'] = {
'sources': [
'third_party/libevent/buffer.c',
'third_party/libevent/evbuffer.c',
'third_party/libevent/evdns.c',
'third_party/libevent/event.c',
'third_party/libevent/event_tagging.c',
'third_party/libevent/evrpc.c',
'third_party/libevent/evutil.c',
'third_party/libevent/http.c',
'third_party/libevent/log.c',
'third_party/libevent/poll.c',
'third_party/libevent/select.c',
'third_party/libevent/signal.c',
'third_party/libevent/strlcpy.c',
],
'tool': 'cc',
'include_dirs': [],
'cflags': cflags + ['-DHAVE_CONFIG_H'],
}
if is_linux:
libs.extend(['-lrt'])
ldflags.extend(['-pthread'])
static_libraries['xdg_user_dirs'] = {
'sources': [
'base/third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
],
'tool': 'cxx',
}
static_libraries['base']['sources'].extend([
'base/nix/xdg_util.cc',
'base/process/internal_linux.cc',
'base/process/process_handle_linux.cc',
'base/process/process_iterator_linux.cc',
'base/process/process_linux.cc',
'base/process/process_metrics_linux.cc',
'base/strings/sys_string_conversions_posix.cc',
'base/sys_info_linux.cc',
'base/threading/platform_thread_linux.cc',
])
static_libraries['libevent']['include_dirs'].extend([
os.path.join(SRC_ROOT, 'third_party', 'libevent', 'linux')
])
static_libraries['libevent']['sources'].extend([
'third_party/libevent/epoll.c',
])
if is_mac:
static_libraries['base']['sources'].extend([
'base/base_paths_mac.mm',
'base/files/file_util_mac.mm',
'base/mac/bundle_locations.mm',
'base/mac/foundation_util.mm',
'base/mac/mach_logging.cc',
'base/mac/scoped_mach_port.cc',
'base/mac/scoped_nsautorelease_pool.mm',
'base/message_loop/message_pump_mac.mm',
'base/process/process_handle_mac.cc',
'base/process/process_iterator_mac.cc',
'base/strings/sys_string_conversions_mac.mm',
'base/time/time_mac.cc',
'base/threading/platform_thread_mac.mm',
])
static_libraries['libevent']['include_dirs'].extend([
os.path.join(SRC_ROOT, 'third_party', 'libevent', 'mac')
])
static_libraries['libevent']['sources'].extend([
'third_party/libevent/kqueue.c',
])
if is_mac:
template_filename = 'build_mac.ninja.template'
else:
template_filename = 'build.ninja.template'
with open(os.path.join(GN_ROOT, 'bootstrap', template_filename)) as f:
ninja_template = f.read()
def src_to_obj(path):
return '%s' % os.path.splitext(path)[0] + '.o'
ninja_lines = []
for library, settings in static_libraries.iteritems():
for src_file in settings['sources']:
ninja_lines.extend([
'build %s: %s %s' % (src_to_obj(src_file),
settings['tool'],
os.path.join(SRC_ROOT, src_file)),
' includes = %s' % ' '.join(
['-I' + dirname for dirname in
include_dirs + settings.get('include_dirs', [])]),
' cflags = %s' % ' '.join(cflags + settings.get('cflags', [])),
' cflags_cc = %s' %
' '.join(cflags_cc + settings.get('cflags_cc', [])),
])
if cc:
ninja_lines.append(' cc = %s' % cc)
if cxx:
ninja_lines.append(' cxx = %s' % cxx)
ninja_lines.append('build %s.a: alink_thin %s' % (
library,
' '.join([src_to_obj(src_file) for src_file in settings['sources']])))
if is_mac:
libs.extend([
'-framework', 'AppKit',
'-framework', 'CoreFoundation',
'-framework', 'Foundation',
'-framework', 'Security',
]);
ninja_lines.extend([
'build gn: link %s' % (
' '.join(['%s.a' % library for library in static_libraries])),
' ldflags = %s' % ' '.join(ldflags),
' libs = %s' % ' '.join(libs),
])
if ld:
ninja_lines.append(' ld = %s' % ld)
else:
ninja_lines.append(' ld = $ldxx')
ninja_lines.append('') # Make sure the file ends with a newline.
with open(path, 'w') as f:
f.write(ninja_template + '\n'.join(ninja_lines))
def build_gn_with_gn(temp_gn, build_dir, options):
cmd = [temp_gn, 'gen', build_dir]
if not options.debug:
cmd.append('--args=is_debug=false')
check_call(cmd)
cmd = ['ninja', '-C', build_dir]
if options.verbose:
cmd.append('-v')
cmd.append('gn')
check_call(cmd)
if not options.debug:
check_call(['strip', os.path.join(build_dir, 'gn')])
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
py | b417e01fd36b33525055b53d91dc9942cd720c30 | """Policy players"""
import numpy as np
from AlphaGo import go
#import go
from AlphaGo import mcts
from operator import itemgetter
class GreedyPolicyPlayer(object):
"""A player that uses a greedy policy (i.e. chooses the highest probability
move each turn)
"""
def __init__(self, policy_function, pass_when_offered=False, move_limit=None):
self.policy = policy_function
self.pass_when_offered = pass_when_offered
self.move_limit = move_limit
def get_move(self, state):
# check move limit
if self.move_limit is not None and state.get_history_size() > self.move_limit:
return go.PASS
# check if pass was offered and we want to pass
if self.pass_when_offered:
if state.get_history_size() > 100 and state.get_history()[-1] == go.PASS:
return go.PASS
# list with sensible moves
sensible_moves = [move for move in state.get_legal_moves(include_eyes=False)]
# check if there are sensible moves left to do
if len(sensible_moves) > 0:
move_probs = self.policy.eval_state(state, sensible_moves)
max_prob = max(move_probs, key=itemgetter(1))
return max_prob[0]
# No 'sensible' moves available, so do pass move
return go.PASS
class ProbabilisticPolicyPlayer(object):
"""A player that samples a move in proportion to the probability given by the
policy.
By manipulating the 'temperature', moves can be pushed towards totally random
(high temperature) or towards greedy play (low temperature)
"""
def __init__(self, policy_function, temperature=1.0, pass_when_offered=False,
move_limit=None, greedy_start=None):
assert(temperature > 0.0)
self.policy = policy_function
self.move_limit = move_limit
self.beta = 1.0 / temperature
self.pass_when_offered = pass_when_offered
self.greedy_start = greedy_start
def apply_temperature(self, distribution):
log_probabilities = np.log(distribution)
# apply beta exponent to probabilities (in log space)
log_probabilities = log_probabilities * self.beta
# scale probabilities to a more numerically stable range (in log space)
log_probabilities = log_probabilities - log_probabilities.max()
# convert back from log space
probabilities = np.exp(log_probabilities)
# re-normalize the distribution
return probabilities / probabilities.sum()
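    # Worked example (sketch): with temperature=0.5 (beta=2) the distribution
    # [0.5, 0.4, 0.1] becomes proportional to [0.25, 0.16, 0.01], i.e. roughly
    # [0.60, 0.38, 0.02] after renormalization; lower temperature sharpens the
    # distribution towards greedy play, higher temperature flattens it.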
def get_move(self, state):
# check move limit
if self.move_limit is not None and state.get_history_size() > self.move_limit:
return go.PASS
# check if pass was offered and we want to pass
if self.pass_when_offered:
if state.get_history_size() > 100 and state.get_history()[-1] == go.PASS:
return go.PASS
# list with 'sensible' moves
sensible_moves = [move for move in state.get_legal_moves(include_eyes=False)]
# check if there are 'sensible' moves left to do
if len(sensible_moves) > 0:
move_probs = self.policy.eval_state(state, sensible_moves)
if self.greedy_start is not None and state.get_history_size() >= self.greedy_start:
# greedy
max_prob = max(move_probs, key=itemgetter(1))
return max_prob[0]
else:
# probabilistic
# zip(*list) is like the 'transpose' of zip;
# zip(*zip([1,2,3], [4,5,6])) is [(1,2,3), (4,5,6)]
moves, probabilities = list(zip(*move_probs))
# apply 'temperature' to the distribution
probabilities = self.apply_temperature(probabilities)
# numpy interprets a list of tuples as 2D, so we must choose an
# _index_ of moves then apply it in 2 steps
choice_idx = np.random.choice(len(moves), p=probabilities)
return moves[choice_idx]
# No 'sensible' moves available, so do pass move
return go.PASS
def get_moves(self, states):
"""Batch version of get_move. A list of moves is returned (one per state)
"""
sensible_move_lists = [[move for move in st.get_legal_moves(include_eyes=False)]
for st in states]
all_moves_distributions = self.policy.batch_eval_state(states, sensible_move_lists)
move_list = [None] * len(states)
for i, move_probs in enumerate(all_moves_distributions):
            if len(move_probs) == 0 or (self.move_limit is not None and
                                        states[i].get_history_size() > self.move_limit):
move_list[i] = go.PASS
else:
if self.greedy_start is not None and \
states[i].get_history_size() >= self.greedy_start:
# greedy
max_prob = max(move_probs, key=itemgetter(1))
move_list[i] = max_prob[0]
else:
# probabilistic
moves, probabilities = list(zip(*move_probs))
# apply 'temperature' to the distribution
probabilities = self.apply_temperature(probabilities)
# numpy interprets a list of tuples as 2D, so we must choose an
# _index_ of moves then apply it in 2 steps
choice_idx = np.random.choice(len(moves), p=probabilities)
move_list[i] = moves[choice_idx]
return move_list
class RolloutPlayer(object):
"""A player that samples a move in proportion to the probability given by the
policy.
By manipulating the 'temperature', moves can be pushed towards totally random
(high temperature) or towards greedy play (low temperature)
"""
def __init__(self, rollout_function, temperature=1.0, pass_when_offered=False,
move_limit=None, greedy_start=None):
assert(temperature > 0.0)
self.policy = rollout_function
self.move_limit = move_limit
self.beta = 1.0 / temperature
self.pass_when_offered = pass_when_offered
self.greedy_start = greedy_start
def apply_temperature(self, distribution):
log_probabilities = np.log(distribution)
# apply beta exponent to probabilities (in log space)
log_probabilities = log_probabilities * self.beta
# scale probabilities to a more numerically stable range (in log space)
log_probabilities = log_probabilities - log_probabilities.max()
# convert back from log space
probabilities = np.exp(log_probabilities)
# re-normalize the distribution
return probabilities / probabilities.sum()
def get_move(self, state):
# check move limit
if self.move_limit is not None and state.get_history_size() > self.move_limit:
return go.PASS
# check if pass was offered and we want to pass
if self.pass_when_offered:
if state.get_history_size() > 100 and state.get_history()[-1] == go.PASS:
return go.PASS
# list with 'sensible' moves
sensible_moves = [move for move in state.get_legal_moves(include_eyes=False)]
# check if there are 'sensible' moves left to do
if len(sensible_moves) > 0:
move_probs = self.policy.eval_state(state, sensible_moves)
if self.greedy_start is not None and \
state.get_history_size() >= self.greedy_start:
# greedy
max_prob = max(move_probs, key=itemgetter(1))
return max_prob[0]
else:
# probabilistic
# zip(*list) is like the 'transpose' of zip;
# zip(*zip([1,2,3], [4,5,6])) is [(1,2,3), (4,5,6)]
moves, probabilities = list(zip(*move_probs))
# apply 'temperature' to the distribution
probabilities = self.apply_temperature(probabilities)
# numpy interprets a list of tuples as 2D, so we must choose an
# _index_ of moves then apply it in 2 steps
choice_idx = np.random.choice(len(moves), p=probabilities)
return moves[choice_idx]
# No 'sensible' moves available, so do pass move
return go.PASS
def get_moves(self, states):
"""Batch version of get_move. A list of moves is returned (one per state)
"""
sensible_move_lists = [[move for move in st.get_legal_moves(include_eyes=False)]
for st in states]
all_moves_distributions = self.policy.batch_eval_state(states, sensible_move_lists)
move_list = [None] * len(states)
for i, move_probs in enumerate(all_moves_distributions):
            if len(move_probs) == 0 or (self.move_limit is not None and
                                        states[i].get_history_size() > self.move_limit):
move_list[i] = go.PASS
else:
if self.greedy_start is not None and \
states[i].get_history_size() >= self.greedy_start:
# greedy
max_prob = max(move_probs, key=itemgetter(1))
move_list[i] = max_prob[0]
else:
# probabilistic
moves, probabilities = list(zip(*move_probs))
# apply 'temperature' to the distribution
probabilities = self.apply_temperature(probabilities)
# numpy interprets a list of tuples as 2D, so we must choose an
# _index_ of moves then apply it in 2 steps
choice_idx = np.random.choice(len(moves), p=probabilities)
move_list[i] = moves[choice_idx]
return move_list
class ValuePlayer(object):
"""A player that samples a move in proportion to the probability given by the
value policy.
By manipulating the 'temperature', moves can be pushed towards totally random
(high temperature) or towards greedy play (low temperature)
greedy_start can be used to force greedy play as of move #greedy_start
"""
def __init__(self, value_function, temperature=1.0, pass_when_offered=False,
move_limit=None, greedy_start=None):
assert(temperature > 0.0)
self.pass_when_offered = pass_when_offered
self.greedy_start = greedy_start
self.beta = 1.0 / temperature
self.move_limit = move_limit
self.value = value_function
def apply_temperature(self, distribution):
log_probabilities = np.log(distribution)
# apply beta exponent to probabilities (in log space)
log_probabilities = log_probabilities * self.beta
# scale probabilities to a more numerically stable range (in log space)
log_probabilities = log_probabilities - log_probabilities.max()
# convert back from log space
probabilities = np.exp(log_probabilities)
# re-normalize the distribution
return probabilities / probabilities.sum()
def get_move(self, state):
# check move limit
if self.move_limit is not None and state.get_history_size() > self.move_limit:
return go.PASS
# check if pass was offered and we want to pass
if self.pass_when_offered:
if state.get_history_size() > 100 and state.get_history()[-1] == go.PASS:
return go.PASS
# list with 'sensible' moves
sensible_moves = [move for move in state.get_legal_moves(include_eyes=False)]
# check if there are 'sensible' moves left to do
if len(sensible_moves) > 0:
# list with legal moves
legal_moves = [move for move in state.get_legal_moves()]
# generate all possible next states
state_list = [state.copy() for _ in legal_moves]
for st, mv in zip(state_list, legal_moves):
st.do_move(mv)
# evaluate all possble states
probabilities = [self.value.eval_state(next_state) for next_state in state_list]
if self.greedy_start is not None and state.get_history_size() >= self.greedy_start:
# greedy play
move_probs = list(zip(legal_moves, probabilities))
max_prob = max(move_probs, key=itemgetter(1))
return max_prob[0]
else:
# probabilistic play
# apply 'temperature' to the distribution
probabilities = self.apply_temperature(probabilities)
# numpy interprets a list of tuples as 2D, so we must choose an
# _index_ of moves then apply it in 2 steps
choice_idx = np.random.choice(len(legal_moves), p=probabilities)
return legal_moves[choice_idx]
# No 'sensible' moves available, so do pass move
return go.PASS
class MCTSPlayer(object):
def __init__(self, value_function, policy_function, rollout_function, lmbda=.5, c_puct=5,
rollout_limit=500, playout_depth=40, n_playout=100):
self.mcts = mcts.MCTS(value_function, policy_function, rollout_function, lmbda, c_puct,
rollout_limit, playout_depth, n_playout)
def get_move(self, state):
sensible_moves = [move for move in state.get_legal_moves(include_eyes=False)]
if len(sensible_moves) > 0:
move = self.mcts.get_move(state)
self.mcts.update_with_move(move)
return move
# No 'sensible' moves available, so do pass move
return go.PASS
|
py | b417e0c969390149e08617478ad4ac183e3e36cb | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""rlu_dmlab_seekavoid_arena01 dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.rl_unplugged import dmlab_dataset
_TASK = 'seekavoid_arena_01'
_EPISODE_LENGTH = 301
_CONFIG_NAMES = ['training_0', 'training_1', 'training_2',
'snapshot_0_eps_0.0', 'snapshot_1_eps_0.0',
'snapshot_0_eps_0.01', 'snapshot_1_eps_0.01',
'snapshot_0_eps_0.25', 'snapshot_1_eps_0.25']
class RluDmlabSeekavoidArena01(dmlab_dataset.DMLabDatasetBuilder):
"""DatasetBuilder for rlu_dmlab_seekavoid_arena01 dataset."""
VERSION = tfds.core.Version('1.1.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
'1.1.0': 'Added is_last.',
}
# pytype: disable=wrong-keyword-args
BUILDER_CONFIGS = [
dmlab_dataset.BuilderConfig(
name=name, task=_TASK, episode_length=_EPISODE_LENGTH)
for name in _CONFIG_NAMES
]
# pytype: enable=wrong-keyword-args
|
py | b417e16bf4fa9d35fcbdcbb37b7a1434a44f2862 | from pytest import mark
from storyruntime.reporting.ReportingAgent import ReportingAgent
@mark.asyncio
async def test_ensure_interface(magic):
impl = magic()
class ReportingAgentSample(ReportingAgent):
async def capture(self, re):
impl.received(re)
sample_agent = ReportingAgentSample()
re = magic()
await sample_agent.capture(re)
impl.received.assert_called_with(re)
|
py | b417e1dc70344a0114d11199ed37736860fa0f77 | """This module provides classes that allow Numpy-type access
to VTK datasets and arrays. This is best described with some examples.
To normalize a VTK array:
import vtk
import vtk.numpy_interface.dataset_adapter as dsa
import vtk.numpy_interface.algorithms as algs
rt = vtk.vtkRTAnalyticSource()
rt.Update()
image = dsa.WrapDataObject(rt.GetOutput())
rtdata = image.PointData['RTData']
rtmin = algs.min(rtdata)
rtmax = algs.max(rtdata)
rtnorm = (rtdata - rtmin) / (rtmax - rtmin)
image.PointData.append(rtnorm, 'RTData - normalized')
print image.GetPointData().GetArray('RTData - normalized').GetRange()
To calculate gradient:
grad= algs.gradient(rtnorm)
To access subsets:
>>> grad[0:10]
VTKArray([[ 0.10729134, 0.03763443, 0.03136338],
[ 0.02754352, 0.03886006, 0.032589 ],
[ 0.02248248, 0.04127144, 0.03500038],
[ 0.02678365, 0.04357527, 0.03730421],
[ 0.01765099, 0.04571581, 0.03944477],
[ 0.02344007, 0.04763837, 0.04136734],
[ 0.01089381, 0.04929155, 0.04302051],
[ 0.01769151, 0.05062952, 0.04435848],
[ 0.002764 , 0.05161414, 0.04534309],
[ 0.01010841, 0.05221677, 0.04594573]])
>>> grad[:, 0]
VTKArray([ 0.10729134, 0.02754352, 0.02248248, ..., -0.02748174,
-0.02410045, 0.05509736])
All of this functionality is also supported for composite datasets
even though their data arrays may be spread across multiple datasets.
We have implemented a VTKCompositeDataArray class that handles many
Numpy style operators and is supported by all algorithms in the
algorithms module.
This module also provides an API to access composite datasets.
For example:
mb = vtk.vtkMultiBlockDataSet()
mb.SetBlock(0, image.VTKObject)
mb.SetBlock(1, image.VTKObject)
cds = dsa.WrapDataObject(mb)
for block in cds:
print block
Note that this module implements only the wrappers for datasets
and arrays. The classes implement many useful operators. However,
to make best use of these classes, take a look at the algorithms
module.
"""
try:
import numpy
except ImportError:
raise RuntimeError("This module depends on the numpy module. Please make\
sure that it is installed properly.")
import itertools
import operator
import sys
from vtk import buffer_shared
from vtk.util import numpy_support
from vtk.vtkCommonDataModel import vtkDataObject
from vtk.vtkCommonCore import vtkWeakReference
import weakref
if sys.hexversion < 0x03000000:
izip = itertools.izip
else:
izip = zip
def reshape_append_ones (a1, a2):
"""Returns a list with the two arguments, any of them may be
processed. If the arguments are numpy.ndarrays, append 1s to the
shape of the array with the smallest number of dimensions until
the arrays have the same number of dimensions. Does nothing if the
arguments are not ndarrays or the arrays have the same number of
dimensions.
"""
l = [a1, a2]
if (isinstance(a1, numpy.ndarray) and isinstance(a2, numpy.ndarray)):
len1 = len(a1.shape)
len2 = len(a2.shape)
if (len1 == len2 or len1 == 0 or len2 == 0 or
a1.shape[0] != a2.shape[0]):
return l;
elif (len1 < len2):
d = len1
maxLength = len2
i = 0
else:
d = len2
maxLength = len1
i = 1
while (d < maxLength):
l[i] = numpy.expand_dims(l[i], d)
d = d + 1
return l
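# Usage sketch: for G of shape (n, 3) and S of shape (n,), reshape_append_ones(G, S)
# returns [G, S reshaped to (n, 1)] so that G + S broadcasts cleanly; arrays that
# already have matching dimensions are returned unchanged.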
class ArrayAssociation :
"""Easy access to vtkDataObject.AttributeTypes"""
POINT = vtkDataObject.POINT
CELL = vtkDataObject.CELL
FIELD = vtkDataObject.FIELD
ROW = vtkDataObject.ROW
class VTKObjectWrapper(object):
"""Superclass for classes that wrap VTK objects with Python objects.
This class holds a reference to the wrapped VTK object. It also
forwards unresolved methods to the underlying object by overloading
    __getattr__."""
def __init__(self, vtkobject):
self.VTKObject = vtkobject
def __getattr__(self, name):
"Forwards unknown attribute requests to VTK object."
return getattr(self.VTKObject, name)
def vtkDataArrayToVTKArray(array, dataset=None):
"Given a vtkDataArray and a dataset owning it, returns a VTKArray."
narray = numpy_support.vtk_to_numpy(array)
# Make arrays of 9 components into matrices. Also transpose
# as VTK store matrices in Fortran order
shape = narray.shape
if len(shape) == 2 and shape[1] == 9:
narray = narray.reshape((shape[0], 3, 3)).transpose(0, 2, 1)
return VTKArray(narray, array=array, dataset=dataset)
def numpyTovtkDataArray(array, name="numpy_array", array_type=None):
"""Given a numpy array or a VTKArray and a name, returns a vtkDataArray.
The resulting vtkDataArray will store a reference to the numpy array:
the numpy array is released only when the vtkDataArray is destroyed."""
vtkarray = numpy_support.numpy_to_vtk(array, array_type=array_type)
vtkarray.SetName(name)
return vtkarray
def _make_tensor_array_contiguous(array):
if array is None:
return None
if array.flags.contiguous:
return array
array = numpy.asarray(array)
size = array.dtype.itemsize
strides = array.strides
if len(strides) == 3 and strides[1]/size == 1 and strides[2]/size == 3:
return array.transpose(0, 2, 1)
return array
def _metaclass(mcs):
"""For compatibility between python 2 and python 3."""
def decorator(cls):
body = vars(cls).copy()
body.pop('__dict__', None)
body.pop('__weakref__', None)
return mcs(cls.__name__, cls.__bases__, body)
return decorator
class VTKArrayMetaClass(type):
def __new__(mcs, name, parent, attr):
"""We overwrite numerical/comparison operators because we might need
to reshape one of the arrays to perform the operation without
        broadcast errors. For instance:
        An array G of shape (n,3) resulting from computing the
        gradient of a scalar array S of shape (n,) cannot be added to S without
        reshaping.
G + expand_dims(S,1) works,
G + S gives an error:
ValueError: operands could not be broadcast together with shapes (n,3) (n,)
This metaclass overwrites operators such that it computes this
reshape operation automatically by appending 1s to the
dimensions of the array with fewer dimensions.
"""
def add_numeric_op(attr_name):
"""Create an attribute named attr_name that calls
_numeric_op(self, other, op)."""
def closure(self, other):
return VTKArray._numeric_op(self, other, attr_name)
closure.__name__ = attr_name
attr[attr_name] = closure
def add_default_numeric_op(op_name):
"""Adds '__[op_name]__' attribute that uses operator.[op_name]"""
add_numeric_op("__%s__"%op_name)
def add_reverse_numeric_op(attr_name):
"""Create an attribute named attr_name that calls
_reverse_numeric_op(self, other, op)."""
def closure(self, other):
return VTKArray._reverse_numeric_op(self, other, attr_name)
closure.__name__ = attr_name
attr[attr_name] = closure
def add_default_reverse_numeric_op(op_name):
"""Adds '__r[op_name]__' attribute that uses operator.[op_name]"""
add_reverse_numeric_op("__r%s__"%op_name)
def add_default_numeric_ops(op_name):
"""Call both add_default_numeric_op and add_default_reverse_numeric_op."""
add_default_numeric_op(op_name)
add_default_reverse_numeric_op(op_name)
add_default_numeric_ops("add")
add_default_numeric_ops("sub")
add_default_numeric_ops("mul")
if sys.hexversion < 0x03000000:
add_default_numeric_ops("div")
add_default_numeric_ops("truediv")
add_default_numeric_ops("floordiv")
add_default_numeric_ops("mod")
add_default_numeric_ops("pow")
add_default_numeric_ops("lshift")
add_default_numeric_ops("rshift")
add_numeric_op("and")
add_default_numeric_ops("xor")
add_numeric_op("or")
add_default_numeric_op("lt")
add_default_numeric_op("le")
add_default_numeric_op("eq")
add_default_numeric_op("ne")
add_default_numeric_op("ge")
add_default_numeric_op("gt")
return type.__new__(mcs, name, parent, attr)
@_metaclass(VTKArrayMetaClass)
class VTKArray(numpy.ndarray):
"""This is a sub-class of numpy ndarray that stores a
reference to a vtk array as well as the owning dataset.
The numpy array and vtk array should point to the same
memory location."""
def _numeric_op(self, other, attr_name):
"""Used to implement numpy-style numerical operations such as __add__,
__mul__, etc."""
l = reshape_append_ones(self, other)
return getattr(numpy.ndarray, attr_name)(l[0], l[1])
def _reverse_numeric_op(self, other, attr_name):
"""Used to implement numpy-style numerical operations such as __add__,
__mul__, etc."""
l = reshape_append_ones(self, other)
return getattr(numpy.ndarray, attr_name)(l[0], l[1])
def __new__(cls, input_array, array=None, dataset=None):
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = numpy.asarray(input_array).view(cls)
obj.Association = ArrayAssociation.FIELD
# add the new attributes to the created instance
obj.VTKObject = array
if dataset:
obj._dataset = vtkWeakReference()
obj._dataset.Set(dataset.VTKObject)
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self,obj):
# Copy the VTK array only if the two share data
slf = _make_tensor_array_contiguous(self)
obj2 = _make_tensor_array_contiguous(obj)
self.VTKObject = None
try:
# This line tells us that they are referring to the same buffer.
# Much like two pointers referring to same memory location in C/C++.
if buffer_shared(slf, obj2):
self.VTKObject = getattr(obj, 'VTKObject', None)
except TypeError:
pass
self.Association = getattr(obj, 'Association', None)
self.DataSet = getattr(obj, 'DataSet', None)
def __getattr__(self, name):
"Forwards unknown attribute requests to VTK array."
try:
o = self.__dict__["VTKObject"]
except KeyError:
o = None
if o is None:
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, name))
return getattr(o, name)
def __array_wrap__(self, out_arr, context=None):
if out_arr.shape == ():
# Convert to scalar value
return out_arr[()]
else:
return numpy.ndarray.__array_wrap__(self, out_arr, context)
@property
def DataSet(self):
"""
Get the dataset this array is associated with. The reference to the
dataset is held through a vtkWeakReference to ensure it doesn't prevent
the dataset from being collected if necessary.
"""
if hasattr(self, '_dataset') and self._dataset and self._dataset.Get():
return WrapDataObject(self._dataset.Get())
return None
@DataSet.setter
def DataSet(self, dataset):
"""
Set the dataset this array is associated with. The reference is held
through a vtkWeakReference.
"""
# Do we have dataset to store
if dataset and dataset.VTKObject:
# Do we need to create a vtkWeakReference
if not hasattr(self, '_dataset') or self._dataset is None:
self._dataset = vtkWeakReference()
self._dataset.Set(dataset.VTKObject)
else:
self._dataset = None
class VTKNoneArrayMetaClass(type):
def __new__(mcs, name, parent, attr):
"""Simplify the implementation of the numeric/logical sequence API."""
def _add_op(attr_name, op):
"""Create an attribute named attr_name that calls
_numeric_op(self, other, op)."""
def closure(self, other):
return VTKNoneArray._op(self, other, op)
closure.__name__ = attr_name
attr[attr_name] = closure
def _add_default_reverse_op(op_name):
"""Adds '__r[op_name]__' attribute that uses operator.[op_name]"""
_add_op("__r%s__"%op_name, getattr(operator, op_name))
def _add_default_op(op_name):
"""Adds '__[op_name]__' attribute that uses operator.[op_name]"""
_add_op("__%s__"%op_name, getattr(operator, op_name))
def _add_default_ops(op_name):
"""Call both add_default_numeric_op and add_default_reverse_numeric_op."""
_add_default_op(op_name)
_add_default_reverse_op(op_name)
_add_default_ops("add")
_add_default_ops("sub")
_add_default_ops("mul")
if sys.hexversion < 0x03000000:
_add_default_ops("div")
_add_default_ops("truediv")
_add_default_ops("floordiv")
_add_default_ops("mod")
_add_default_ops("pow")
_add_default_ops("lshift")
_add_default_ops("rshift")
_add_op("__and__", operator.and_)
_add_op("__rand__", operator.and_)
_add_default_ops("xor")
_add_op("__or__", operator.or_)
_add_op("__ror__", operator.or_)
_add_default_op("lt")
_add_default_op("le")
_add_default_op("eq")
_add_default_op("ne")
_add_default_op("ge")
_add_default_op("gt")
return type.__new__(mcs, name, parent, attr)
@_metaclass(VTKNoneArrayMetaClass)
class VTKNoneArray(object):
"""VTKNoneArray is used to represent a "void" array. An instance
of this class (NoneArray) is returned instead of None when an
array that doesn't exist in a DataSetAttributes is requested.
All operations on the NoneArray return NoneArray. The main reason
for this is to support operations in parallel where one of the
processes may be working on an empty dataset. In such cases,
the process is still expected to evaluate a whole expression because
some of the functions may perform bulk MPI communication. None
cannot be used in these instances because it cannot properly override
operators such as __add__, __sub__ etc. This is the main raison
d'etre for VTKNoneArray."""
def __getitem__(self, index):
return NoneArray
def _op(self, other, op):
"""Used to implement numpy-style numerical operations such as __add__,
__mul__, etc."""
return NoneArray
def astype(self, dtype):
"""Implements numpy array's astype method."""
return NoneArray
NoneArray = VTKNoneArray()
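# Behaviour sketch implied by the class above: NoneArray propagates through
# arithmetic and comparisons, e.g. (NoneArray + 1) * 2 is still NoneArray, so a
# process holding an empty dataset can evaluate a whole expression without branching.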
class VTKCompositeDataArrayMetaClass(type):
def __new__(mcs, name, parent, attr):
"""Simplify the implementation of the numeric/logical sequence API."""
def add_numeric_op(attr_name, op):
"""Create an attribute named attr_name that calls
_numeric_op(self, other, op)."""
def closure(self, other):
return VTKCompositeDataArray._numeric_op(self, other, op)
closure.__name__ = attr_name
attr[attr_name] = closure
def add_reverse_numeric_op(attr_name, op):
"""Create an attribute named attr_name that calls
_reverse_numeric_op(self, other, op)."""
def closure(self, other):
return VTKCompositeDataArray._reverse_numeric_op(self, other, op)
closure.__name__ = attr_name
attr[attr_name] = closure
def add_default_reverse_numeric_op(op_name):
"""Adds '__r[op_name]__' attribute that uses operator.[op_name]"""
add_reverse_numeric_op("__r%s__"%op_name, getattr(operator, op_name))
def add_default_numeric_op(op_name):
"""Adds '__[op_name]__' attribute that uses operator.[op_name]"""
add_numeric_op("__%s__"%op_name, getattr(operator, op_name))
def add_default_numeric_ops(op_name):
"""Call both add_default_numeric_op and add_default_reverse_numeric_op."""
add_default_numeric_op(op_name)
add_default_reverse_numeric_op(op_name)
add_default_numeric_ops("add")
add_default_numeric_ops("sub")
add_default_numeric_ops("mul")
if sys.hexversion < 0x03000000:
add_default_numeric_ops("div")
add_default_numeric_ops("truediv")
add_default_numeric_ops("floordiv")
add_default_numeric_ops("mod")
add_default_numeric_ops("pow")
add_default_numeric_ops("lshift")
add_default_numeric_ops("rshift")
add_numeric_op("__and__", operator.and_)
add_reverse_numeric_op("__rand__", operator.and_)
add_default_numeric_ops("xor")
add_numeric_op("__or__", operator.or_)
add_reverse_numeric_op("__ror__", operator.or_)
add_default_numeric_op("lt")
add_default_numeric_op("le")
add_default_numeric_op("eq")
add_default_numeric_op("ne")
add_default_numeric_op("ge")
add_default_numeric_op("gt")
return type.__new__(mcs, name, parent, attr)
@_metaclass(VTKCompositeDataArrayMetaClass)
class VTKCompositeDataArray(object):
"""This class manages a set of arrays of the same name contained
within a composite dataset. Its main purpose is to provide a
Numpy-type interface to composite data arrays which are naturally
nothing but a collection of vtkDataArrays. A VTKCompositeDataArray
makes such a collection appear as a single Numpy array and support
all array operations that this module and the associated algorithm
module support. Note that this is not a subclass of a Numpy array
and as such cannot be passed to native Numpy functions. Instead
VTK modules should be used to process composite arrays.
"""
def __init__(self, arrays = [], dataset = None, name = None,
association = None):
"""Construct a composite array given a container of
arrays, a dataset, name and association. It is sufficient
to define a container of arrays to define a composite array.
It is also possible to initialize an array by defining
the dataset, name and array association. In that case,
the underlying arrays will be created lazily when they
are needed. It is recommended to use the latter method
when initializing from an existing composite dataset."""
self._Arrays = arrays
self.DataSet = dataset
self.Name = name
validAssociation = True
if association == None:
for array in self._Arrays:
if hasattr(array, "Association"):
if association == None:
association = array.Association
elif array.Association and association != array.Association:
validAssociation = False
break
if validAssociation:
self.Association = association
else:
self.Association = ArrayAssociation.FIELD
self.Initialized = False
def __init_from_composite(self):
if self.Initialized:
return
self.Initialized = True
if self.DataSet is None or self.Name is None:
return
self._Arrays = []
for ds in self.DataSet:
self._Arrays.append(ds.GetAttributes(self.Association)[self.Name])
def GetSize(self):
"Returns the number of elements in the array."
self.__init_from_composite()
size = numpy.int64(0)
for a in self._Arrays:
try:
size += a.size
except AttributeError:
pass
return size
size = property(GetSize)
def GetArrays(self):
"""Returns the internal container of VTKArrays. If necessary,
this will populate the array list from a composite dataset."""
self.__init_from_composite()
return self._Arrays
Arrays = property(GetArrays)
def __getitem__(self, index):
"""Overwritten to refer indexing to underlying VTKArrays.
For the most part, this will behave like Numpy. Note
that indexing is done per array - arrays are never treated
as forming a bigger array. If the index is another composite
array, a one-to-one mapping between arrays is assumed.
"""
self.__init_from_composite()
res = []
if type(index) == VTKCompositeDataArray:
for a, idx in izip(self._Arrays, index.Arrays):
if a is not NoneArray:
res.append(a.__getitem__(idx))
else:
res.append(NoneArray)
else:
for a in self._Arrays:
if a is not NoneArray:
res.append(a.__getitem__(index))
else:
res.append(NoneArray)
return VTKCompositeDataArray(res, dataset=self.DataSet)
def _numeric_op(self, other, op):
"""Used to implement numpy-style numerical operations such as __add__,
__mul__, etc."""
self.__init_from_composite()
res = []
if type(other) == VTKCompositeDataArray:
for a1, a2 in izip(self._Arrays, other.Arrays):
if a1 is not NoneArray and a2 is not NoneArray:
l = reshape_append_ones(a1, a2)
res.append(op(l[0],l[1]))
else:
res.append(NoneArray)
else:
for a in self._Arrays:
if a is not NoneArray:
l = reshape_append_ones(a, other)
res.append(op(l[0], l[1]))
else:
res.append(NoneArray)
return VTKCompositeDataArray(
res, dataset=self.DataSet, association=self.Association)
def _reverse_numeric_op(self, other, op):
"""Used to implement numpy-style numerical operations such as __add__,
__mul__, etc."""
self.__init_from_composite()
res = []
if type(other) == VTKCompositeDataArray:
for a1, a2 in izip(self._Arrays, other.Arrays):
                if a1 is not NoneArray and a2 is not NoneArray:
l = reshape_append_ones(a2,a1)
res.append(op(l[0],l[1]))
else:
res.append(NoneArray)
else:
for a in self._Arrays:
if a is not NoneArray:
l = reshape_append_ones(other, a)
res.append(op(l[0], l[1]))
else:
res.append(NoneArray)
return VTKCompositeDataArray(
res, dataset=self.DataSet, association = self.Association)
def __str__(self):
return self.Arrays.__str__()
def astype(self, dtype):
"""Implements numpy array's as array method."""
res = []
if self is not NoneArray:
for a in self.Arrays:
if a is NoneArray:
res.append(NoneArray)
else:
res.append(a.astype(dtype))
return VTKCompositeDataArray(
res, dataset = self.DataSet, association = self.Association)
class DataSetAttributes(VTKObjectWrapper):
"""This is a python friendly wrapper of vtkDataSetAttributes. It
returns VTKArrays. It also provides the dictionary interface."""
def __init__(self, vtkobject, dataset, association):
super(DataSetAttributes, self).__init__(vtkobject)
# import weakref
# self.DataSet = weakref.ref(dataset)
self.DataSet = dataset
self.Association = association
def __getitem__(self, idx):
"""Implements the [] operator. Accepts an array name or index."""
return self.GetArray(idx)
def GetArray(self, idx):
"Given an index or name, returns a VTKArray."
if isinstance(idx, int) and idx >= self.VTKObject.GetNumberOfArrays():
raise IndexError("array index out of range")
vtkarray = self.VTKObject.GetArray(idx)
if not vtkarray:
vtkarray = self.VTKObject.GetAbstractArray(idx)
if vtkarray:
return vtkarray
return NoneArray
array = vtkDataArrayToVTKArray(vtkarray, self.DataSet)
array.Association = self.Association
return array
def keys(self):
"""Returns the names of the arrays as a list."""
kys = []
narrays = self.VTKObject.GetNumberOfArrays()
for i in range(narrays):
name = self.VTKObject.GetAbstractArray(i).GetName()
if name:
kys.append(name)
return kys
def values(self):
"""Returns the arrays as a list."""
vals = []
narrays = self.VTKObject.GetNumberOfArrays()
for i in range(narrays):
a = self.VTKObject.GetAbstractArray(i)
if a.GetName():
vals.append(a)
return vals
def PassData(self, other):
"A wrapper for vtkDataSet.PassData."
try:
self.VTKObject.PassData(other)
except TypeError:
self.VTKObject.PassData(other.VTKObject)
def append(self, narray, name):
"""Appends a new array to the dataset attributes."""
if narray is NoneArray:
# if NoneArray, nothing to do.
return
if self.Association == ArrayAssociation.POINT:
arrLength = self.DataSet.GetNumberOfPoints()
elif self.Association == ArrayAssociation.CELL:
arrLength = self.DataSet.GetNumberOfCells()
else:
if not isinstance(narray, numpy.ndarray):
arrLength = 1
else:
arrLength = narray.shape[0]
# Fixup input array length:
if not isinstance(narray, numpy.ndarray) or numpy.ndim(narray) == 0: # Scalar input
tmparray = numpy.empty(arrLength)
tmparray.fill(narray)
narray = tmparray
elif narray.shape[0] != arrLength: # Vector input
components = 1
for l in narray.shape:
components *= l
tmparray = numpy.empty((arrLength, components))
tmparray[:] = narray.flatten()
narray = tmparray
shape = narray.shape
if len(shape) == 3:
# Array of matrices. We need to make sure the order in memory is right.
# If column order (c order), transpose. VTK wants row order (fortran
# order). The deep copy later will make sure that the array is contiguous.
# If row order but not contiguous, transpose so that the deep copy below
# does not happen.
size = narray.dtype.itemsize
if (narray.strides[1]/size == 3 and narray.strides[2]/size == 1) or \
(narray.strides[1]/size == 1 and narray.strides[2]/size == 3 and \
not narray.flags.contiguous):
narray = narray.transpose(0, 2, 1)
# If array is not contiguous, make a deep copy that is contiguous
if not narray.flags.contiguous:
narray = numpy.ascontiguousarray(narray)
# Flatten array of matrices to array of vectors
if len(shape) == 3:
narray = narray.reshape(shape[0], shape[1]*shape[2])
        # this handles the case when an input array is directly appended on the
# output. We want to make sure that the array added to the output is not
# referring to the input dataset.
copy = VTKArray(narray)
try:
copy.VTKObject = narray.VTKObject
except AttributeError: pass
arr = numpyTovtkDataArray(copy, name)
self.VTKObject.AddArray(arr)
class CompositeDataSetAttributes():
"""This is a python friendly wrapper for vtkDataSetAttributes for composite
    datasets. Since composite datasets themselves don't have attribute data, but
the attribute data is associated with the leaf nodes in the composite
dataset, this class simulates a DataSetAttributes interface by taking a
union of DataSetAttributes associated with all leaf nodes."""
def __init__(self, dataset, association):
# import weakref
# self.DataSet = weakref.ref(dataset)
self.DataSet = dataset
self.Association = association
self.ArrayNames = []
self.Arrays = {}
# build the set of arrays available in the composite dataset. Since
# composite datasets can have partial arrays, we need to iterate over
# all non-null blocks in the dataset.
self.__determine_arraynames()
def __determine_arraynames(self):
array_set = set()
array_list = []
for dataset in self.DataSet:
dsa = dataset.GetAttributes(self.Association)
for array_name in dsa.keys():
if array_name not in array_set:
array_set.add(array_name)
array_list.append(array_name)
self.ArrayNames = array_list
def keys(self):
"""Returns the names of the arrays as a list."""
return self.ArrayNames
def __getitem__(self, idx):
"""Implements the [] operator. Accepts an array name."""
return self.GetArray(idx)
def append(self, narray, name):
"""Appends a new array to the composite dataset attributes."""
if narray is NoneArray:
# if NoneArray, nothing to do.
return
added = False
if not isinstance(narray, VTKCompositeDataArray): # Scalar input
for ds in self.DataSet:
ds.GetAttributes(self.Association).append(narray, name)
added = True
if added:
self.ArrayNames.append(name)
# don't add the narray since it's a scalar. GetArray() will create a
# VTKCompositeArray on-demand.
else:
for ds, array in izip(self.DataSet, narray.Arrays):
if array is not None:
ds.GetAttributes(self.Association).append(array, name)
added = True
if added:
self.ArrayNames.append(name)
self.Arrays[name] = weakref.ref(narray)
def GetArray(self, idx):
"""Given a name, returns a VTKCompositeArray."""
arrayname = idx
if arrayname not in self.ArrayNames:
return NoneArray
if arrayname not in self.Arrays or self.Arrays[arrayname]() is None:
array = VTKCompositeDataArray(
dataset = self.DataSet, name = arrayname, association = self.Association)
self.Arrays[arrayname] = weakref.ref(array)
else:
array = self.Arrays[arrayname]()
return array
def PassData(self, other):
"""Emulate PassData for composite datasets."""
for this,that in zip(self.DataSet, other.DataSet):
for assoc in [ArrayAssociation.POINT, ArrayAssociation.CELL]:
this.GetAttributes(assoc).PassData(that.GetAttributes(assoc))
class CompositeDataIterator(object):
"""Wrapper for a vtkCompositeDataIterator class to satisfy
the python iterator protocol. This iterator iterates
over non-empty leaf nodes. To iterate over empty or
non-leaf nodes, use the vtkCompositeDataIterator directly.
"""
def __init__(self, cds):
self.Iterator = cds.NewIterator()
if self.Iterator:
self.Iterator.UnRegister(None)
self.Iterator.GoToFirstItem()
def __iter__(self):
return self
def __next__(self):
if not self.Iterator:
raise StopIteration
if self.Iterator.IsDoneWithTraversal():
raise StopIteration
retVal = self.Iterator.GetCurrentDataObject()
self.Iterator.GoToNextItem()
return WrapDataObject(retVal)
def next(self):
return self.__next__()
def __getattr__(self, name):
"""Returns attributes from the vtkCompositeDataIterator."""
return getattr(self.Iterator, name)
class MultiCompositeDataIterator(CompositeDataIterator):
"""Iterator that can be used to iterate over multiple
composite datasets together. This iterator works only
    with composite datasets that share the same structure, for example an
    output copied from an input using CopyStructure. The most common use case is to use
CopyStructure, then iterate over input and output together
while creating output datasets from corresponding input
datasets."""
def __init__(self, cds):
CompositeDataIterator.__init__(self, cds[0])
self.Datasets = cds
def __next__(self):
if not self.Iterator:
raise StopIteration
if self.Iterator.IsDoneWithTraversal():
raise StopIteration
retVal = []
retVal.append(WrapDataObject(self.Iterator.GetCurrentDataObject()))
if len(self.Datasets) > 1:
for cd in self.Datasets[1:]:
retVal.append(WrapDataObject(cd.GetDataSet(self.Iterator)))
self.Iterator.GoToNextItem()
return retVal
def next(self):
return self.__next__()
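# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Iterates over two composite datasets that share the same structure; 'scalars'
# is a hypothetical point-array name, and both arguments are assumed to be
# wrapped CompositeDataSet instances with non-empty leaf datasets.
def _example_multi_iteration(input_cds, output_cds):
    for in_ds, out_ds in MultiCompositeDataIterator([input_cds, output_cds]):
        out_ds.PointData.append(in_ds.PointData['scalars'], 'scalars_copy')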
class DataObject(VTKObjectWrapper):
"""A wrapper for vtkDataObject that makes it easier to access FielData
arrays as VTKArrays
"""
def GetAttributes(self, type):
"""Returns the attributes specified by the type as a DataSetAttributes
instance."""
if type == ArrayAssociation.FIELD:
return DataSetAttributes(self.VTKObject.GetFieldData(), self, type)
return DataSetAttributes(self.VTKObject.GetAttributes(type), self, type)
def GetFieldData(self):
"Returns the field data as a DataSetAttributes instance."
return DataSetAttributes(self.VTKObject.GetFieldData(), self, ArrayAssociation.FIELD)
FieldData = property(GetFieldData, None, None, "This property returns the field data of a data object.")
class Table(DataObject):
"""A wrapper for vtkFielData that makes it easier to access RowData array as
VTKArrays
"""
def GetRowData(self):
"Returns the row data as a DataSetAttributes instance."
return self.GetAttributes(ArrayAssociation.ROW)
RowData = property(GetRowData, None, None, "This property returns the row data of the table.")
class CompositeDataSet(DataObject):
"""A wrapper for vtkCompositeData and subclasses that makes it easier
to access Point/Cell/Field data as VTKCompositeDataArrays. It also
provides a Python type iterator."""
def __init__(self, vtkobject):
DataObject.__init__(self, vtkobject)
self._PointData = None
self._CellData = None
self._FieldData = None
self._Points = None
def __iter__(self):
"Creates an iterator for the contained datasets."
return CompositeDataIterator(self)
def GetNumberOfElements(self, assoc):
"""Returns the total number of cells or points depending
on the value of assoc which can be ArrayAssociation.POINT or
ArrayAssociation.CELL."""
result = 0
for dataset in self:
result += dataset.GetNumberOfElements(assoc)
return int(result)
def GetNumberOfPoints(self):
"""Returns the total number of points of all datasets
in the composite dataset. Note that this traverses the
whole composite dataset every time and should not be
called repeatedly for large composite datasets."""
return self.GetNumberOfElements(ArrayAssociation.POINT)
def GetNumberOfCells(self):
"""Returns the total number of cells of all datasets
in the composite dataset. Note that this traverses the
whole composite dataset every time and should not be
called repeatedly for large composite datasets."""
return self.GetNumberOfElements(ArrayAssociation.CELL)
def GetAttributes(self, type):
"""Returns the attributes specified by the type as a
CompositeDataSetAttributes instance."""
return CompositeDataSetAttributes(self, type)
def GetPointData(self):
"Returns the point data as a DataSetAttributes instance."
if self._PointData is None or self._PointData() is None:
pdata = self.GetAttributes(ArrayAssociation.POINT)
self._PointData = weakref.ref(pdata)
return self._PointData()
def GetCellData(self):
"Returns the cell data as a DataSetAttributes instance."
if self._CellData is None or self._CellData() is None:
cdata = self.GetAttributes(ArrayAssociation.CELL)
self._CellData = weakref.ref(cdata)
return self._CellData()
def GetFieldData(self):
"Returns the field data as a DataSetAttributes instance."
if self._FieldData is None or self._FieldData() is None:
fdata = self.GetAttributes(ArrayAssociation.FIELD)
self._FieldData = weakref.ref(fdata)
return self._FieldData()
def GetPoints(self):
"Returns the points as a VTKCompositeDataArray instance."
if self._Points is None or self._Points() is None:
pts = []
for ds in self:
try:
_pts = ds.Points
except AttributeError:
_pts = None
if _pts is None:
pts.append(NoneArray)
else:
pts.append(_pts)
if len(pts) == 0 or all([a is NoneArray for a in pts]):
cpts = NoneArray
else:
cpts = VTKCompositeDataArray(pts, dataset=self)
self._Points = weakref.ref(cpts)
return self._Points()
PointData = property(GetPointData, None, None, "This property returns the point data of the dataset.")
CellData = property(GetCellData, None, None, "This property returns the cell data of a dataset.")
FieldData = property(GetFieldData, None, None, "This property returns the field data of a dataset.")
Points = property(GetPoints, None, None, "This property returns the points of the dataset.")
class DataSet(DataObject):
"""This is a python friendly wrapper of a vtkDataSet that defines
a few useful properties."""
def GetPointData(self):
"Returns the point data as a DataSetAttributes instance."
return self.GetAttributes(ArrayAssociation.POINT)
def GetCellData(self):
"Returns the cell data as a DataSetAttributes instance."
return self.GetAttributes(ArrayAssociation.CELL)
PointData = property(GetPointData, None, None, "This property returns the point data of the dataset.")
CellData = property(GetCellData, None, None, "This property returns the cell data of a dataset.")
class PointSet(DataSet):
"""This is a python friendly wrapper of a vtkPointSet that defines
a few useful properties."""
def GetPoints(self):
"""Returns the points as a VTKArray instance. Returns None if the
dataset has implicit points."""
if not self.VTKObject.GetPoints():
return None
array = vtkDataArrayToVTKArray(
self.VTKObject.GetPoints().GetData(), self)
array.Association = ArrayAssociation.POINT
return array
def SetPoints(self, pts):
"""Given a VTKArray instance, sets the points of the dataset."""
from vtk.vtkCommonCore import vtkPoints
if isinstance(pts, vtkPoints):
p = pts
else:
pts = numpyTovtkDataArray(pts)
p = vtkPoints()
p.SetData(pts)
self.VTKObject.SetPoints(p)
Points = property(GetPoints, SetPoints, None, "This property returns the point coordinates of dataset.")
class PolyData(PointSet):
"""This is a python friendly wrapper of a vtkPolyData that defines
a few useful properties."""
def GetPolygons(self):
"""Returns the polys as a VTKArray instance."""
if not self.VTKObject.GetPolys():
return None
return vtkDataArrayToVTKArray(
self.VTKObject.GetPolys().GetData(), self)
Polygons = property(GetPolygons, None, None, "This property returns the connectivity of polygons.")
class UnstructuredGrid(PointSet):
"""This is a python friendly wrapper of a vtkUnstructuredGrid that defines
a few useful properties."""
def GetCellTypes(self):
"""Returns the cell types as a VTKArray instance."""
if not self.VTKObject.GetCellTypesArray():
return None
return vtkDataArrayToVTKArray(
self.VTKObject.GetCellTypesArray(), self)
def GetCellLocations(self):
"""Returns the cell locations as a VTKArray instance."""
if not self.VTKObject.GetCellLocationsArray():
return None
return vtkDataArrayToVTKArray(
self.VTKObject.GetCellLocationsArray(), self)
def GetCells(self):
"""Returns the cells as a VTKArray instance."""
if not self.VTKObject.GetCells():
return None
return vtkDataArrayToVTKArray(
self.VTKObject.GetCells().GetData(), self)
def SetCells(self, cellTypes, cellLocations, cells):
"""Given cellTypes, cellLocations, cells as VTKArrays,
populates the unstructured grid data structures."""
from vtk import VTK_ID_TYPE
from vtk.vtkCommonDataModel import vtkCellArray
cellTypes = numpyTovtkDataArray(cellTypes)
cellLocations = numpyTovtkDataArray(cellLocations, array_type=VTK_ID_TYPE)
cells = numpyTovtkDataArray(cells, array_type=VTK_ID_TYPE)
ca = vtkCellArray()
ca.SetCells(cellTypes.GetNumberOfTuples(), cells)
self.VTKObject.SetCells(cellTypes, cellLocations, ca)
CellTypes = property(GetCellTypes, None, None, "This property returns the types of cells.")
CellLocations = property(GetCellLocations, None, None, "This property returns the locations of cells.")
Cells = property(GetCells, None, None, "This property returns the connectivity of cells.")
def WrapDataObject(ds):
"""Returns a Numpy friendly wrapper of a vtkDataObject."""
if ds.IsA("vtkPolyData"):
return PolyData(ds)
elif ds.IsA("vtkUnstructuredGrid"):
return UnstructuredGrid(ds)
elif ds.IsA("vtkPointSet"):
return PointSet(ds)
elif ds.IsA("vtkDataSet"):
return DataSet(ds)
elif ds.IsA("vtkCompositeDataSet"):
return CompositeDataSet(ds)
elif ds.IsA("vtkTable"):
return Table(ds)
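# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Wraps the output of a vtkSphereSource and stores its x coordinate as a point
# array; assumes a standard VTK build where vtkSphereSource is available.
def _example_wrap_polydata():
    import vtk
    src = vtk.vtkSphereSource()
    src.Update()
    sphere = WrapDataObject(src.GetOutput())   # PolyData wrapper
    pts = sphere.Points                        # point coordinates as a VTKArray
    sphere.PointData.append(pts[:, 0], 'x')    # add the x coordinate as point data
    return sphere.PointData.keys()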
|
py | b417e4207507114275cd3d752ce75b8d66d4fb97 | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
The Univariate Log-Normal Distribution.
"""
from scipy.stats.distributions import lognorm
from qiskit.aqua.components.random_distributions.univariate_distribution import UnivariateDistribution
import numpy as np
class LogNormalDistribution(UnivariateDistribution):
"""
The Univariate Log-Normal Distribution.
"""
CONFIGURATION = {
'name': 'LogNormalDistribution',
'description': 'Log-Normal Distribution',
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'LogNormalDistribution_schema',
'type': 'object',
'properties': {
'num_target_qubits': {
'type': 'integer',
'default': 2,
},
'mu': {
'type': 'number',
'default': 0,
},
'sigma': {
'type': 'number',
'default': 1,
},
'low': {
'type': 'number',
'default': 0,
},
'high': {
'type': 'number',
'default': 3,
},
},
'additionalProperties': False
}
}
def __init__(self, num_target_qubits, mu=0, sigma=1, low=0, high=1):
self.validate(locals())
probabilities, _ = UnivariateDistribution.\
pdf_to_probabilities(lambda x: lognorm.pdf(x, s=sigma, scale=np.exp(mu)), low, high, 2 ** num_target_qubits)
super().__init__(num_target_qubits, probabilities, low, high)
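# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Builds a 3-qubit log-normal distribution discretized on [0, 3]; the
# `probabilities` attribute is assumed to be exposed by UnivariateDistribution.
def _example_lognormal_distribution():
    dist = LogNormalDistribution(num_target_qubits=3, mu=0.5, sigma=0.4, low=0, high=3)
    return dist.probabilities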
|
py | b417e615d638b9e27abb00cd32063a27109cd17b | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Network."""
import math
from functools import reduce
import numpy as np
import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore.parallel._auto_parallel_context import auto_parallel_context
from mindspore.communication.management import get_group_size
# ResNet
def _weight_variable(shape, factor=0.01):
init_value = np.random.randn(*shape).astype(np.float32) * factor
return Tensor(init_value)
def _conv3x3(in_channel, out_channel, stride=1):
weight_shape = (out_channel, in_channel, 3, 3)
weight = _weight_variable(weight_shape)
return nn.Conv2d(in_channel, out_channel,
kernel_size=3, stride=stride, padding=1, pad_mode='pad', weight_init=weight)
def _conv1x1(in_channel, out_channel, stride=1):
weight_shape = (out_channel, in_channel, 1, 1)
weight = _weight_variable(weight_shape)
return nn.Conv2d(in_channel, out_channel,
kernel_size=1, stride=stride, padding=0, pad_mode='pad', weight_init=weight)
def _conv7x7(in_channel, out_channel, stride=1):
weight_shape = (out_channel, in_channel, 7, 7)
weight = _weight_variable(weight_shape)
return nn.Conv2d(in_channel, out_channel,
kernel_size=7, stride=stride, padding=3, pad_mode='pad', weight_init=weight)
def _bn(channel):
return nn.BatchNorm2d(channel)
def _bn_last(channel):
return nn.BatchNorm2d(channel)
def _fc(in_channel, out_channel):
weight_shape = (out_channel, in_channel)
weight = _weight_variable(weight_shape)
return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)
class ResidualBlock(nn.Cell):
expansion = 4
def __init__(self,
in_channel,
out_channel,
stride=1):
super(ResidualBlock, self).__init__()
channel = out_channel // self.expansion
self.conv1 = _conv1x1(in_channel, channel, stride=1)
self.bn1 = _bn(channel)
self.conv2 = _conv3x3(channel, channel, stride=stride)
self.bn2 = _bn(channel)
self.conv3 = _conv1x1(channel, out_channel, stride=1)
self.bn3 = _bn_last(out_channel)
self.relu = nn.ReLU()
self.down_sample = False
if stride != 1 or in_channel != out_channel:
self.down_sample = True
self.down_sample_layer = None
if self.down_sample:
self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride),
_bn(out_channel)])
self.add = ops.Add()
def construct(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.down_sample:
identity = self.down_sample_layer(identity)
out = self.add(out, identity)
out = self.relu(out)
return out
class ResNet(nn.Cell):
def __init__(self,
block,
layer_nums,
in_channels,
out_channels,
strides,
num_classes):
super(ResNet, self).__init__()
if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
self.conv1 = _conv7x7(3, 64, stride=2)
self.bn1 = _bn(64)
self.relu = ops.ReLU()
self.pad = ops.Pad(((0, 0), (0, 0), (1, 0), (1, 0)))
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid")
self.layer1 = self._make_layer(block,
layer_nums[0],
in_channel=in_channels[0],
out_channel=out_channels[0],
stride=strides[0])
self.layer2 = self._make_layer(block,
layer_nums[1],
in_channel=in_channels[1],
out_channel=out_channels[1],
stride=strides[1])
self.layer3 = self._make_layer(block,
layer_nums[2],
in_channel=in_channels[2],
out_channel=out_channels[2],
stride=strides[2])
self.layer4 = self._make_layer(block,
layer_nums[3],
in_channel=in_channels[3],
out_channel=out_channels[3],
stride=strides[3])
self.mean = ops.ReduceMean(keep_dims=True)
self.flatten = nn.Flatten()
self.end_point = _fc(out_channels[3], num_classes)
def _make_layer(self, block, layer_num, in_channel, out_channel, stride):
layers = []
resnet_block = block(in_channel, out_channel, stride=stride)
layers.append(resnet_block)
for _ in range(1, layer_num):
resnet_block = block(out_channel, out_channel, stride=1)
layers.append(resnet_block)
return nn.SequentialCell(layers)
def construct(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pad(x)
c1 = self.maxpool(x)
c2 = self.layer1(c1)
c3 = self.layer2(c2)
c4 = self.layer3(c3)
c5 = self.layer4(c4)
out = self.mean(c5, (2, 3))
out = self.flatten(out)
out = self.end_point(out)
return c3, c4, c5
def resnet50(class_num=10):
return ResNet(ResidualBlock,
[3, 4, 6, 3],
[64, 256, 512, 1024],
[256, 512, 1024, 2048],
[1, 2, 2, 2],
class_num)
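# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Here resnet50() serves as a RetinaFace backbone: construct() returns the
# c3/c4/c5 feature maps rather than classification logits. Shapes below assume
# a 640x640 input and are approximate.
def _example_backbone_features():
    net = resnet50(class_num=10)
    x = Tensor(np.zeros((1, 3, 640, 640), np.float32))
    c3, c4, c5 = net(x)
    return c3.shape, c4.shape, c5.shape   # ~(1, 512, 80, 80), (1, 1024, 40, 40), (1, 2048, 20, 20)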
# RetinaFace
def Init_KaimingUniform(arr_shape, a=0, nonlinearity='leaky_relu', has_bias=False):
def _calculate_in_and_out(arr_shape):
dim = len(arr_shape)
if dim < 2:
raise ValueError("If initialize data with xavier uniform, the dimension of data must greater than 1.")
n_in = arr_shape[1]
n_out = arr_shape[0]
if dim > 2:
counter = reduce(lambda x, y: x * y, arr_shape[2:])
n_in *= counter
n_out *= counter
return n_in, n_out
def calculate_gain(nonlinearity, a=None):
linear_fans = ['linear', 'conv1d', 'conv2d', 'conv3d',
'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
if nonlinearity in linear_fans or nonlinearity == 'sigmoid':
return 1
if nonlinearity == 'tanh':
return 5.0 / 3
if nonlinearity == 'relu':
return math.sqrt(2.0)
if nonlinearity == 'leaky_relu':
if a is None:
negative_slope = 0.01
            elif (not isinstance(a, bool) and isinstance(a, int)) or isinstance(a, float):
negative_slope = a
else:
raise ValueError("negative_slope {} not a valid number".format(a))
return math.sqrt(2.0 / (1 + negative_slope ** 2))
raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
fan_in, _ = _calculate_in_and_out(arr_shape)
gain = calculate_gain(nonlinearity, a)
std = gain / math.sqrt(fan_in)
bound = math.sqrt(3.0) * std
weight = np.random.uniform(-bound, bound, arr_shape).astype(np.float32)
bias = None
if has_bias:
bound_bias = 1 / math.sqrt(fan_in)
bias = np.random.uniform(-bound_bias, bound_bias, arr_shape[0:1]).astype(np.float32)
bias = Tensor(bias)
return Tensor(weight), bias
class ConvBNReLU(nn.SequentialCell):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, norm_layer, leaky=0):
weight_shape = (out_planes, in_planes, kernel_size, kernel_size)
kaiming_weight, _ = Init_KaimingUniform(weight_shape, a=math.sqrt(5))
super(ConvBNReLU, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad_mode='pad', padding=padding, group=groups,
has_bias=False, weight_init=kaiming_weight),
norm_layer(out_planes),
nn.LeakyReLU(alpha=leaky)
)
class ConvBN(nn.SequentialCell):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, norm_layer):
weight_shape = (out_planes, in_planes, kernel_size, kernel_size)
kaiming_weight, _ = Init_KaimingUniform(weight_shape, a=math.sqrt(5))
super(ConvBN, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad_mode='pad', padding=padding, group=groups,
has_bias=False, weight_init=kaiming_weight),
norm_layer(out_planes),
)
class SSH(nn.Cell):
def __init__(self, in_channel, out_channel):
super(SSH, self).__init__()
assert out_channel % 4 == 0
leaky = 0
if out_channel <= 64:
leaky = 0.1
norm_layer = nn.BatchNorm2d
self.conv3X3 = ConvBN(in_channel, out_channel // 2, kernel_size=3, stride=1, padding=1, groups=1,
norm_layer=norm_layer)
self.conv5X5_1 = ConvBNReLU(in_channel, out_channel // 4, kernel_size=3, stride=1, padding=1, groups=1,
norm_layer=norm_layer, leaky=leaky)
self.conv5X5_2 = ConvBN(out_channel // 4, out_channel // 4, kernel_size=3, stride=1, padding=1, groups=1,
norm_layer=norm_layer)
self.conv7X7_2 = ConvBNReLU(out_channel // 4, out_channel // 4, kernel_size=3, stride=1, padding=1, groups=1,
norm_layer=norm_layer, leaky=leaky)
self.conv7X7_3 = ConvBN(out_channel // 4, out_channel // 4, kernel_size=3, stride=1, padding=1, groups=1,
norm_layer=norm_layer)
self.cat = ops.Concat(axis=1)
self.relu = nn.ReLU()
def construct(self, x):
conv3X3 = self.conv3X3(x)
conv5X5_1 = self.conv5X5_1(x)
conv5X5 = self.conv5X5_2(conv5X5_1)
conv7X7_2 = self.conv7X7_2(conv5X5_1)
conv7X7 = self.conv7X7_3(conv7X7_2)
out = self.cat((conv3X3, conv5X5, conv7X7))
out = self.relu(out)
return out
class FPN(nn.Cell):
def __init__(self):
super(FPN, self).__init__()
out_channels = 256
leaky = 0
if out_channels <= 64:
leaky = 0.1
norm_layer = nn.BatchNorm2d
self.output1 = ConvBNReLU(512, 256, kernel_size=1, stride=1, padding=0, groups=1,
norm_layer=norm_layer, leaky=leaky)
self.output2 = ConvBNReLU(1024, 256, kernel_size=1, stride=1, padding=0, groups=1,
norm_layer=norm_layer, leaky=leaky)
self.output3 = ConvBNReLU(2048, 256, kernel_size=1, stride=1, padding=0, groups=1,
norm_layer=norm_layer, leaky=leaky)
self.merge1 = ConvBNReLU(256, 256, kernel_size=3, stride=1, padding=1, groups=1,
norm_layer=norm_layer, leaky=leaky)
self.merge2 = ConvBNReLU(256, 256, kernel_size=3, stride=1, padding=1, groups=1,
norm_layer=norm_layer, leaky=leaky)
def construct(self, input1, input2, input3):
output1 = self.output1(input1)
output2 = self.output2(input2)
output3 = self.output3(input3)
up3 = ops.ResizeNearestNeighbor([ops.Shape()(output2)[2], ops.Shape()(output2)[3]])(output3)
output2 = up3 + output2
output2 = self.merge2(output2)
up2 = ops.ResizeNearestNeighbor([ops.Shape()(output1)[2], ops.Shape()(output1)[3]])(output2)
output1 = up2 + output1
output1 = self.merge1(output1)
return output1, output2, output3
class ClassHead(nn.Cell):
def __init__(self, inchannels=512, num_anchors=3):
super(ClassHead, self).__init__()
self.num_anchors = num_anchors
weight_shape = (self.num_anchors * 2, inchannels, 1, 1)
kaiming_weight, kaiming_bias = Init_KaimingUniform(weight_shape, a=math.sqrt(5), has_bias=True)
self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0,
has_bias=True, weight_init=kaiming_weight, bias_init=kaiming_bias)
self.permute = ops.Transpose()
self.reshape = ops.Reshape()
def construct(self, x):
out = self.conv1x1(x)
out = self.permute(out, (0, 2, 3, 1))
return self.reshape(out, (ops.Shape()(out)[0], -1, 2))
class BboxHead(nn.Cell):
def __init__(self, inchannels=512, num_anchors=3):
super(BboxHead, self).__init__()
weight_shape = (num_anchors * 4, inchannels, 1, 1)
kaiming_weight, kaiming_bias = Init_KaimingUniform(weight_shape, a=math.sqrt(5), has_bias=True)
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(1, 1), stride=1, padding=0, has_bias=True,
weight_init=kaiming_weight, bias_init=kaiming_bias)
self.permute = ops.Transpose()
self.reshape = ops.Reshape()
def construct(self, x):
out = self.conv1x1(x)
out = self.permute(out, (0, 2, 3, 1))
return self.reshape(out, (ops.Shape()(out)[0], -1, 4))
class LandmarkHead(nn.Cell):
def __init__(self, inchannels=512, num_anchors=3):
super(LandmarkHead, self).__init__()
weight_shape = (num_anchors * 10, inchannels, 1, 1)
kaiming_weight, kaiming_bias = Init_KaimingUniform(weight_shape, a=math.sqrt(5), has_bias=True)
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0, has_bias=True,
weight_init=kaiming_weight, bias_init=kaiming_bias)
self.permute = ops.Transpose()
self.reshape = ops.Reshape()
def construct(self, x):
out = self.conv1x1(x)
out = self.permute(out, (0, 2, 3, 1))
return self.reshape(out, (ops.Shape()(out)[0], -1, 10))
class RetinaFace(nn.Cell):
def __init__(self, phase='train', backbone=None):
super(RetinaFace, self).__init__()
self.phase = phase
self.base = backbone
self.fpn = FPN()
self.ssh1 = SSH(256, 256)
self.ssh2 = SSH(256, 256)
self.ssh3 = SSH(256, 256)
self.ClassHead = self._make_class_head(fpn_num=3, inchannels=[256, 256, 256], anchor_num=[2, 2, 2])
self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=[256, 256, 256], anchor_num=[2, 2, 2])
self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=[256, 256, 256], anchor_num=[2, 2, 2])
self.cat = ops.Concat(axis=1)
def _make_class_head(self, fpn_num, inchannels, anchor_num):
classhead = nn.CellList()
for i in range(fpn_num):
classhead.append(ClassHead(inchannels[i], anchor_num[i]))
return classhead
def _make_bbox_head(self, fpn_num, inchannels, anchor_num):
bboxhead = nn.CellList()
for i in range(fpn_num):
bboxhead.append(BboxHead(inchannels[i], anchor_num[i]))
return bboxhead
def _make_landmark_head(self, fpn_num, inchannels, anchor_num):
landmarkhead = nn.CellList()
for i in range(fpn_num):
landmarkhead.append(LandmarkHead(inchannels[i], anchor_num[i]))
return landmarkhead
def construct(self, inputs):
f1, f2, f3 = self.base(inputs)
f1, f2, f3 = self.fpn(f1, f2, f3)
# SSH
f1 = self.ssh1(f1)
f2 = self.ssh2(f2)
f3 = self.ssh3(f3)
features = [f1, f2, f3]
bbox = ()
for i, feature in enumerate(features):
bbox = bbox + (self.BboxHead[i](feature),)
bbox_regressions = self.cat(bbox)
cls = ()
for i, feature in enumerate(features):
cls = cls + (self.ClassHead[i](feature),)
classifications = self.cat(cls)
landm = ()
for i, feature in enumerate(features):
landm = landm + (self.LandmarkHead[i](feature),)
ldm_regressions = self.cat(landm)
if self.phase == 'train':
output = (bbox_regressions, classifications, ldm_regressions)
else:
output = (bbox_regressions, ops.Softmax(-1)(classifications), ldm_regressions)
return output
class RetinaFaceWithLossCell(nn.Cell):
def __init__(self, network, multibox_loss, config):
super(RetinaFaceWithLossCell, self).__init__()
self.network = network
self.loc_weight = config['loc_weight']
self.class_weight = config['class_weight']
self.landm_weight = config['landm_weight']
self.multibox_loss = multibox_loss
def construct(self, img, loc_t, conf_t, landm_t):
pred_loc, pre_conf, pre_landm = self.network(img)
loss_loc, loss_conf, loss_landm = self.multibox_loss(pred_loc, loc_t, pre_conf, conf_t, pre_landm, landm_t)
return loss_loc * self.loc_weight + loss_conf * self.class_weight + loss_landm * self.landm_weight
class TrainingWrapper(nn.Cell):
def __init__(self, network, optimizer, sens=1.0):
super(TrainingWrapper, self).__init__(auto_prefix=False)
self.network = network
self.weights = ms.ParameterTuple(network.trainable_params())
self.optimizer = optimizer
self.grad = ops.GradOperation(get_by_list=True, sens_param=True)
self.sens = sens
self.reducer_flag = False
self.grad_reducer = None
self.parallel_mode = ms.get_auto_parallel_context("parallel_mode")
class_list = [ms.ParallelMode.DATA_PARALLEL, ms.ParallelMode.HYBRID_PARALLEL]
if self.parallel_mode in class_list:
self.reducer_flag = True
if self.reducer_flag:
mean = ms.get_auto_parallel_context("gradients_mean")
if auto_parallel_context().get_device_num_is_set():
degree = ms.get_auto_parallel_context("device_num")
else:
degree = get_group_size()
self.grad_reducer = nn.DistributedGradReducer(optimizer.parameters, mean, degree)
def construct(self, *args):
weights = self.weights
loss = self.network(*args)
sens = ops.Fill()(ops.DType()(loss), ops.Shape()(loss), self.sens)
grads = self.grad(self.network, weights)(*args, sens)
if self.reducer_flag:
# apply grad reducer on grads
grads = self.grad_reducer(grads)
self.optimizer(grads)
return loss
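# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how the pieces above are typically wired together; multibox_loss and
# cfg (a dict providing 'loc_weight', 'class_weight' and 'landm_weight') are
# assumptions defined elsewhere in the original project.
def _example_build_training_net(multibox_loss, cfg):
    backbone = resnet50()
    net = RetinaFace(phase='train', backbone=backbone)
    net_with_loss = RetinaFaceWithLossCell(net, multibox_loss, cfg)
    opt = nn.SGD(net.trainable_params(), learning_rate=0.01, momentum=0.9)
    return TrainingWrapper(net_with_loss, opt)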
|
py | b417e65c5da02a09cad0da1642418f538d79d7c1 | """
Utility solvers and calculators for 3D hydrostatic ocean model
"""
from __future__ import absolute_import
from .utility import *
import numpy
__all__ = [
"VerticalVelocitySolver",
"VerticalIntegrator",
"DensitySolver",
"DensitySolverWeak",
"VelocityMagnitudeSolver",
"Mesh3DConsistencyCalculator",
"ExpandFunctionTo3d",
"SubFunctionExtractor",
"ALEMeshUpdater",
"SmagorinskyViscosity",
"EquationOfState",
"JackettEquationOfState",
"LinearEquationOfState",
"get_horizontal_elem_size_3d",
]
class VerticalVelocitySolver(object):
r"""
Computes vertical velocity diagnostically from the continuity equation
Vertical velocity is obtained from the continuity equation
.. math::
\frac{\partial w}{\partial z} = -\nabla_h \cdot \textbf{u}
:label: continuity_eq_3d
and the bottom impermeability condition (:math:`h` denotes the bathymetry)
.. math::
\textbf{n}_h \cdot \textbf{u} + w n_z &= 0 \quad \forall \mathbf{x} \in \Gamma_{b} \\
\Leftrightarrow w &= -\nabla_h h \cdot \mathbf{u} \quad \forall \mathbf{x} \in \Gamma_{b}
:math:`w` can be solved with the weak form
.. math::
\int_{\Gamma_s} w n_z \varphi dS
+ \int_{\mathcal{I}_h} \text{avg}(w) \text{jump}(\varphi n_z) dS
- \int_{\Omega} w \frac{\partial \varphi}{\partial z} dx
= \\
\int_{\Omega} \mathbf{u} \cdot \nabla_h \varphi dx
- \int_{\mathcal{I}_h \cup \mathcal{I}_v} \text{avg}(\mathbf{u}) \cdot \text{jump}(\varphi \mathbf{n}_h) dS
- \int_{\Gamma_s} \mathbf{u} \cdot \varphi \mathbf{n}_h dS
where the :math:`\Gamma_b` terms vanish due to the bottom impermeability
condition.
"""
@PETSc.Log.EventDecorator("thetis.VerticalVelocitySolver.__init__")
def __init__(self, solution, uv, bathymetry, boundary_funcs={},
solver_parameters=None):
"""
:arg solution: w :class:`Function`
:arg uv: horizontal velocity :class:`Function`
:arg bathymetry: bathymetry :class:`Function`
:kwarg dict boundary_funcs: boundary conditions used in the 3D momentum
equation. Provides external values of uv (if any).
:kwarg dict solver_parameters: PETSc solver options
"""
if solver_parameters is None:
solver_parameters = {}
solver_parameters.setdefault('snes_type', 'ksponly')
solver_parameters.setdefault('ksp_type', 'preonly')
solver_parameters.setdefault('pc_type', 'bjacobi')
solver_parameters.setdefault('sub_ksp_type', 'preonly')
solver_parameters.setdefault('sub_pc_type', 'ilu')
solver_parameters.setdefault('sub_pc_factor_shift_type', 'inblocks')
fs = solution.function_space()
mesh = fs.mesh()
test = TestFunction(fs)
tri = TrialFunction(fs)
normal = FacetNormal(mesh)
# define measures with a reasonable quadrature degree
p, q = fs.ufl_element().degree()
self.quad_degree = (2*p, 2*q)
self.dx = dx(degree=self.quad_degree)
self.dS_h = dS_h(degree=self.quad_degree)
self.dS_v = dS_v(degree=self.quad_degree)
self.ds_surf = ds_surf(degree=self.quad_degree)
# NOTE weak dw/dz
a = tri[2]*test[2]*normal[2]*ds_surf + \
avg(tri[2])*jump(test[2], normal[2])*dS_h - Dx(test[2], 2)*tri[2]*self.dx
# NOTE weak div(uv)
uv_star = avg(uv)
# NOTE in the case of mimetic uv the div must be taken over all components
l_v_facet = (uv_star[0]*jump(test[2], normal[0])
+ uv_star[1]*jump(test[2], normal[1])
+ uv_star[2]*jump(test[2], normal[2]))*self.dS_v
l_h_facet = (uv_star[0]*jump(test[2], normal[0])
+ uv_star[1]*jump(test[2], normal[1])
+ uv_star[2]*jump(test[2], normal[2]))*self.dS_h
l_surf = (uv[0]*normal[0]
+ uv[1]*normal[1] + uv[2]*normal[2])*test[2]*self.ds_surf
l_vol = inner(uv, nabla_grad(test[2]))*self.dx
l = l_vol - l_v_facet - l_h_facet - l_surf
for bnd_marker in sorted(mesh.exterior_facets.unique_markers):
funcs = boundary_funcs.get(bnd_marker)
ds_bnd = ds_v(int(bnd_marker), degree=self.quad_degree)
if funcs is None:
# assume land boundary
continue
else:
# use symmetry condition
l += -(uv[0]*normal[0] + uv[1]*normal[1])*test[2]*ds_bnd
# NOTE For ALE mesh constant_jacobian should be False
# however the difference is very small as A is nearly independent of
# mesh stretching: only the normals vary in time
self.prob = LinearVariationalProblem(a, l, solution,
constant_jacobian=True)
self.solver = LinearVariationalSolver(self.prob,
solver_parameters=solver_parameters)
@PETSc.Log.EventDecorator("thetis.VerticalVelocitySolver.solve")
def solve(self):
"""Compute w"""
self.solver.solve()
class VerticalIntegrator(object):
"""
Computes vertical integral (or average) of a field.
"""
@PETSc.Log.EventDecorator("thetis.VerticalIntegrator.__init__")
def __init__(self, input, output, bottom_to_top=True,
bnd_value=Constant(0.0), average=False,
bathymetry=None, elevation=None, solver_parameters=None):
"""
:arg input: 3D field to integrate
:arg output: 3D field where the integral is stored
        :kwarg bottom_to_top: Defines the integration direction: if True, integration is performed along the z axis from the bottom surface to the top surface.
:kwarg bnd_value: Value of the integral at the bottom (top) boundary if bottom_to_top is True (False)
:kwarg average: If True computes the vertical average instead. Requires bathymetry and elevation fields
:kwarg bathymetry: 3D field defining the bathymetry
:kwarg elevation: 3D field defining the free surface elevation
:kwarg dict solver_parameters: PETSc solver options
"""
self.output = output
space = output.function_space()
mesh = space.mesh()
e_continuity = element_continuity(space.ufl_element())
vertical_is_dg = e_continuity.vertical in ['dg', 'hdiv']
if solver_parameters is None:
solver_parameters = {}
solver_parameters.setdefault('snes_type', 'ksponly')
if e_continuity.vertical != 'hdiv':
solver_parameters.setdefault('ksp_type', 'preonly')
solver_parameters.setdefault('pc_type', 'bjacobi')
solver_parameters.setdefault('sub_ksp_type', 'preonly')
solver_parameters.setdefault('sub_pc_type', 'ilu')
tri = TrialFunction(space)
phi = TestFunction(space)
normal = FacetNormal(mesh)
# define measures with a reasonable quadrature degree
p, q = space.ufl_element().degree()
p_in, q_in = input.function_space().ufl_element().degree()
self.quad_degree = (p+p_in+1, q+q_in+1)
self.dx = dx(degree=self.quad_degree)
self.dS_h = dS_h(degree=self.quad_degree)
self.ds_surf = ds_surf(degree=self.quad_degree)
self.ds_bottom = ds_bottom(degree=self.quad_degree)
if bottom_to_top:
bnd_term = normal[2]*inner(bnd_value, phi)*self.ds_bottom
mass_bnd_term = normal[2]*inner(tri, phi)*self.ds_surf
else:
bnd_term = normal[2]*inner(bnd_value, phi)*self.ds_surf
mass_bnd_term = normal[2]*inner(tri, phi)*self.ds_bottom
self.a = -inner(Dx(phi, 2), tri)*self.dx + mass_bnd_term
if bottom_to_top:
up_value = tri('+')
else:
up_value = tri('-')
if vertical_is_dg:
if len(input.ufl_shape) > 0:
dim = input.ufl_shape[0]
for i in range(dim):
self.a += up_value[i]*jump(phi[i], normal[2])*self.dS_h
else:
self.a += up_value*jump(phi, normal[2])*self.dS_h
if average:
source = input/(elevation + bathymetry)
else:
source = input
self.l = inner(source, phi)*self.dx + bnd_term
self.prob = LinearVariationalProblem(self.a, self.l, output, constant_jacobian=average)
self.solver = LinearVariationalSolver(self.prob, solver_parameters=solver_parameters)
@PETSc.Log.EventDecorator("thetis.VerticalIntegrator.solve")
def solve(self):
"""
Computes the integral and stores it in the output field.
"""
self.solver.solve()
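# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Integrates a 3D tracer from the bottom boundary upwards; fs_3d is assumed to
# be a scalar function space on the extruded mesh of a Thetis FlowSolver.
def _example_vertical_integral(fs_3d):
    tracer = Function(fs_3d, name='tracer')
    integral = Function(fs_3d, name='tracer_integral')
    VerticalIntegrator(tracer, integral, bottom_to_top=True).solve()
    return integral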
class DensitySolver(object):
r"""
Computes density from salinity and temperature using the equation of state.
Water density is defined as
.. math::
\rho = \rho'(T, S, p) + \rho_0
This method computes the density anomaly :math:`\rho'`.
Density is computed point-wise assuming that temperature, salinity and
density are in the same function space.
"""
@PETSc.Log.EventDecorator("thetis.DensitySolver.__init__")
def __init__(self, salinity, temperature, density, eos_class):
"""
:arg salinity: water salinity field
:type salinity: :class:`Function`
:arg temperature: water temperature field
:type temperature: :class:`Function`
:arg density: water density field
:type density: :class:`Function`
:arg eos_class: equation of state that defines water density
:type eos_class: :class:`EquationOfState`
"""
self.fs = density.function_space()
self.eos = eos_class
if isinstance(salinity, Function):
assert self.fs == salinity.function_space()
if isinstance(temperature, Function):
assert self.fs == temperature.function_space()
self.s = salinity
self.t = temperature
self.rho = density
def _get_array(self, function):
"""Returns numpy data array from a :class:`Function`"""
if isinstance(function, Function):
assert self.fs == function.function_space()
return function.dat.data[:]
if isinstance(function, Constant):
return float(function)
# assume that function is a float
return function
@PETSc.Log.EventDecorator("thetis.DensitySolver.solve")
def solve(self):
"""Compute density"""
s = self._get_array(self.s)
th = self._get_array(self.t)
p = 0.0 # NOTE ignore pressure for now
rho0 = self._get_array(physical_constants['rho0'])
self.rho.dat.data[:] = self.eos.compute_rho(s, th, p, rho0)
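# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Computes the density anomaly point-wise from salinity and temperature fields
# that share a function space; JackettEquationOfState (defined below) is
# assumed to take no constructor arguments.
def _example_density(salt_3d, temp_3d):
    rho = Function(salt_3d.function_space(), name='density')
    DensitySolver(salt_3d, temp_3d, rho, JackettEquationOfState()).solve()
    return rho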
class DensitySolverWeak(object):
r"""
Computes density from salinity and temperature using the equation of state.
Water density is defined as
.. math::
\rho = \rho'(T, S, p) + \rho_0
This method computes the density anomaly :math:`\rho'`.
Density is computed in a weak sense by projecting the analytical expression
on the density field.
"""
@PETSc.Log.EventDecorator("thetis.DensitySolverWeak.__init__")
def __init__(self, salinity, temperature, density, eos_class):
"""
:arg salinity: water salinity field
:type salinity: :class:`Function`
:arg temperature: water temperature field
:type temperature: :class:`Function`
:arg density: water density field
:type density: :class:`Function`
:arg eos_class: equation of state that defines water density
:type eos_class: :class:`EquationOfState`
"""
self.fs = density.function_space()
self.eos = eos_class
assert isinstance(salinity, (Function, Constant))
assert isinstance(temperature, (Function, Constant))
self.s = salinity
self.t = temperature
self.density = density
self.p = Constant(0.)
rho0 = physical_constants['rho0']
f = self.eos.eval(self.s, self.t, self.p, rho0)
self.projector = Projector(f, self.density)
def ensure_positive_salinity(self):
"""
        Make sure salinity is not negative; some equations of state depend on sqrt(salt).
"""
# FIXME this is really hacky and modifies the state variable
# NOTE if salt field is P2 checking nodal values is not enough ..
ix = self.s.dat.data < 0
self.s.dat.data[ix] = 0.0
@PETSc.Log.EventDecorator("thetis.DensitySolverWeak.solve")
def solve(self):
"""Compute density"""
self.ensure_positive_salinity()
self.projector.project()
class VelocityMagnitudeSolver(object):
"""
Computes magnitude of (u[0],u[1],w) and stores it in solution
"""
@PETSc.Log.EventDecorator("thetis.VelocityMagnitudeSolver.__init__")
def __init__(self, solution, u=None, w=None, min_val=1e-6,
solver_parameters=None):
"""
:arg solution: scalar field for velocity magnitude scalar :class:`Function`
:type solution: :class:`Function`
:kwarg u: horizontal velocity
:type u: :class:`Function`
:kwarg w: vertical velocity
:type w: :class:`Function`
:kwarg float min_val: minimum value of magnitude. Minimum value of solution
will be clipped to this value
:kwarg dict solver_parameters: PETSc solver options
If ``u`` is None computes magnitude of (0,0,w).
If ``w`` is None computes magnitude of (u[0],u[1],0).
"""
self.solution = solution
self.min_val = min_val
function_space = solution.function_space()
test = TestFunction(function_space)
tri = TrialFunction(function_space)
a = test*tri*dx
s = 0
if u is not None:
s += u[0]**2 + u[1]**2
if w is not None:
s += w**2
l = test*sqrt(s)*dx
self.prob = LinearVariationalProblem(a, l, solution)
self.solver = LinearVariationalSolver(self.prob, solver_parameters=solver_parameters)
@PETSc.Log.EventDecorator("thetis.VelocityMagnitudeSolver.solve")
def solve(self):
"""Compute the magnitude"""
self.solver.solve()
numpy.maximum(self.solution.dat.data, self.min_val, self.solution.dat.data)
class Mesh3DConsistencyCalculator(object):
r"""
Computes a hydrostatic consistency criterion metric on the 3D mesh.
Let :math:`\Delta x` and :math:`\Delta z` denote the horizontal and vertical
element sizes. The hydrostatic consistency criterion (HCC) can then be
expressed as
.. math::
R = \frac{|\nabla h| \Delta x}{\Delta z} < 1
where :math:`\nabla h` is the bathymetry gradient (or gradient of the
internal horizontal facet).
Violating the hydrostatic consistency criterion leads to internal pressure
gradient errors.
In practice one can violate the :math:`R < 1` condition without
jeopardizing numerical stability; typically :math:`R < 5`.
Mesh consistency can be improved by coarsening the vertical
mesh, refining the horizontal mesh, or smoothing the bathymetry.
For a 3D prism, let :math:`\delta z_t,\delta z_b` denote the maximal
:math:`z` coordinate difference in the surface and bottom facets,
respectively, and :math:`\Delta z` the height of the prism.
We can then compute :math:`R` for the two facets as
.. math::
R_t &= \frac{\delta z_t}{\Delta z} \\
R_b &= \frac{\delta z_b}{\Delta z}
For a straight prism we have :math:`R = 0`, and :math:`R = 1` in
the case where the highest bottom node is at the same level as the lowest
surface node.
"""
@PETSc.Log.EventDecorator("thetis.Mesh3DConsistencyCalculator.__init__")
def __init__(self, solver_obj):
"""
:arg solver_obj: :class:`FlowSolver` object
"""
self.solver_obj = solver_obj
self.output = self.solver_obj.fields.hcc_metric_3d
self.z_coord = solver_obj.fields.z_coord_3d
# create par loop for computing delta
self.fs_3d = self.solver_obj.function_spaces.P1DG
assert self.output.function_space() == self.fs_3d
nodes = get_facet_mask(self.fs_3d, 'bottom')
self.idx = op2.Global(len(nodes), nodes, dtype=numpy.int32, name='node_idx')
self.kernel = op2.Kernel("""
void my_kernel(double *output, double *z_field, int *idx) {
// compute max delta z on top and bottom facets
double z_top_max = -1e20;
double z_top_min = 1e20;
double z_bot_max = -1e20;
double z_bot_min = 1e20;
int i_top = 1;
int i_bot = 0;
for ( int d = 0; d < %(nodes)d; d++ ) {
double z_top = z_field[idx[d] + i_top];
double z_bot = z_field[idx[d] + i_bot];
z_top_max = fmax(z_top, z_top_max);
z_top_min = fmin(z_top, z_top_min);
z_bot_max = fmax(z_bot, z_bot_max);
z_bot_min = fmin(z_bot, z_bot_min);
}
double delta_z_top = z_top_max - z_top_min;
double delta_z_bot = z_bot_max - z_bot_min;
// compute R ratio
for ( int d = 0; d < %(nodes)d; d++ ) {
double z_top = z_field[idx[d] + i_top];
double z_bot = z_field[idx[d] + i_bot];
double h = z_top - z_bot;
output[idx[d] + i_top] = delta_z_top/h;
output[idx[d] + i_bot] = delta_z_bot/h;
}
}""" % {'nodes': len(nodes)},
'my_kernel')
@PETSc.Log.EventDecorator("thetis.Mesh3DConsistencyCalculator.solve")
def solve(self):
"""Compute the HCC metric"""
op2.par_loop(self.kernel, self.solver_obj.mesh.cell_set,
self.output.dat(op2.WRITE, self.output.function_space().cell_node_map()),
self.z_coord.dat(op2.READ, self.z_coord.function_space().cell_node_map()),
self.idx(op2.READ),
iterate=op2.ALL)
# compute global min/max
r_min = self.output.dat.data.min()
r_max = self.output.dat.data.max()
r_min = self.solver_obj.comm.allreduce(r_min, op=MPI.MIN)
r_max = self.solver_obj.comm.allreduce(r_max, op=MPI.MAX)
print_output('HCC: {:} .. {:}'.format(r_min, r_max))
class ExpandFunctionTo3d(object):
"""
Copy a 2D field to 3D
Copies a field from 2D mesh to 3D mesh, assigning the same value over the
vertical dimension. Horizontal function spaces must be the same.
.. code-block:: python
U = FunctionSpace(mesh, 'DG', 1)
U_2d = FunctionSpace(mesh2d, 'DG', 1)
func2d = Function(U_2d)
func3d = Function(U)
ex = ExpandFunctionTo3d(func2d, func3d)
ex.solve()
"""
@PETSc.Log.EventDecorator("thetis.ExpandFunctionTo3d.__init__")
def __init__(self, input_2d, output_3d, elem_height=None):
"""
:arg input_2d: 2D source field
:type input_2d: :class:`Function`
:arg output_3d: 3D target field
:type output_3d: :class:`Function`
:kwarg elem_height: scalar :class:`Function` in 3D mesh that defines
the vertical element size. Needed only in the case of HDiv function
spaces.
"""
self.input_2d = input_2d
self.output_3d = output_3d
self.fs_2d = self.input_2d.function_space()
self.fs_3d = self.output_3d.function_space()
family_2d = self.fs_2d.ufl_element().family()
base_element_3d = get_extruded_base_element(self.fs_3d.ufl_element())
assert isinstance(base_element_3d, ufl.TensorProductElement)
family_3dh = base_element_3d.sub_elements()[0].family()
if family_2d != family_3dh:
raise Exception('2D and 3D spaces do not match: {0:s} {1:s}'.format(family_2d, family_3dh))
self.do_hdiv_scaling = family_2d in ['Raviart-Thomas', 'RTCF', 'Brezzi-Douglas-Marini', 'BDMCF']
if self.do_hdiv_scaling and elem_height is None:
raise Exception('elem_height must be provided for HDiv spaces')
self.iter_domain = op2.ALL
# number of nodes in vertical direction
n_vert_nodes = self.fs_3d.finat_element.space_dimension() / self.fs_2d.finat_element.space_dimension()
nodes = get_facet_mask(self.fs_3d, 'bottom')
self.idx = op2.Global(len(nodes), nodes, dtype=numpy.int32, name='node_idx')
self.kernel = op2.Kernel("""
void my_kernel(double *func, double *func2d, int *idx) {
for ( int d = 0; d < %(nodes)d; d++ ) {
for ( int c = 0; c < %(func2d_dim)d; c++ ) {
for ( int e = 0; e < %(v_nodes)d; e++ ) {
func[%(func3d_dim)d*(idx[d]+e) + c] = func2d[%(func2d_dim)d*d + c];
}
}
}
}""" % {'nodes': self.fs_2d.finat_element.space_dimension(),
'func2d_dim': self.input_2d.function_space().value_size,
'func3d_dim': self.fs_3d.value_size,
'v_nodes': n_vert_nodes},
'my_kernel')
if self.do_hdiv_scaling:
solver_parameters = {}
solver_parameters.setdefault('ksp_atol', 1e-12)
solver_parameters.setdefault('ksp_rtol', 1e-16)
test = TestFunction(self.fs_3d)
tri = TrialFunction(self.fs_3d)
a = inner(tri, test)*dx
l = inner(self.output_3d, test)*elem_height*dx
prob = LinearVariationalProblem(a, l, self.output_3d)
self.rt_scale_solver = LinearVariationalSolver(
prob, solver_parameters=solver_parameters)
@PETSc.Log.EventDecorator("thetis.ExpandFunctionTo3d.solve")
def solve(self):
with timed_stage('copy_2d_to_3d'):
# execute par loop
op2.par_loop(
self.kernel, self.fs_3d.mesh().cell_set,
self.output_3d.dat(op2.WRITE, self.fs_3d.cell_node_map()),
self.input_2d.dat(op2.READ, self.fs_2d.cell_node_map()),
self.idx(op2.READ),
iterate=self.iter_domain)
if self.do_hdiv_scaling:
self.rt_scale_solver.solve()
class SubFunctionExtractor(object):
"""
Extract a 2D sub-function from a 3D function in an extruded mesh
Given 2D and 3D functions,
.. code-block:: python
U = FunctionSpace(mesh, 'DG', 1)
U_2d = FunctionSpace(mesh2d, 'DG', 1)
func2d = Function(U_2d)
func3d = Function(U)
Get surface value:
.. code-block:: python
ex = SubFunctionExtractor(func3d, func2d,
boundary='top', elem_facet='top')
ex.solve()
Get bottom value:
.. code-block:: python
ex = SubFunctionExtractor(func3d, func2d,
boundary='bottom', elem_facet='bottom')
ex.solve()
Get value at the top of bottom element:
.. code-block:: python
ex = SubFunctionExtractor(func3d, func2d,
boundary='bottom', elem_facet='top')
ex.solve()
"""
@PETSc.Log.EventDecorator("thetis.SubFunctionExtractor.__init__")
def __init__(self, input_3d, output_2d,
boundary='top', elem_facet=None,
elem_height=None):
"""
:arg input_3d: 3D source field
:type input_3d: :class:`Function`
:arg output_2d: 2D target field
:type output_2d: :class:`Function`
:kwarg str boundary: 'top'|'bottom'
Defines whether to extract from the surface or bottom 3D elements
:kwarg str elem_facet: 'top'|'bottom'|'average'
Defines which facet of the 3D element is extracted. The 'average'
computes mean of the top and bottom facets of the 3D element.
:kwarg elem_height: scalar :class:`Function` in 2D mesh that defines
the vertical element size. Needed only in the case of HDiv function
spaces.
"""
self.input_3d = input_3d
self.output_2d = output_2d
self.fs_3d = self.input_3d.function_space()
self.fs_2d = self.output_2d.function_space()
if elem_facet is None:
# extract surface/bottom face by default
elem_facet = boundary
family_2d = self.fs_2d.ufl_element().family()
base_element_3d = get_extruded_base_element(self.fs_3d.ufl_element())
assert isinstance(base_element_3d, ufl.TensorProductElement)
family_3dh = base_element_3d.sub_elements()[0].family()
if family_2d != family_3dh:
raise Exception('2D and 3D spaces do not match: {0:s} {1:s}'.format(family_2d, family_3dh))
self.do_hdiv_scaling = family_2d in ['Raviart-Thomas', 'RTCF', 'Brezzi-Douglas-Marini', 'BDMCF']
if self.do_hdiv_scaling and elem_height is None:
raise Exception('elem_height must be provided for HDiv spaces')
assert elem_facet in ['top', 'bottom', 'average'], 'Unsupported elem_facet: {:}'.format(elem_facet)
if elem_facet == 'average':
nodes = numpy.hstack((get_facet_mask(self.fs_3d, 'bottom'),
get_facet_mask(self.fs_3d, 'top')))
else:
nodes = get_facet_mask(self.fs_3d, elem_facet)
if boundary == 'top':
self.iter_domain = op2.ON_TOP
elif boundary == 'bottom':
self.iter_domain = op2.ON_BOTTOM
out_nodes = self.fs_2d.finat_element.space_dimension()
if elem_facet == 'average':
assert (len(nodes) == 2*out_nodes)
else:
assert (len(nodes) == out_nodes)
self.idx = op2.Global(len(nodes), nodes, dtype=numpy.int32, name='node_idx')
if elem_facet == 'average':
# compute average of top and bottom elem nodes
self.kernel = op2.Kernel("""
void my_kernel(double *func, double *func3d, int *idx) {
int nnodes = %(nodes)d;
for ( int d = 0; d < nnodes; d++ ) {
for ( int c = 0; c < %(func2d_dim)d; c++ ) {
func[%(func2d_dim)d*d + c] = 0.5*(func3d[%(func3d_dim)d*idx[d] + c] +
func3d[%(func3d_dim)d*idx[d + nnodes] + c]);
}
}
}""" % {'nodes': self.output_2d.cell_node_map().arity,
'func2d_dim': self.output_2d.function_space().value_size,
'func3d_dim': self.fs_3d.value_size},
'my_kernel')
else:
self.kernel = op2.Kernel("""
void my_kernel(double *func, double *func3d, int *idx) {
for ( int d = 0; d < %(nodes)d; d++ ) {
for ( int c = 0; c < %(func2d_dim)d; c++ ) {
func[%(func2d_dim)d*d + c] = func3d[%(func3d_dim)d*idx[d] + c];
}
}
}""" % {'nodes': self.output_2d.cell_node_map().arity,
'func2d_dim': self.output_2d.function_space().value_size,
'func3d_dim': self.fs_3d.value_size},
'my_kernel')
if self.do_hdiv_scaling:
solver_parameters = {}
solver_parameters.setdefault('ksp_atol', 1e-12)
solver_parameters.setdefault('ksp_rtol', 1e-16)
test = TestFunction(self.fs_2d)
tri = TrialFunction(self.fs_2d)
a = inner(tri, test)*dx
l = inner(self.output_2d, test)/elem_height*dx
prob = LinearVariationalProblem(a, l, self.output_2d)
self.rt_scale_solver = LinearVariationalSolver(
prob, solver_parameters=solver_parameters)
@PETSc.Log.EventDecorator("thetis.SubFunctionExtractor.solve")
def solve(self):
with timed_stage('copy_3d_to_2d'):
# execute par loop
op2.par_loop(self.kernel, self.fs_3d.mesh().cell_set,
self.output_2d.dat(op2.WRITE, self.fs_2d.cell_node_map()),
self.input_3d.dat(op2.READ, self.fs_3d.cell_node_map()),
self.idx(op2.READ),
iterate=self.iter_domain)
if self.do_hdiv_scaling:
self.rt_scale_solver.solve()
class ALEMeshUpdater(object):
"""
Class that handles vertically moving ALE mesh
Mesh geometry is updated to match the elevation field
(``solver.fields.elev_2d``). First the discontinuous elevation field is
projected to continuous space, and this field is used to update the mesh
coordinates.
This class stores the reference coordinate field and keeps track of the
updated mesh coordinates. It also provides a method for computing the mesh
velocity from two adjacent elevation fields.
"""
@PETSc.Log.EventDecorator("thetis.ALEMeshUpdater.__init__")
def __init__(self, solver):
"""
:arg solver: :class:`FlowSolver` object
"""
self.solver = solver
self.fields = solver.fields
if self.solver.options.use_ale_moving_mesh:
            # continuous elevation
self.elev_cg_2d = Function(self.solver.function_spaces.P1_2d,
name='elev cg 2d')
# w_mesh at surface
self.w_mesh_surf_2d = Function(
self.fields.bathymetry_2d.function_space(), name='w mesh surf 2d')
# elevation in coordinate space
self.proj_elev_to_cg_2d = Projector(self.fields.elev_2d,
self.elev_cg_2d)
self.proj_elev_cg_to_coords_2d = Projector(self.elev_cg_2d,
self.fields.elev_cg_2d)
self.cp_v_elem_size_to_2d = SubFunctionExtractor(self.fields.v_elem_size_3d,
self.fields.v_elem_size_2d,
boundary='top', elem_facet='top')
self.fs_3d = self.fields.z_coord_ref_3d.function_space()
self.fs_2d = self.fields.elev_cg_2d.function_space()
family_2d = self.fs_2d.ufl_element().family()
base_element_3d = get_extruded_base_element(self.fs_3d.ufl_element())
assert isinstance(base_element_3d, ufl.TensorProductElement)
family_3dh = base_element_3d.sub_elements()[0].family()
if family_2d != family_3dh:
raise Exception('2D and 3D spaces do not match: "{0:s}" != "{1:s}"'.format(family_2d, family_3dh))
# number of nodes in vertical direction
n_vert_nodes = self.fs_3d.finat_element.space_dimension() / self.fs_2d.finat_element.space_dimension()
nodes = get_facet_mask(self.fs_3d, 'bottom')
self.idx = op2.Global(len(nodes), nodes, dtype=numpy.int32, name='node_idx')
self.kernel_z_coord = op2.Kernel("""
void my_kernel(double *z_coord_3d, double *z_ref_3d, double *elev_2d, double *bath_2d, int *idx) {
for ( int d = 0; d < %(nodes)d; d++ ) {
for ( int c = 0; c < %(func2d_dim)d; c++ ) {
for ( int e = 0; e < %(v_nodes)d; e++ ) {
double eta = elev_2d[%(func2d_dim)d*d + c];
double bath = bath_2d[%(func2d_dim)d*d + c];
double z_ref = z_ref_3d[%(func3d_dim)d*(idx[d]+e) + c];
double new_z = eta*(z_ref + bath)/bath + z_ref;
z_coord_3d[%(func3d_dim)d*(idx[d]+e) + c] = new_z;
}
}
}
}""" % {'nodes': self.fs_2d.finat_element.space_dimension(),
'func2d_dim': self.fs_2d.value_size,
'func3d_dim': self.fs_3d.value_size,
'v_nodes': n_vert_nodes},
'my_kernel')
self.kernel_w_mesh = op2.Kernel("""
void my_kernel(double *w_mesh_3d, double *z_ref_3d, double *w_mesh_surf_2d, double *bath_2d, int *idx) {
for ( int d = 0; d < %(nodes)d; d++ ) {
for ( int c = 0; c < %(func2d_dim)d; c++ ) {
for ( int e = 0; e < %(v_nodes)d; e++ ) {
double w_mesh_surf = w_mesh_surf_2d[%(func2d_dim)d*d + c];
double bath = bath_2d[%(func2d_dim)d*d + c];
double z_ref = z_ref_3d[%(func3d_dim)d*(idx[d]+e) + c];
double new_w = w_mesh_surf * (z_ref + bath)/bath;
w_mesh_3d[%(func3d_dim)d*(idx[d]+e) + c] = new_w;
}
}
}
}""" % {'nodes': self.fs_2d.finat_element.space_dimension(),
'func2d_dim': self.fs_2d.value_size,
'func3d_dim': self.fs_3d.value_size,
'v_nodes': n_vert_nodes},
'my_kernel')
@PETSc.Log.EventDecorator("thetis.ALEMeshUpdater.intialize")
def initialize(self):
"""Set values for initial mesh (elevation at rest)"""
get_zcoord_from_mesh(self.fields.z_coord_ref_3d)
self.fields.z_coord_3d.assign(self.fields.z_coord_ref_3d)
self.update_elem_height()
@PETSc.Log.EventDecorator("thetis.ALEMeshUpdater.update_elem_height")
def update_elem_height(self):
"""Updates vertical element size fields"""
compute_elem_height(self.fields.z_coord_3d, self.fields.v_elem_size_3d)
self.cp_v_elem_size_to_2d.solve()
@PETSc.Log.EventDecorator("thetis.ALEMeshUpdater.compute_mesh_velocity_begin")
def compute_mesh_velocity_begin(self):
"""Stores the current 2D elevation state as the "old" field"""
assert self.solver.options.use_ale_moving_mesh
self.proj_elev_to_cg_2d.project()
self.proj_elev_cg_to_coords_2d.project()
@PETSc.Log.EventDecorator("thetis.ALEMeshUpdater.compute_mesh_velocity_finalize")
def compute_mesh_velocity_finalize(self, c=1.0, w_mesh_surf_expr=None):
"""
Computes mesh velocity from the elevation difference
Stores the current 2D elevation state as the "new" field,
and computes w_mesh using the given time step factor ``c``.
"""
assert self.solver.options.use_ale_moving_mesh
# compute w_mesh at surface
if w_mesh_surf_expr is None:
# default formulation
# w_mesh_surf = (elev_new - elev_old)/dt/c
self.w_mesh_surf_2d.assign(self.fields.elev_cg_2d)
self.proj_elev_to_cg_2d.project()
self.proj_elev_cg_to_coords_2d.project()
self.w_mesh_surf_2d += -self.fields.elev_cg_2d
self.w_mesh_surf_2d *= -1.0/self.solver.dt/c
else:
# user-defined formulation
self.w_mesh_surf_2d.assign(w_mesh_surf_expr)
op2.par_loop(
self.kernel_w_mesh, self.fs_3d.mesh().cell_set,
self.fields.w_mesh_3d.dat(op2.WRITE, self.fs_3d.cell_node_map()),
self.fields.z_coord_ref_3d.dat(op2.READ, self.fs_3d.cell_node_map()),
self.w_mesh_surf_2d.dat(op2.READ, self.fs_2d.cell_node_map()),
self.fields.bathymetry_2d.dat(op2.READ, self.fs_2d.cell_node_map()),
self.idx(op2.READ),
iterate=op2.ALL
)
@PETSc.Log.EventDecorator("thetis.ALEMeshUpdater.update_mesh_coordinates")
def update_mesh_coordinates(self):
"""
Updates 3D mesh coordinates to match current elev_2d field
        elev_2d is first projected to continuous space
"""
assert self.solver.options.use_ale_moving_mesh
self.proj_elev_to_cg_2d.project()
self.proj_elev_cg_to_coords_2d.project()
# compute new z coordinates -> self.fields.z_coord_3d
op2.par_loop(
self.kernel_z_coord, self.fs_3d.mesh().cell_set,
self.fields.z_coord_3d.dat(op2.WRITE, self.fs_3d.cell_node_map()),
self.fields.z_coord_ref_3d.dat(op2.READ, self.fs_3d.cell_node_map()),
self.fields.elev_cg_2d.dat(op2.READ, self.fs_2d.cell_node_map()),
self.fields.bathymetry_2d.dat(op2.READ, self.fs_2d.cell_node_map()),
self.idx(op2.READ),
iterate=op2.ALL
)
self.solver.mesh.coordinates.dat.data[:, 2] = self.fields.z_coord_3d.dat.data[:]
self.update_elem_height()
self.solver.mesh.clear_spatial_index()
class SmagorinskyViscosity(object):
r"""
Computes Smagorinsky subgrid scale horizontal viscosity
This formulation is according to Ilicak et al. (2012) and
Griffies and Hallberg (2000).
.. math::
\nu = (C_s \Delta x)^2 |S|
with the deformation rate
.. math::
|S| &= \sqrt{D_T^2 + D_S^2} \\
D_T &= \frac{\partial u}{\partial x} - \frac{\partial v}{\partial y} \\
D_S &= \frac{\partial u}{\partial y} + \frac{\partial v}{\partial x}
:math:`\Delta x` is the horizontal element size and :math:`C_s` is the
Smagorinsky coefficient.
To match a certain mesh Reynolds number :math:`Re_h` set
:math:`C_s = 1/\sqrt{Re_h}`.
Ilicak et al. (2012). Spurious dianeutral mixing and the role of
momentum closure. Ocean Modelling, 45-46(0):37-58.
http://dx.doi.org/10.1016/j.ocemod.2011.10.003
Griffies and Hallberg (2000). Biharmonic friction with a
Smagorinsky-like viscosity for use in large-scale eddy-permitting
ocean models. Monthly Weather Review, 128(8):2935-2946.
http://dx.doi.org/10.1175/1520-0493(2000)128%3C2935:BFWASL%3E2.0.CO;2
"""
@PETSc.Log.EventDecorator("thetis.SmagorinskyViscosity.__init__")
def __init__(self, uv, output, c_s, h_elem_size, max_val, min_val=1e-10,
weak_form=True, solver_parameters=None):
"""
:arg uv_3d: horizontal velocity
:type uv_3d: 3D vector :class:`Function`
:arg output: Smagorinsky viscosity field
:type output: 3D scalar :class:`Function`
:arg c_s: Smagorinsky coefficient
:type c_s: float or :class:`Constant`
:arg h_elem_size: field that defines the horizontal element size
:type h_elem_size: 3D scalar :class:`Function` or :class:`Constant`
:arg max_val: Maximum allowed viscosity. Viscosity will be clipped at
this value.
:type max_val: 3D scalar :class:`Function` in the same space as ``output``
:kwarg float min_val: Minimum allowed viscosity. Viscosity will be clipped at
this value.
:kwarg bool weak_form: Compute velocity shear by integrating by parts.
Necessary for some function spaces (e.g. P0).
:kwarg dict solver_parameters: PETSc solver options
"""
if solver_parameters is None:
solver_parameters = {}
solver_parameters.setdefault('ksp_atol', 1e-12)
solver_parameters.setdefault('ksp_rtol', 1e-16)
assert max_val.function_space() == output.function_space(), \
'max_val function must belong to the same space as output'
self.max_val = max_val
self.min_val = min_val
self.output = output
self.weak_form = weak_form
if self.weak_form:
# solve grad(u) weakly
mesh = output.function_space().mesh()
fs_grad = get_functionspace(mesh, 'DP', 1, 'DP', 1, vector=True, dim=4)
self.grad = Function(fs_grad, name='uv_grad')
tri_grad = TrialFunction(fs_grad)
test_grad = TestFunction(fs_grad)
normal = FacetNormal(mesh)
a = inner(tri_grad, test_grad)*dx
rhs_terms = []
for iuv in range(2):
for ix in range(2):
i = 2*iuv + ix
vol_term = -inner(Dx(test_grad[i], ix), uv[iuv])*dx
int_term = inner(avg(uv[iuv]), jump(test_grad[i], normal[ix]))*dS_v
ext_term = inner(uv[iuv], test_grad[i]*normal[ix])*ds_v
rhs_terms.extend([vol_term, int_term, ext_term])
l = sum(rhs_terms)
prob = LinearVariationalProblem(a, l, self.grad)
self.weak_grad_solver = LinearVariationalSolver(prob, solver_parameters=solver_parameters)
# rate of strain tensor
d_t = self.grad[0] - self.grad[3]
d_s = self.grad[1] + self.grad[2]
else:
# rate of strain tensor
d_t = Dx(uv[0], 0) - Dx(uv[1], 1)
d_s = Dx(uv[0], 1) + Dx(uv[1], 0)
fs = output.function_space()
tri = TrialFunction(fs)
test = TestFunction(fs)
nu = c_s**2*h_elem_size**2 * sqrt(d_t**2 + d_s**2)
a = test*tri*dx
l = test*nu*dx
self.prob = LinearVariationalProblem(a, l, output)
self.solver = LinearVariationalSolver(self.prob, solver_parameters=solver_parameters)
@PETSc.Log.EventDecorator("thetis.SmagorinskyViscosity.solve")
def solve(self):
"""Compute viscosity"""
if self.weak_form:
self.weak_grad_solver.solve()
self.solver.solve()
# clip values that fall below the minimum
ix = self.output.dat.data < self.min_val
self.output.dat.data[ix] = self.min_val
# crop too large values
ix = self.output.dat.data > self.max_val.dat.data
self.output.dat.data[ix] = self.max_val.dat.data[ix]
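# Hedged usage sketch: the class docstring above suggests C_s = 1/sqrt(Re_h)
# to target a mesh Reynolds number, and the viscosity scales as
# nu = (C_s * dx)**2 * |S|. The numbers below are illustrative only; real use
# passes Firedrake Functions to SmagorinskyViscosity and calls solve().
def _example_smagorinsky_estimate(re_h=2.0, dx=500.0, strain_rate=1e-4):
    """Back-of-the-envelope Smagorinsky viscosity for a target Re_h."""
    import math
    c_s = 1.0 / math.sqrt(re_h)          # coefficient matching Re_h
    return (c_s * dx)**2 * strain_rate   # nu in m**2/s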
class EquationOfState(object):
"""
Base class of all equation of state objects
"""
__metaclass__ = ABCMeta
@abstractmethod
def compute_rho(self, s, th, p, rho0=0.0):
r"""
Compute sea water density.
:arg s: Salinity expressed on the Practical Salinity Scale 1978
:type s: float or numpy.array
:arg th: Potential temperature in Celsius, referenced to pressure
p_r = 0 dbar.
:type th: float or numpy.array
:arg p: Pressure in decibars (1 dbar = 1e4 Pa)
:type p: float or numpy.array
:kwarg float rho0: Optional reference density. If provided computes
:math:`\rho' = \rho(S, Th, p) - \rho_0`
:return: water density
:rtype: float or numpy.array
All pressures are gauge pressures: they are the absolute pressures minus standard atmospheric
pressure 10.1325 dbar.
"""
pass
@abstractmethod
def eval(self, s, th, p, rho0=0.0):
r"""
Compute sea water density.
"""
pass
class JackettEquationOfState(EquationOfState):
r"""
Equation of State according to Jackett et al. (2006) for computing sea
water density.
.. math::
\rho = \rho'(T, S, p) + \rho_0
:label: equation_of_state
:math:`\rho'(T, S, p)` is a nonlinear rational function.
Jackett et al. (2006). Algorithms for Density, Potential Temperature,
Conservative Temperature, and the Freezing Temperature of Seawater.
Journal of Atmospheric and Oceanic Technology, 23(12):1709-1728.
http://dx.doi.org/10.1175/JTECH1946.1
"""
a = numpy.array([9.9984085444849347e2, 7.3471625860981584e0, -5.3211231792841769e-2,
3.6492439109814549e-4, 2.5880571023991390e0, -6.7168282786692355e-3,
1.9203202055760151e-3, 1.1798263740430364e-2, 9.8920219266399117e-8,
4.6996642771754730e-6, -2.5862187075154352e-8, -3.2921414007960662e-12])
b = numpy.array([1.0, 7.2815210113327091e-3, -4.4787265461983921e-5, 3.3851002965802430e-7,
1.3651202389758572e-10, 1.7632126669040377e-3, -8.8066583251206474e-6,
-1.8832689434804897e-10, 5.7463776745432097e-6, 1.4716275472242334e-9,
6.7103246285651894e-6, -2.4461698007024582e-17, -9.1534417604289062e-18])
def compute_rho(self, s, th, p, rho0=0.0):
r"""
Compute sea water density.
:arg s: Salinity expressed on the Practical Salinity Scale 1978
:type s: float or numpy.array
:arg th: Potential temperature in Celsius, referenced to pressure
p_r = 0 dbar.
:type th: float or numpy.array
:arg p: Pressure in decibars (1 dbar = 1e4 Pa)
:type p: float or numpy.array
:kwarg float rho0: Optional reference density. If provided computes
:math:`\rho' = \rho(S, Th, p) - \rho_0`
:return: water density
:rtype: float or numpy.array
All pressures are gauge pressures: they are the absolute pressures minus standard atmospheric
pressure 10.1325 dbar.
"""
s_pos = numpy.maximum(s, 0.0) # ensure salinity is positive
return self.eval(s_pos, th, p, rho0)
def eval(self, s, th, p, rho0=0.0):
a = self.a
b = self.b
pn = (a[0] + th*a[1] + th*th*a[2] + th*th*th*a[3] + s*a[4]
+ th*s*a[5] + s*s*a[6] + p*a[7] + p*th * th*a[8] + p*s*a[9]
+ p*p*a[10] + p*p*th*th * a[11])
pd = (b[0] + th*b[1] + th*th*b[2] + th*th*th*b[3]
+ th*th*th*th*b[4] + s*b[5] + s*th*b[6] + s*th*th*th*b[7]
+ pow(s, 1.5)*b[8] + pow(s, 1.5)*th*th*b[9] + p*b[10]
+ p*p*th*th*th*b[11] + p*p*p*th*b[12])
rho = pn/pd - rho0
return rho
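# Hedged usage sketch for JackettEquationOfState. The argument values below
# (S = 35, Th = 25 degC, p = 1000 dbar) are illustrative; no particular output
# is asserted, the point is only the calling convention of compute_rho.
def _example_jackett_density():
    eos = JackettEquationOfState()
    rho = eos.compute_rho(s=35.0, th=25.0, p=1000.0)                    # full density
    rho_anom = eos.compute_rho(s=35.0, th=25.0, p=1000.0, rho0=1000.0)  # anomaly
    return rho, rho_anom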
class LinearEquationOfState(EquationOfState):
r"""
Linear Equation of State for computing sea water density
.. math::
\rho = \rho_{ref} - \alpha (T - T_{ref}) + \beta (S - S_{ref})
"""
def __init__(self, rho_ref, alpha, beta, th_ref, s_ref):
"""
:arg float rho_ref: reference density
:arg float alpha: thermal expansion coefficient
:arg float beta: haline contraction coefficient
:arg float th_ref: reference temperature
:arg float s_ref: reference salinity
"""
self.rho_ref = rho_ref
self.alpha = alpha
self.beta = beta
self.th_ref = th_ref
self.S_ref = s_ref
def compute_rho(self, s, th, p, rho0=0.0):
r"""
Compute sea water density.
:arg s: Salinity expressed on the Practical Salinity Scale 1978
:type s: float or numpy.array
:arg th: Potential temperature in Celsius
:type th: float or numpy.array
:arg p: Pressure in decibars (1 dbar = 1e4 Pa)
:type p: float or numpy.array
:kwarg float rho0: Optional reference density. If provided computes
:math:`\rho' = \rho(S, Th, p) - \rho_0`
:return: water density
:rtype: float or numpy.array
Pressure is ignored in this equation of state.
"""
rho = (self.rho_ref - rho0
- self.alpha*(th - self.th_ref)
+ self.beta*(s - self.S_ref))
return rho
def eval(self, s, th, p, rho0=0.0):
return self.compute_rho(s, th, p, rho0)
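# Hedged usage sketch for LinearEquationOfState; the reference values and
# expansion coefficients below are illustrative only.
def _example_linear_density():
    eos = LinearEquationOfState(rho_ref=1025.0, alpha=0.2, beta=0.77,
                                th_ref=10.0, s_ref=35.0)
    # pressure is ignored by this equation of state, so any value will do
    return eos.compute_rho(s=34.0, th=12.0, p=0.0)  # = 1023.83 for these numbers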
@PETSc.Log.EventDecorator("thetis.get_horizontal_elem_size_3d")
def get_horizontal_elem_size_3d(sol2d, sol3d):
"""
Computes horizontal element size from the 2D mesh, then copies it to a 3D
field
:arg sol2d: 2D :class:`Function` for the element size field
:arg sol3d: 3D :class:`Function` for the element size field
"""
get_horizontal_elem_size_2d(sol2d)
ExpandFunctionTo3d(sol2d, sol3d).solve()
|
py | b417e686f6815c97414703e7c9ae5ca914594455 | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
import requests
from .utils.logging import get_logger
logger = get_logger(__name__)
ENDPOINT = "https://huggingface.co"
class ObjectInfo:
"""
Info about a public dataset or Metric accessible from our S3.
"""
def __init__(
self,
id: str,
key: str,
lastModified: Optional[str] = None,
description: Optional[str] = None,
citation: Optional[str] = None,
size: Optional[int] = None,
etag: Optional[str] = None,
siblings: List[Dict] = None,
author: str = None,
**kwargs,
):
self.id = id # id of dataset
self.key = key # S3 object key of config.json
self.lastModified = lastModified
self.description = description
self.citation = citation
self.size = size
self.etag = etag
self.siblings = siblings # list of files that constitute the dataset
self.author = author
for k, v in kwargs.items():
setattr(self, k, v)
def __repr__(self):
single_line_description = self.description.replace("\n", "") if self.description is not None else ""
return f"datasets.ObjectInfo(\n\tid='{self.id}',\n\tdescription='{single_line_description}',\n\tfiles={self.siblings}\n)"
class HfApi:
ALLOWED_FILE_TYPES = ["datasets", "metrics"]
def __init__(self, endpoint=None):
"""Create Api using a specific endpoint and also the file types ('datasets' or 'metrics')"""
self.endpoint = endpoint if endpoint is not None else ENDPOINT
def dataset_list(self, with_community_datasets=True, id_only=False) -> List[ObjectInfo]:
"""
Get the public list of all the datasets on huggingface, including the community datasets
"""
path = "{}/api/datasets".format(self.endpoint)
r = requests.get(path)
r.raise_for_status()
d = r.json()
datasets = [ObjectInfo(**x) for x in d]
if not with_community_datasets:
datasets = [d for d in datasets if "/" not in d.id]
if id_only:
datasets = [d.id for d in datasets]
return datasets
def metric_list(self, with_community_metrics=True, id_only=False) -> List[ObjectInfo]:
"""
Get the public list of all the metrics on huggingface, including the community metrics
"""
path = "{}/api/metrics".format(self.endpoint)
r = requests.get(path)
r.raise_for_status()
d = r.json()
metrics = [ObjectInfo(**x) for x in d]
if not with_community_metrics:
metrics = [m for m in metrics if "/" not in m.id]
if id_only:
metrics = [m.id for m in metrics]
return metrics
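# Hedged usage sketch: listing canonical (non-community) dataset and metric ids.
# This performs real HTTP requests against the configured endpoint, so it is
# wrapped in a helper and never executed at import time.
def _example_list_canonical_ids():
    api = HfApi()
    dataset_ids = api.dataset_list(with_community_datasets=False, id_only=True)
    metric_ids = api.metric_list(with_community_metrics=False, id_only=True)
    return dataset_ids, metric_ids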
|
py | b417e7038714ddb9ed2be26e02253e2c0b908265 | # coding: utf-8
"""
Cherwell REST API
Unofficial Python Cherwell REST API library. # noqa: E501
The version of the OpenAPI document: 9.3.2
Contact: See AUTHORS.
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import pycherwell
from pycherwell.models.save_response import SaveResponse # noqa: E501
from pycherwell.rest import ApiException
class TestSaveResponse(unittest.TestCase):
"""SaveResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSaveResponse(self):
"""Test SaveResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = pycherwell.models.save_response.SaveResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b417e809166a32185073bac489de6db40c8d2e71 | """Check your internet speed powered by speedtest.net
Syntax: .speedtest
Available Options: image, file, text"""
from telethon import events
from datetime import datetime
import io
import speedtest
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="speedtest ?(.*)"))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
as_text = False
as_document = True
if input_str == "image":
as_document = False
elif input_str == "file":
as_document = True
elif input_str == "text":
as_text = True
await event.edit("Calculating my internet speed. Please wait!")
start = datetime.now()
s = speedtest.Speedtest()
s.get_best_server()
s.download()
s.upload()
end = datetime.now()
ms = (end - start).total_seconds()  # elapsed wall-clock time in seconds
response = s.results.dict()
download_speed = response.get("download")
upload_speed = response.get("upload")
ping_time = response.get("ping")
client_infos = response.get("client")
i_s_p = client_infos.get("isp")
i_s_p_rating = client_infos.get("isprating")
reply_msg_id = event.message.id
if event.reply_to_msg_id:
reply_msg_id = event.reply_to_msg_id
try:
response = s.results.share()
speedtest_image = response
if as_text:
await event.edit("""**SpeedTest** completed in {} seconds
Download: {}
Upload: {}
Ping: {}
Internet Service Provider: {}
ISP Rating: {}""".format(ms, convert_from_bytes(download_speed), convert_from_bytes(upload_speed), ping_time, i_s_p, i_s_p_rating))
else:
await borg.send_file(
event.chat_id,
speedtest_image,
caption="**SpeedTest** completed in {} seconds".format(ms),
force_document=as_document,
reply_to=reply_msg_id,
allow_cache=False
)
await event.delete()
except Exception as exc:
await event.edit("""**SpeedTest** completed in {} seconds
Download: {}
Upload: {}
Ping: {}
__With the Following ERRORs__
{}""".format(ms, convert_from_bytes(download_speed), convert_from_bytes(upload_speed), ping_time, str(exc)))
def convert_from_bytes(size):
power = 2**10
n = 0
units = {
0: "",
1: "kilobytes",
2: "megabytes",
3: "gigabytes",
4: "terabytes"
}
while size > power:
size /= power
n += 1
return f"{round(size, 2)} {units[n]}"
|
py | b417e82ba4c08c7dfd20adf9135a636544ae83fc | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Ccij",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Ccij")
}
]
|
py | b417e8b2310f876bf56ae1080c02f34a6ae03fbe | # coding: utf-8
__author__ = 'deff'
from scanner.base import BaseScanner
## Dynamic (runtime) information scanner
class DynamicScanner(BaseScanner):
def __init__(self):
super().__init__()
## Can be started directly on its own
def start(self):
return ""
# Perform initialization
def init(self):
pass
# Run the scan
def scan(self):
pass
# Output the report
def report(self):
pass
# Cleanup when finished
def delete(self):
pass
def __del__(self):
pass
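# Hedged sketch of how a concrete scanner might fill in the lifecycle hooks
# above (init -> scan -> report -> delete). The class name and the strings it
# stores are invented for illustration and are not part of this project.
class _ExampleNoopScanner(DynamicScanner):
    def init(self):
        self.findings = []
    def scan(self):
        self.findings.append("nothing to report")
    def report(self):
        return "\n".join(self.findings)
    def delete(self):
        self.findings = []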
|
py | b417e8e3c2066d3a29786421423b728f0edf1eb4 | from numbers import Number
from typing import Tuple, Optional, List, Union
import torch
import numpy as np
import SimpleITK as sitk
from ....data.subject import Subject
from ....torchio import LABEL, DATA, AFFINE, TYPE, TypeRangeFloat
from .. import Interpolation, get_sitk_interpolator
from .. import RandomTransform
class RandomAffine(RandomTransform):
r"""Random affine transformation.
Args:
scales: Tuple :math:`(a, b)` defining the scaling
magnitude. The scaling values along each dimension are
:math:`(s_1, s_2, s_3)`, where :math:`s_i \sim \mathcal{U}(a, b)`.
For example, using ``scales=(0.5, 0.5)`` will zoom out the image,
making the objects inside look twice as small while preserving
the physical size and position of the image.
degrees: Tuple :math:`(a, b)` defining the rotation range in degrees.
The rotation angles around each axis are
:math:`(\theta_1, \theta_2, \theta_3)`,
where :math:`\theta_i \sim \mathcal{U}(a, b)`.
If only one value :math:`d` is provided,
:math:`\theta_i \sim \mathcal{U}(-d, d)`.
isotropic: If ``True``, the scaling factor along all dimensions is the
same, i.e. :math:`s_1 = s_2 = s_3`.
default_pad_value: As the image is rotated, some values near the
borders will be undefined.
If ``'minimum'``, the fill value will be the image minimum.
If ``'mean'``, the fill value is the mean of the border values.
If ``'otsu'``, the fill value is the mean of the values at the
border that lie under an
`Otsu threshold <https://ieeexplore.ieee.org/document/4310076>`_.
image_interpolation: See :ref:`Interpolation`.
p: Probability that this transform will be applied.
seed: See :py:class:`~torchio.transforms.augmentation.RandomTransform`.
.. note:: Rotations are performed around the center of the image.
Example:
>>> from torchio.transforms import RandomAffine, Interpolation
>>> sample = images_dataset[0] # instance of torchio.ImagesDataset
>>> transform = RandomAffine(
... scales=(0.9, 1.2),
... degrees=(10),
... isotropic=False,
... default_pad_value='otsu',
... image_interpolation=Interpolation.BSPLINE,
... )
>>> transformed = transform(sample)
From the command line::
$ torchio-transform t1.nii.gz RandomAffine -k "degrees=30 default_pad_value=minimum" -s 42 affine_min.nii.gz
"""
def __init__(
self,
scales: Tuple[float, float] = (0.9, 1.1),
degrees: TypeRangeFloat = 10,
isotropic: bool = False,
default_pad_value: Union[str, float] = 'otsu',
image_interpolation: Interpolation = Interpolation.LINEAR,
p: float = 1,
seed: Optional[int] = None,
is_tensor=False,
):
super().__init__(p=p, seed=seed, is_tensor=is_tensor)
self.scales = scales
self.degrees = self.parse_degrees(degrees)
self.isotropic = isotropic
self.default_pad_value = self.parse_default_value(default_pad_value)
self.interpolation = self.parse_interpolation(image_interpolation)
self.is_tensor = is_tensor
@staticmethod
def parse_default_value(value: Union[str, float]) -> Union[str, float]:
if isinstance(value, Number) or value in ('minimum', 'otsu', 'mean'):
return value
message = (
'Value for default_pad_value must be "minimum", "otsu", "mean"'
' or a number'
)
raise ValueError(message)
def apply_transform(self, sample: Subject) -> dict:
scaling_params, rotation_params = self.get_params(
self.scales, self.degrees, self.isotropic)
random_parameters_dict = {
'scaling': scaling_params,
'rotation': rotation_params,
}
if not self.is_tensor:
sample.check_consistent_shape()
for image_dict in sample.get_images(intensity_only=False):
if image_dict[TYPE] == LABEL:
interpolation = Interpolation.NEAREST
else:
interpolation = self.interpolation
image_dict[DATA] = self.apply_affine_transform(
image_dict[DATA],
image_dict[AFFINE],
scaling_params,
rotation_params,
interpolation,
)
sample.add_transform(self, random_parameters_dict)
else:
sample = self.apply_affine_transform(
sample,
np.identity(4),
scaling_params,
rotation_params,
self.interpolation,
)
return sample
@staticmethod
def get_params(
scales: Tuple[float, float],
degrees: Tuple[float, float],
isotropic: bool,
) -> Tuple[List[float], List[float]]:
scaling_params = torch.FloatTensor(3).uniform_(*scales)
if isotropic:
scaling_params.fill_(scaling_params[0])
rotation_params = torch.FloatTensor(3).uniform_(*degrees)
return scaling_params.tolist(), rotation_params.tolist()
@staticmethod
def get_scaling_transform(
scaling_params: List[float],
) -> sitk.ScaleTransform:
# scaling_params are inverted so that they are more intuitive
# For example, 1.5 means the objects look 1.5 times larger
transform = sitk.ScaleTransform(3)
scaling_params = 1 / np.array(scaling_params)
transform.SetScale(scaling_params)
return transform
@staticmethod
def get_rotation_transform(
degrees: List[float],
) -> sitk.Euler3DTransform:
transform = sitk.Euler3DTransform()
radians = np.radians(degrees)
transform.SetRotation(*radians)
return transform
def apply_affine_transform(
self,
tensor: torch.Tensor,
affine: np.ndarray,
scaling_params: List[float],
rotation_params: List[float],
interpolation: Interpolation,
) -> torch.Tensor:
assert len(tensor) == 1
if len(tensor.shape) == 4:
tensor = self.affine_transform(tensor, affine, scaling_params, rotation_params, interpolation)
elif len(tensor.shape) == 5:
for channel in range(tensor.shape[-1]):
tensor[..., channel] = self.affine_transform(tensor[..., channel], affine, scaling_params, rotation_params, interpolation)
else:
raise Exception('Input dimension must be either (1, x, y, z) or (1, x, y, z, c)')
return tensor
def affine_transform(
self,
tensor: torch.Tensor,
affine: np.ndarray,
scaling_params: List[float],
rotation_params: List[float],
interpolation: Interpolation,
) -> torch.Tensor:
assert len(tensor.shape) == 4
assert len(tensor) == 1
image = self.nib_to_sitk(tensor[0], affine)
floating = reference = image
scaling_transform = self.get_scaling_transform(scaling_params)
rotation_transform = self.get_rotation_transform(rotation_params)
transform = sitk.Transform(3, sitk.sitkComposite)
transform.AddTransform(scaling_transform)
transform.AddTransform(rotation_transform)
if self.default_pad_value == 'minimum':
default_value = tensor.min().item()
elif self.default_pad_value == 'mean':
default_value = get_borders_mean(image, filter_otsu=False)
elif self.default_pad_value == 'otsu':
default_value = get_borders_mean(image, filter_otsu=True)
else:
default_value = self.default_pad_value
resampler = sitk.ResampleImageFilter()
resampler.SetInterpolator(get_sitk_interpolator(interpolation))
resampler.SetReferenceImage(reference)
resampler.SetDefaultPixelValue(float(default_value))
resampler.SetOutputPixelType(sitk.sitkFloat32)
resampler.SetTransform(transform)
resampled = resampler.Execute(floating)
np_array = sitk.GetArrayFromImage(resampled)
np_array = np_array.transpose() # ITK to NumPy
tensor[0] = torch.from_numpy(np_array)
return tensor
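# Hedged sketch of the parameter sampling used above: get_params draws three
# scale factors from `scales` and three rotation angles (degrees) from
# `degrees`; with isotropic=True all three scale factors are identical. Kept
# in a helper so the torch RNG is not touched at import time.
def _example_sample_affine_params():
    scaling, rotation = RandomAffine.get_params(
        scales=(0.9, 1.1), degrees=(-10, 10), isotropic=True)
    assert scaling[0] == scaling[1] == scaling[2]
    return scaling, rotation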
def get_borders_mean(image, filter_otsu=True):
array = sitk.GetArrayViewFromImage(image)
borders = np.array((
array[0],
array[-1],
array[0, :, :],
array[-1, :, :],
array[:, 0, :],
array[:, -1, :],
array[:, :, 0],
array[:, :, -1],
))
borders = np.hstack([border.flatten() for border in borders])
if not filter_otsu:
return borders.mean()
borders = borders.reshape(1, 1, -1)
borders_image = sitk.GetImageFromArray(borders)
otsu = sitk.OtsuThresholdImageFilter()
otsu.Execute(borders_image)
threshold = otsu.GetThreshold()
values = borders[borders < threshold]
if values.any():
default_value = values.mean()
else:
default_value = borders.mean()
return default_value
|
py | b417e9f78210bf360834c02169770f32deab1833 | import numpy as np
import tensorflow as tf
import tfutil
#----------------------------------------------------------------------------
# Convenience func that casts all of its arguments to tf.float32.
def fp32(*values):
if len(values) == 1 and isinstance(values[0], tuple):
values = values[0]
values = tuple(tf.cast(v, tf.float32) for v in values)
return values if len(values) >= 2 else values[0]
#----------------------------------------------------------------------------
# Generator loss function.
def G_wgan_acgan(G, D, lod, labels, well_facies, prob_images, minibatch_size,
Wellfaciesloss_weight = 0.25, MudProp_weight = 0.2, Width_weight = 0.2, Sinuosity_weight = 0.2, orig_weight = 1, labeltypes = None, Probimageloss_weight = 0.2, batch_multiplier = 8, lossnorm = False):
#labeltypes, e.g., labeltypes = [1] # can include: 0 for 'channelorientation', 1 for 'mudproportion', 2 for 'channelwidth', 3 for 'channelsinuosity', set in config file
# the loss for channel orientation is not implemented below, so do not include 0 in labeltypes.
# lossnorm: True to normalize loss into standard Gaussian before multiplying with weights.
label_size = len(labeltypes)
if label_size == 0:
labels_in = labels
else:
labels_list = []
for k in range(label_size):
labels_list.append(tf.random.uniform(([minibatch_size]), minval=-1, maxval=1))
if 1 in labeltypes: # mud proportion
ind = labeltypes.index(1)
labels_list[ind] = tf.clip_by_value(labels[:, ind] + tf.random.uniform([minibatch_size], minval=-0.2, maxval=0.2), -1, 1)
labels_in = tf.stack(labels_list, axis = 1)
latents = tf.random_normal([minibatch_size * batch_multiplier] + G.input_shapes[0][1:])
labels_lg = tf.reshape(tf.tile(tf.expand_dims(labels_in, 1), [1, batch_multiplier, 1]), ([-1] + G.input_shapes[1][1:]))
well_facies = tf.cast(well_facies, tf.float32)
well_facies_lg = tf.reshape(tf.tile(tf.expand_dims(well_facies, 1), [1, batch_multiplier, 1, 1, 1]), ([-1] + G.input_shapes[2][1:]))
prob_images = tf.cast(prob_images, tf.float32)
prob_images_lg = tf.reshape(tf.tile(tf.expand_dims(prob_images, 1), [1, batch_multiplier, 1, 1, 1]), ([-1] + G.input_shapes[3][1:]))
fake_images_out = G.get_output_for(latents, labels_lg, well_facies_lg, prob_images_lg, is_training=True)
fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, is_training=True))
loss = -fake_scores_out
if lossnorm: loss = (loss -14.6829250772099) / 4.83122039859412 #To Normalize
loss = tfutil.autosummary('Loss_G/GANloss', loss)
loss = loss * orig_weight
with tf.name_scope('LabelPenalty'):
def addMudPropPenalty(index):
MudPropPenalty = tf.nn.l2_loss(labels_lg[:, index] - fake_labels_out[:, index]) # [:,0] is the inter-channel mud facies ratio
if lossnorm: MudPropPenalty = (MudPropPenalty -0.36079434843794) / 0.11613414177144 # To normalize this loss
MudPropPenalty = tfutil.autosummary('Loss_G/MudPropPenalty', MudPropPenalty)
MudPropPenalty = MudPropPenalty * MudProp_weight
return loss+MudPropPenalty
if 1 in labeltypes:
ind = labeltypes.index(1)
loss = addMudPropPenalty(ind)
def addWidthPenalty(index):
WidthPenalty = tf.nn.l2_loss(labels_lg[:, index] - fake_labels_out[:, index]) # L2 penalty on the channel width label
if lossnorm: WidthPenalty = (WidthPenalty -0.600282781464712) / 0.270670509379704 # To normalize this loss
WidthPenalty = tfutil.autosummary('Loss_G/WidthPenalty', WidthPenalty)
WidthPenalty = WidthPenalty * Width_weight
return loss+WidthPenalty
if 2 in labeltypes:
ind = labeltypes.index(2)
loss = tf.cond(tf.math.less(lod, tf.fill([], 5.)), lambda: addWidthPenalty(ind), lambda: loss)
def addSinuosityPenalty(index):
SinuosityPenalty = tf.nn.l2_loss(labels_lg[:, index] - fake_labels_out[:, index]) # L2 penalty on the channel sinuosity label
if lossnorm: SinuosityPenalty = (SinuosityPenalty -0.451279248935835) / 0.145642580091667 # To normalize this loss
SinuosityPenalty = tfutil.autosummary('Loss_G/SinuosityPenalty', SinuosityPenalty)
SinuosityPenalty = SinuosityPenalty * Sinuosity_weight
return loss+SinuosityPenalty
if 3 in labeltypes:
ind = labeltypes.index(3)
loss = tf.cond(tf.math.less(lod, tf.fill([], 5.)), lambda: addSinuosityPenalty(ind), lambda: loss)
def Wellpoints_L2loss(well_facies, fake_images):
loss = tf.nn.l2_loss(well_facies[:,0:1]* (well_facies[:,1:2] - tf.where((fake_images+1)/2>0.4, tf.fill(tf.shape(fake_images), 1.), (fake_images+1)/2)))
loss = loss / tf.reduce_sum(well_facies[:, 0:1])
return loss
def addwellfaciespenalty(well_facies, fake_images_out, loss, Wellfaciesloss_weight):
with tf.name_scope('WellfaciesPenalty'):
WellfaciesPenalty = Wellpoints_L2loss(well_facies, fake_images_out) # levee code is 0.5 in the facies model, but in the well_facies data both levee and channel are coded as 1
if lossnorm: WellfaciesPenalty = (WellfaciesPenalty -0.00887323171768953) / 0.00517647244943928
WellfaciesPenalty = tfutil.autosummary('Loss_G/WellfaciesPenalty', WellfaciesPenalty)
loss += WellfaciesPenalty * Wellfaciesloss_weight
return loss
loss = tf.cond(tf.math.less_equal(lod, tf.fill([], 3.)), lambda: addwellfaciespenalty(well_facies_lg, fake_images_out, loss, Wellfaciesloss_weight), lambda: loss)
def addfaciescodedistributionloss(probs, fakes, weight, batchsize, relzs, loss): # used when resolution is 64x64
with tf.name_scope('ProbimagePenalty'):
# In the paper, only the probability map for the channel complex is considered. If probability maps for multiple facies are used, channelindicator and ProbPenalty need to be computed for each facies.
channelindicator = 1 / (1+tf.math.exp(-16*(fakes+0.5))) # use adjusted sigmoid to replace thresholding.
probs_fake = tf.reduce_mean(tf.reshape(channelindicator, ([batchsize, relzs] + G.input_shapes[3][1:])), 1)
#****** L2 loss
ProbPenalty = tf.nn.l2_loss(probs - probs_fake) # L2 loss
if lossnorm: ProbPenalty = ((ProbPenalty*tf.cast(relzs, tf.float32))-19134)/5402 # normalize
ProbPenalty = tfutil.autosummary('Loss_G/ProbPenalty', ProbPenalty)
loss += ProbPenalty * weight
return loss
loss = addfaciescodedistributionloss(prob_images, fake_images_out, Probimageloss_weight, minibatch_size, batch_multiplier, loss)
loss = tfutil.autosummary('Loss_G/Total_loss', loss)
return loss
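# Hedged illustration of the soft facies indicator used inside
# addfaciescodedistributionloss above: 1/(1 + exp(-16*(x + 0.5))) is a steep
# sigmoid centred at x = -0.5, i.e. a differentiable stand-in for thresholding
# facies codes in [-1, 1]. Example values only; this is not used in training.
def _example_soft_channel_indicator():
    x = np.array([-1.0, -0.75, -0.5, -0.25, 0.0, 1.0])
    return 1.0 / (1.0 + np.exp(-16.0 * (x + 0.5)))  # ~[0.0, 0.02, 0.5, 0.98, 1.0, 1.0]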
#----------------------------------------------------------------------------
# Discriminator loss function.
def D_wgangp_acgan(G, D, opt, minibatch_size, reals, labels, well_facies, prob_images,
wgan_lambda = 10.0, # Weight for the gradient penalty term.
wgan_epsilon = 0.001, # Weight for the epsilon term, \epsilon_{drift}.
wgan_target = 1.0, # Target value for gradient magnitudes.
label_weight = 10, # Weight of the conditioning terms.
batch_multiplier = 1):
latents = tf.random_normal([minibatch_size * batch_multiplier] + G.input_shapes[0][1:])
prob_images = tf.reshape(tf.tile(tf.expand_dims(prob_images, 1), [1, batch_multiplier, 1, 1, 1]), ([-1] + G.input_shapes[3][1:]))
well_facies = tf.reshape(tf.tile(tf.expand_dims(well_facies, 1), [1, batch_multiplier, 1, 1, 1]), ([-1] + G.input_shapes[2][1:]))
labels = tf.reshape(tf.tile(tf.expand_dims(labels, 1), [1, batch_multiplier, 1]), ([-1] + G.input_shapes[1][1:]))
fake_images_out = G.get_output_for(latents, labels, well_facies, prob_images, is_training=True)
reals = tf.reshape(tf.tile(tf.expand_dims(reals, 1), [1, batch_multiplier, 1, 1, 1]), ([-1] + G.input_shapes[3][1:]))
real_scores_out, real_labels_out = fp32(D.get_output_for(reals, is_training=True))
fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, is_training=True))
real_scores_out = tfutil.autosummary('Loss_D/real_scores', real_scores_out)
fake_scores_out = tfutil.autosummary('Loss_D/fake_scores', fake_scores_out)
loss = fake_scores_out - real_scores_out
with tf.name_scope('GradientPenalty'):
mixing_factors = tf.random_uniform([minibatch_size * batch_multiplier, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
mixed_scores_out, mixed_labels_out = fp32(D.get_output_for(mixed_images_out, is_training=True))
#mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out)
mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
gradient_penalty = tf.square(mixed_norms - wgan_target)
loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
loss = tfutil.autosummary('Loss_D/WGAN_GP_loss', loss)
with tf.name_scope('EpsilonPenalty'):
epsilon_penalty = tfutil.autosummary('Loss_D/epsilon_penalty', tf.square(real_scores_out))
loss += epsilon_penalty * wgan_epsilon
with tf.name_scope('LabelPenalty'):
label_penalty_reals = tf.nn.l2_loss(labels - real_labels_out)
label_penalty_fakes = tf.nn.l2_loss(labels - fake_labels_out)
label_penalty_reals = tfutil.autosummary('Loss_D/label_penalty_reals', label_penalty_reals)
label_penalty_fakes = tfutil.autosummary('Loss_D/label_penalty_fakes', label_penalty_fakes)
loss += (label_penalty_reals + label_penalty_fakes) * label_weight
loss = tfutil.autosummary('Loss_D/Total_loss', loss)
return loss |
py | b417eaa9a20738d481f4bc9655a5c859e7ff71d3 | """This file defines tokenizer class object.
"""
class Tokenizer(object):
"""Base tokenizer class.
"""
def __init__(self):
pass
def tokenize(self, text):
raise NotImplementedError
class SimpleTokenizer(Tokenizer):
"""Simple version FMM(Forward Maximun Matching) word tokenizer. This tokenizer can only
be used in topic model demo, but not in real business application scenarios.
Notes: This tokenizer can only recognize the words in the corresponding vocab file.
"""
def __init__(self, vocab_path):
super().__init__()
self.__max_word_len = 0
self.__vocab = set()
self.__load_vocab(vocab_path)
def tokenize(self, text):
"""Tokenize the input string `text`, and return the tokenize result.
"""
text_len = len(text)
result = []
i = 0
while i < text_len:
word = found_word = ""
# Deal with English characters.
if self.__is_eng_char(text[i]):
for j in range(i, text_len + 1):
if j < text_len and self.__is_eng_char(text[j]):
word += self.__tolower(text[j])
else:
# Forward matching by character granularity.
if word in self.__vocab:
result.append(word)
i = j - 1
break
else:
for j in range(i, min(i + self.__max_word_len, text_len)):
word += text[j]
if word in self.__vocab:
found_word = word
if len(found_word) > 0:
result.append(found_word)
i += len(found_word) - 1
i += 1
return result
def contains(self, word):
"""Check whether the word is in the vocabulary.
"""
return word in self.__vocab
def __load_vocab(self, vocab_path):
"""Load the word dictionary.
"""
with open(vocab_path, 'r', encoding='utf-8') as fin:
vocab_size = 0
for line in fin.readlines():
fields = line.strip().split('\t')
assert len(fields) >= 2
word = fields[1]
self.__max_word_len = max(self.__max_word_len, len(word))
self.__vocab.add(word)
vocab_size += 1
def __is_eng_char(self, c):
"""Check whether char c is an English character.
"""
return (c >= 'A' and c <= 'Z') or (c >= 'a' and c <= 'z')
def __tolower(self, c):
"""Return the lowercase character of the corresponding character, or return
the original character if there is no corresponding lowercase character.
"""
return c.lower()
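# Hedged usage sketch: the vocab path below is hypothetical and the returned
# tokens depend entirely on the words present in that vocab file; this only
# shows the calling convention of SimpleTokenizer.
def _example_simple_tokenize(vocab_path="topic_model/vocab.txt"):
    tokenizer = SimpleTokenizer(vocab_path)
    return tokenizer.tokenize("hello world")  # only in-vocab words survive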
class LACTokenizer(Tokenizer):
def __init__(self, vocab_path, lac):
super().__init__()
self.__max_word_len = 0
self.__vocab = set()
self.__lac = lac
self.__load_vocab(vocab_path)
def __load_vocab(self, vocab_path):
"""Load the word dictionary.
"""
with open(vocab_path, 'r', encoding='utf-8') as fin:
vocab_size = 0
for line in fin.readlines():
fields = line.strip().split('\t')
assert len(fields) >= 2
word = fields[1]
self.__max_word_len = max(self.__max_word_len, len(word))
self.__vocab.add(word)
vocab_size += 1
def tokenize(self, text):
results = self.__lac.lexical_analysis(
texts=[text], use_gpu=False, batch_size=1, return_tag=True)
# Change English words to lower case.
# And just preserve the word in vocab.
words = results[0]["word"]
result = []
for word in words:
word = word.lower()
if word in self.__vocab:
result.append(word)
return result
def contains(self, word):
"""Check whether the word is in the vocabulary.
"""
return word in self.__vocab
|
py | b417ed30c4b8a89d47d23af92b643ae9765709c9 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.24
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1PodSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'active_deadline_seconds': 'int',
'affinity': 'V1Affinity',
'automount_service_account_token': 'bool',
'containers': 'list[V1Container]',
'dns_config': 'V1PodDNSConfig',
'dns_policy': 'str',
'enable_service_links': 'bool',
'ephemeral_containers': 'list[V1EphemeralContainer]',
'host_aliases': 'list[V1HostAlias]',
'host_ipc': 'bool',
'host_network': 'bool',
'host_pid': 'bool',
'hostname': 'str',
'image_pull_secrets': 'list[V1LocalObjectReference]',
'init_containers': 'list[V1Container]',
'node_name': 'str',
'node_selector': 'dict(str, str)',
'os': 'V1PodOS',
'overhead': 'dict(str, str)',
'preemption_policy': 'str',
'priority': 'int',
'priority_class_name': 'str',
'readiness_gates': 'list[V1PodReadinessGate]',
'restart_policy': 'str',
'runtime_class_name': 'str',
'scheduler_name': 'str',
'security_context': 'V1PodSecurityContext',
'service_account': 'str',
'service_account_name': 'str',
'set_hostname_as_fqdn': 'bool',
'share_process_namespace': 'bool',
'subdomain': 'str',
'termination_grace_period_seconds': 'int',
'tolerations': 'list[V1Toleration]',
'topology_spread_constraints': 'list[V1TopologySpreadConstraint]',
'volumes': 'list[V1Volume]'
}
attribute_map = {
'active_deadline_seconds': 'activeDeadlineSeconds',
'affinity': 'affinity',
'automount_service_account_token': 'automountServiceAccountToken',
'containers': 'containers',
'dns_config': 'dnsConfig',
'dns_policy': 'dnsPolicy',
'enable_service_links': 'enableServiceLinks',
'ephemeral_containers': 'ephemeralContainers',
'host_aliases': 'hostAliases',
'host_ipc': 'hostIPC',
'host_network': 'hostNetwork',
'host_pid': 'hostPID',
'hostname': 'hostname',
'image_pull_secrets': 'imagePullSecrets',
'init_containers': 'initContainers',
'node_name': 'nodeName',
'node_selector': 'nodeSelector',
'os': 'os',
'overhead': 'overhead',
'preemption_policy': 'preemptionPolicy',
'priority': 'priority',
'priority_class_name': 'priorityClassName',
'readiness_gates': 'readinessGates',
'restart_policy': 'restartPolicy',
'runtime_class_name': 'runtimeClassName',
'scheduler_name': 'schedulerName',
'security_context': 'securityContext',
'service_account': 'serviceAccount',
'service_account_name': 'serviceAccountName',
'set_hostname_as_fqdn': 'setHostnameAsFQDN',
'share_process_namespace': 'shareProcessNamespace',
'subdomain': 'subdomain',
'termination_grace_period_seconds': 'terminationGracePeriodSeconds',
'tolerations': 'tolerations',
'topology_spread_constraints': 'topologySpreadConstraints',
'volumes': 'volumes'
}
def __init__(self, active_deadline_seconds=None, affinity=None, automount_service_account_token=None, containers=None, dns_config=None, dns_policy=None, enable_service_links=None, ephemeral_containers=None, host_aliases=None, host_ipc=None, host_network=None, host_pid=None, hostname=None, image_pull_secrets=None, init_containers=None, node_name=None, node_selector=None, os=None, overhead=None, preemption_policy=None, priority=None, priority_class_name=None, readiness_gates=None, restart_policy=None, runtime_class_name=None, scheduler_name=None, security_context=None, service_account=None, service_account_name=None, set_hostname_as_fqdn=None, share_process_namespace=None, subdomain=None, termination_grace_period_seconds=None, tolerations=None, topology_spread_constraints=None, volumes=None, local_vars_configuration=None): # noqa: E501
"""V1PodSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._active_deadline_seconds = None
self._affinity = None
self._automount_service_account_token = None
self._containers = None
self._dns_config = None
self._dns_policy = None
self._enable_service_links = None
self._ephemeral_containers = None
self._host_aliases = None
self._host_ipc = None
self._host_network = None
self._host_pid = None
self._hostname = None
self._image_pull_secrets = None
self._init_containers = None
self._node_name = None
self._node_selector = None
self._os = None
self._overhead = None
self._preemption_policy = None
self._priority = None
self._priority_class_name = None
self._readiness_gates = None
self._restart_policy = None
self._runtime_class_name = None
self._scheduler_name = None
self._security_context = None
self._service_account = None
self._service_account_name = None
self._set_hostname_as_fqdn = None
self._share_process_namespace = None
self._subdomain = None
self._termination_grace_period_seconds = None
self._tolerations = None
self._topology_spread_constraints = None
self._volumes = None
self.discriminator = None
if active_deadline_seconds is not None:
self.active_deadline_seconds = active_deadline_seconds
if affinity is not None:
self.affinity = affinity
if automount_service_account_token is not None:
self.automount_service_account_token = automount_service_account_token
self.containers = containers
if dns_config is not None:
self.dns_config = dns_config
if dns_policy is not None:
self.dns_policy = dns_policy
if enable_service_links is not None:
self.enable_service_links = enable_service_links
if ephemeral_containers is not None:
self.ephemeral_containers = ephemeral_containers
if host_aliases is not None:
self.host_aliases = host_aliases
if host_ipc is not None:
self.host_ipc = host_ipc
if host_network is not None:
self.host_network = host_network
if host_pid is not None:
self.host_pid = host_pid
if hostname is not None:
self.hostname = hostname
if image_pull_secrets is not None:
self.image_pull_secrets = image_pull_secrets
if init_containers is not None:
self.init_containers = init_containers
if node_name is not None:
self.node_name = node_name
if node_selector is not None:
self.node_selector = node_selector
if os is not None:
self.os = os
if overhead is not None:
self.overhead = overhead
if preemption_policy is not None:
self.preemption_policy = preemption_policy
if priority is not None:
self.priority = priority
if priority_class_name is not None:
self.priority_class_name = priority_class_name
if readiness_gates is not None:
self.readiness_gates = readiness_gates
if restart_policy is not None:
self.restart_policy = restart_policy
if runtime_class_name is not None:
self.runtime_class_name = runtime_class_name
if scheduler_name is not None:
self.scheduler_name = scheduler_name
if security_context is not None:
self.security_context = security_context
if service_account is not None:
self.service_account = service_account
if service_account_name is not None:
self.service_account_name = service_account_name
if set_hostname_as_fqdn is not None:
self.set_hostname_as_fqdn = set_hostname_as_fqdn
if share_process_namespace is not None:
self.share_process_namespace = share_process_namespace
if subdomain is not None:
self.subdomain = subdomain
if termination_grace_period_seconds is not None:
self.termination_grace_period_seconds = termination_grace_period_seconds
if tolerations is not None:
self.tolerations = tolerations
if topology_spread_constraints is not None:
self.topology_spread_constraints = topology_spread_constraints
if volumes is not None:
self.volumes = volumes
@property
def active_deadline_seconds(self):
"""Gets the active_deadline_seconds of this V1PodSpec. # noqa: E501
Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. # noqa: E501
:return: The active_deadline_seconds of this V1PodSpec. # noqa: E501
:rtype: int
"""
return self._active_deadline_seconds
@active_deadline_seconds.setter
def active_deadline_seconds(self, active_deadline_seconds):
"""Sets the active_deadline_seconds of this V1PodSpec.
Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. # noqa: E501
:param active_deadline_seconds: The active_deadline_seconds of this V1PodSpec. # noqa: E501
:type: int
"""
self._active_deadline_seconds = active_deadline_seconds
@property
def affinity(self):
"""Gets the affinity of this V1PodSpec. # noqa: E501
:return: The affinity of this V1PodSpec. # noqa: E501
:rtype: V1Affinity
"""
return self._affinity
@affinity.setter
def affinity(self, affinity):
"""Sets the affinity of this V1PodSpec.
:param affinity: The affinity of this V1PodSpec. # noqa: E501
:type: V1Affinity
"""
self._affinity = affinity
@property
def automount_service_account_token(self):
"""Gets the automount_service_account_token of this V1PodSpec. # noqa: E501
AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. # noqa: E501
:return: The automount_service_account_token of this V1PodSpec. # noqa: E501
:rtype: bool
"""
return self._automount_service_account_token
@automount_service_account_token.setter
def automount_service_account_token(self, automount_service_account_token):
"""Sets the automount_service_account_token of this V1PodSpec.
AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. # noqa: E501
:param automount_service_account_token: The automount_service_account_token of this V1PodSpec. # noqa: E501
:type: bool
"""
self._automount_service_account_token = automount_service_account_token
@property
def containers(self):
"""Gets the containers of this V1PodSpec. # noqa: E501
List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. # noqa: E501
:return: The containers of this V1PodSpec. # noqa: E501
:rtype: list[V1Container]
"""
return self._containers
@containers.setter
def containers(self, containers):
"""Sets the containers of this V1PodSpec.
List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. # noqa: E501
:param containers: The containers of this V1PodSpec. # noqa: E501
:type: list[V1Container]
"""
if self.local_vars_configuration.client_side_validation and containers is None: # noqa: E501
raise ValueError("Invalid value for `containers`, must not be `None`") # noqa: E501
self._containers = containers
@property
def dns_config(self):
"""Gets the dns_config of this V1PodSpec. # noqa: E501
:return: The dns_config of this V1PodSpec. # noqa: E501
:rtype: V1PodDNSConfig
"""
return self._dns_config
@dns_config.setter
def dns_config(self, dns_config):
"""Sets the dns_config of this V1PodSpec.
:param dns_config: The dns_config of this V1PodSpec. # noqa: E501
:type: V1PodDNSConfig
"""
self._dns_config = dns_config
@property
def dns_policy(self):
"""Gets the dns_policy of this V1PodSpec. # noqa: E501
Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. # noqa: E501
:return: The dns_policy of this V1PodSpec. # noqa: E501
:rtype: str
"""
return self._dns_policy
@dns_policy.setter
def dns_policy(self, dns_policy):
"""Sets the dns_policy of this V1PodSpec.
Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. # noqa: E501
:param dns_policy: The dns_policy of this V1PodSpec. # noqa: E501
:type: str
"""
self._dns_policy = dns_policy
@property
def enable_service_links(self):
"""Gets the enable_service_links of this V1PodSpec. # noqa: E501
EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. # noqa: E501
:return: The enable_service_links of this V1PodSpec. # noqa: E501
:rtype: bool
"""
return self._enable_service_links
@enable_service_links.setter
def enable_service_links(self, enable_service_links):
"""Sets the enable_service_links of this V1PodSpec.
EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. # noqa: E501
:param enable_service_links: The enable_service_links of this V1PodSpec. # noqa: E501
:type: bool
"""
self._enable_service_links = enable_service_links
@property
def ephemeral_containers(self):
"""Gets the ephemeral_containers of this V1PodSpec. # noqa: E501
List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. # noqa: E501
:return: The ephemeral_containers of this V1PodSpec. # noqa: E501
:rtype: list[V1EphemeralContainer]
"""
return self._ephemeral_containers
@ephemeral_containers.setter
def ephemeral_containers(self, ephemeral_containers):
"""Sets the ephemeral_containers of this V1PodSpec.
List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. # noqa: E501
:param ephemeral_containers: The ephemeral_containers of this V1PodSpec. # noqa: E501
:type: list[V1EphemeralContainer]
"""
self._ephemeral_containers = ephemeral_containers
@property
def host_aliases(self):
"""Gets the host_aliases of this V1PodSpec. # noqa: E501
HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. # noqa: E501
:return: The host_aliases of this V1PodSpec. # noqa: E501
:rtype: list[V1HostAlias]
"""
return self._host_aliases
@host_aliases.setter
def host_aliases(self, host_aliases):
"""Sets the host_aliases of this V1PodSpec.
HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. # noqa: E501
:param host_aliases: The host_aliases of this V1PodSpec. # noqa: E501
:type: list[V1HostAlias]
"""
self._host_aliases = host_aliases
@property
def host_ipc(self):
"""Gets the host_ipc of this V1PodSpec. # noqa: E501
Use the host's ipc namespace. Optional: Default to false. # noqa: E501
:return: The host_ipc of this V1PodSpec. # noqa: E501
:rtype: bool
"""
return self._host_ipc
@host_ipc.setter
def host_ipc(self, host_ipc):
"""Sets the host_ipc of this V1PodSpec.
Use the host's ipc namespace. Optional: Default to false. # noqa: E501
:param host_ipc: The host_ipc of this V1PodSpec. # noqa: E501
:type: bool
"""
self._host_ipc = host_ipc
@property
def host_network(self):
"""Gets the host_network of this V1PodSpec. # noqa: E501
Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. # noqa: E501
:return: The host_network of this V1PodSpec. # noqa: E501
:rtype: bool
"""
return self._host_network
@host_network.setter
def host_network(self, host_network):
"""Sets the host_network of this V1PodSpec.
Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. # noqa: E501
:param host_network: The host_network of this V1PodSpec. # noqa: E501
:type: bool
"""
self._host_network = host_network
@property
def host_pid(self):
"""Gets the host_pid of this V1PodSpec. # noqa: E501
Use the host's pid namespace. Optional: Default to false. # noqa: E501
:return: The host_pid of this V1PodSpec. # noqa: E501
:rtype: bool
"""
return self._host_pid
@host_pid.setter
def host_pid(self, host_pid):
"""Sets the host_pid of this V1PodSpec.
Use the host's pid namespace. Optional: Default to false. # noqa: E501
:param host_pid: The host_pid of this V1PodSpec. # noqa: E501
:type: bool
"""
self._host_pid = host_pid
@property
def hostname(self):
"""Gets the hostname of this V1PodSpec. # noqa: E501
Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. # noqa: E501
:return: The hostname of this V1PodSpec. # noqa: E501
:rtype: str
"""
return self._hostname
@hostname.setter
def hostname(self, hostname):
"""Sets the hostname of this V1PodSpec.
Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. # noqa: E501
:param hostname: The hostname of this V1PodSpec. # noqa: E501
:type: str
"""
self._hostname = hostname
@property
def image_pull_secrets(self):
"""Gets the image_pull_secrets of this V1PodSpec. # noqa: E501
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod # noqa: E501
:return: The image_pull_secrets of this V1PodSpec. # noqa: E501
:rtype: list[V1LocalObjectReference]
"""
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, image_pull_secrets):
"""Sets the image_pull_secrets of this V1PodSpec.
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod # noqa: E501
:param image_pull_secrets: The image_pull_secrets of this V1PodSpec. # noqa: E501
:type: list[V1LocalObjectReference]
"""
self._image_pull_secrets = image_pull_secrets
@property
def init_containers(self):
"""Gets the init_containers of this V1PodSpec. # noqa: E501
List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ # noqa: E501
:return: The init_containers of this V1PodSpec. # noqa: E501
:rtype: list[V1Container]
"""
return self._init_containers
@init_containers.setter
def init_containers(self, init_containers):
"""Sets the init_containers of this V1PodSpec.
List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ # noqa: E501
:param init_containers: The init_containers of this V1PodSpec. # noqa: E501
:type: list[V1Container]
"""
self._init_containers = init_containers
@property
def node_name(self):
"""Gets the node_name of this V1PodSpec. # noqa: E501
NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. # noqa: E501
:return: The node_name of this V1PodSpec. # noqa: E501
:rtype: str
"""
return self._node_name
@node_name.setter
def node_name(self, node_name):
"""Sets the node_name of this V1PodSpec.
NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. # noqa: E501
:param node_name: The node_name of this V1PodSpec. # noqa: E501
:type: str
"""
self._node_name = node_name
@property
def node_selector(self):
"""Gets the node_selector of this V1PodSpec. # noqa: E501
NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501
:return: The node_selector of this V1PodSpec. # noqa: E501
:rtype: dict(str, str)
"""
return self._node_selector
@node_selector.setter
def node_selector(self, node_selector):
"""Sets the node_selector of this V1PodSpec.
NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501
:param node_selector: The node_selector of this V1PodSpec. # noqa: E501
:type: dict(str, str)
"""
self._node_selector = node_selector
@property
def os(self):
"""Gets the os of this V1PodSpec. # noqa: E501
:return: The os of this V1PodSpec. # noqa: E501
:rtype: V1PodOS
"""
return self._os
@os.setter
def os(self, os):
"""Sets the os of this V1PodSpec.
:param os: The os of this V1PodSpec. # noqa: E501
:type: V1PodOS
"""
self._os = os
@property
def overhead(self):
"""Gets the overhead of this V1PodSpec. # noqa: E501
Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md # noqa: E501
:return: The overhead of this V1PodSpec. # noqa: E501
:rtype: dict(str, str)
"""
return self._overhead
@overhead.setter
def overhead(self, overhead):
"""Sets the overhead of this V1PodSpec.
Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md # noqa: E501
:param overhead: The overhead of this V1PodSpec. # noqa: E501
:type: dict(str, str)
"""
self._overhead = overhead
@property
def preemption_policy(self):
"""Gets the preemption_policy of this V1PodSpec. # noqa: E501
PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. # noqa: E501
:return: The preemption_policy of this V1PodSpec. # noqa: E501
:rtype: str
"""
return self._preemption_policy
@preemption_policy.setter
def preemption_policy(self, preemption_policy):
"""Sets the preemption_policy of this V1PodSpec.
PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. # noqa: E501
:param preemption_policy: The preemption_policy of this V1PodSpec. # noqa: E501
:type: str
"""
self._preemption_policy = preemption_policy
@property
def priority(self):
"""Gets the priority of this V1PodSpec. # noqa: E501
The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. # noqa: E501
:return: The priority of this V1PodSpec. # noqa: E501
:rtype: int
"""
return self._priority
@priority.setter
def priority(self, priority):
"""Sets the priority of this V1PodSpec.
The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. # noqa: E501
:param priority: The priority of this V1PodSpec. # noqa: E501
:type: int
"""
self._priority = priority
@property
def priority_class_name(self):
"""Gets the priority_class_name of this V1PodSpec. # noqa: E501
If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. # noqa: E501
:return: The priority_class_name of this V1PodSpec. # noqa: E501
:rtype: str
"""
return self._priority_class_name
@priority_class_name.setter
def priority_class_name(self, priority_class_name):
"""Sets the priority_class_name of this V1PodSpec.
If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. # noqa: E501
:param priority_class_name: The priority_class_name of this V1PodSpec. # noqa: E501
:type: str
"""
self._priority_class_name = priority_class_name
@property
def readiness_gates(self):
"""Gets the readiness_gates of this V1PodSpec. # noqa: E501
If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates # noqa: E501
:return: The readiness_gates of this V1PodSpec. # noqa: E501
:rtype: list[V1PodReadinessGate]
"""
return self._readiness_gates
@readiness_gates.setter
def readiness_gates(self, readiness_gates):
"""Sets the readiness_gates of this V1PodSpec.
If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates # noqa: E501
:param readiness_gates: The readiness_gates of this V1PodSpec. # noqa: E501
:type: list[V1PodReadinessGate]
"""
self._readiness_gates = readiness_gates
@property
def restart_policy(self):
"""Gets the restart_policy of this V1PodSpec. # noqa: E501
Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy # noqa: E501
:return: The restart_policy of this V1PodSpec. # noqa: E501
:rtype: str
"""
return self._restart_policy
@restart_policy.setter
def restart_policy(self, restart_policy):
"""Sets the restart_policy of this V1PodSpec.
Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy # noqa: E501
:param restart_policy: The restart_policy of this V1PodSpec. # noqa: E501
:type: str
"""
self._restart_policy = restart_policy
@property
def runtime_class_name(self):
"""Gets the runtime_class_name of this V1PodSpec. # noqa: E501
RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class # noqa: E501
:return: The runtime_class_name of this V1PodSpec. # noqa: E501
:rtype: str
"""
return self._runtime_class_name
@runtime_class_name.setter
def runtime_class_name(self, runtime_class_name):
"""Sets the runtime_class_name of this V1PodSpec.
RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class # noqa: E501
:param runtime_class_name: The runtime_class_name of this V1PodSpec. # noqa: E501
:type: str
"""
self._runtime_class_name = runtime_class_name
@property
def scheduler_name(self):
"""Gets the scheduler_name of this V1PodSpec. # noqa: E501
If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. # noqa: E501
:return: The scheduler_name of this V1PodSpec. # noqa: E501
:rtype: str
"""
return self._scheduler_name
@scheduler_name.setter
def scheduler_name(self, scheduler_name):
"""Sets the scheduler_name of this V1PodSpec.
If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. # noqa: E501
:param scheduler_name: The scheduler_name of this V1PodSpec. # noqa: E501
:type: str
"""
self._scheduler_name = scheduler_name
@property
def security_context(self):
"""Gets the security_context of this V1PodSpec. # noqa: E501
:return: The security_context of this V1PodSpec. # noqa: E501
:rtype: V1PodSecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""Sets the security_context of this V1PodSpec.
:param security_context: The security_context of this V1PodSpec. # noqa: E501
:type: V1PodSecurityContext
"""
self._security_context = security_context
@property
def service_account(self):
"""Gets the service_account of this V1PodSpec. # noqa: E501
DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. # noqa: E501
:return: The service_account of this V1PodSpec. # noqa: E501
:rtype: str
"""
return self._service_account
@service_account.setter
def service_account(self, service_account):
"""Sets the service_account of this V1PodSpec.
DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. # noqa: E501
:param service_account: The service_account of this V1PodSpec. # noqa: E501
:type: str
"""
self._service_account = service_account
@property
def service_account_name(self):
"""Gets the service_account_name of this V1PodSpec. # noqa: E501
ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ # noqa: E501
:return: The service_account_name of this V1PodSpec. # noqa: E501
:rtype: str
"""
return self._service_account_name
@service_account_name.setter
def service_account_name(self, service_account_name):
"""Sets the service_account_name of this V1PodSpec.
ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ # noqa: E501
:param service_account_name: The service_account_name of this V1PodSpec. # noqa: E501
:type: str
"""
self._service_account_name = service_account_name
@property
def set_hostname_as_fqdn(self):
"""Gets the set_hostname_as_fqdn of this V1PodSpec. # noqa: E501
If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. # noqa: E501
:return: The set_hostname_as_fqdn of this V1PodSpec. # noqa: E501
:rtype: bool
"""
return self._set_hostname_as_fqdn
@set_hostname_as_fqdn.setter
def set_hostname_as_fqdn(self, set_hostname_as_fqdn):
"""Sets the set_hostname_as_fqdn of this V1PodSpec.
If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. # noqa: E501
:param set_hostname_as_fqdn: The set_hostname_as_fqdn of this V1PodSpec. # noqa: E501
:type: bool
"""
self._set_hostname_as_fqdn = set_hostname_as_fqdn
@property
def share_process_namespace(self):
"""Gets the share_process_namespace of this V1PodSpec. # noqa: E501
Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. # noqa: E501
:return: The share_process_namespace of this V1PodSpec. # noqa: E501
:rtype: bool
"""
return self._share_process_namespace
@share_process_namespace.setter
def share_process_namespace(self, share_process_namespace):
"""Sets the share_process_namespace of this V1PodSpec.
Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. # noqa: E501
:param share_process_namespace: The share_process_namespace of this V1PodSpec. # noqa: E501
:type: bool
"""
self._share_process_namespace = share_process_namespace
@property
def subdomain(self):
"""Gets the subdomain of this V1PodSpec. # noqa: E501
If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all. # noqa: E501
:return: The subdomain of this V1PodSpec. # noqa: E501
:rtype: str
"""
return self._subdomain
@subdomain.setter
def subdomain(self, subdomain):
"""Sets the subdomain of this V1PodSpec.
If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all. # noqa: E501
:param subdomain: The subdomain of this V1PodSpec. # noqa: E501
:type: str
"""
self._subdomain = subdomain
@property
def termination_grace_period_seconds(self):
"""Gets the termination_grace_period_seconds of this V1PodSpec. # noqa: E501
Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. # noqa: E501
:return: The termination_grace_period_seconds of this V1PodSpec. # noqa: E501
:rtype: int
"""
return self._termination_grace_period_seconds
@termination_grace_period_seconds.setter
def termination_grace_period_seconds(self, termination_grace_period_seconds):
"""Sets the termination_grace_period_seconds of this V1PodSpec.
Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. # noqa: E501
:param termination_grace_period_seconds: The termination_grace_period_seconds of this V1PodSpec. # noqa: E501
:type: int
"""
self._termination_grace_period_seconds = termination_grace_period_seconds
@property
def tolerations(self):
"""Gets the tolerations of this V1PodSpec. # noqa: E501
If specified, the pod's tolerations. # noqa: E501
:return: The tolerations of this V1PodSpec. # noqa: E501
:rtype: list[V1Toleration]
"""
return self._tolerations
@tolerations.setter
def tolerations(self, tolerations):
"""Sets the tolerations of this V1PodSpec.
If specified, the pod's tolerations. # noqa: E501
:param tolerations: The tolerations of this V1PodSpec. # noqa: E501
:type: list[V1Toleration]
"""
self._tolerations = tolerations
@property
def topology_spread_constraints(self):
"""Gets the topology_spread_constraints of this V1PodSpec. # noqa: E501
TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. # noqa: E501
:return: The topology_spread_constraints of this V1PodSpec. # noqa: E501
:rtype: list[V1TopologySpreadConstraint]
"""
return self._topology_spread_constraints
@topology_spread_constraints.setter
def topology_spread_constraints(self, topology_spread_constraints):
"""Sets the topology_spread_constraints of this V1PodSpec.
TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. # noqa: E501
:param topology_spread_constraints: The topology_spread_constraints of this V1PodSpec. # noqa: E501
:type: list[V1TopologySpreadConstraint]
"""
self._topology_spread_constraints = topology_spread_constraints
@property
def volumes(self):
"""Gets the volumes of this V1PodSpec. # noqa: E501
List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes # noqa: E501
:return: The volumes of this V1PodSpec. # noqa: E501
:rtype: list[V1Volume]
"""
return self._volumes
@volumes.setter
def volumes(self, volumes):
"""Sets the volumes of this V1PodSpec.
List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes # noqa: E501
:param volumes: The volumes of this V1PodSpec. # noqa: E501
:type: list[V1Volume]
"""
self._volumes = volumes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PodSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PodSpec):
return True
return self.to_dict() != other.to_dict()
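# Usage sketch (not part of the generated model): constructing a small pod spec with
# the kubernetes Python client and serializing it via to_dict(). The container name
# and image below are hypothetical placeholder values.
#
# from kubernetes import client
#
# pod_spec = client.V1PodSpec(
#     restart_policy="Always",
#     containers=[client.V1Container(name="web", image="nginx:1.25")],
# )
# print(pod_spec.to_dict()["restart_policy"])  # -> "Always"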
|
py | b417edac23e81f9adc137f5ecabbdffe5d73d9eb | import datetime as dt
import os
import warnings
import sqlalchemy as sa
import pcse
from pcse import settings
from pcse.db.pcse import fetch_cropdata, fetch_sitedata, fetch_soildata, GridWeatherDataProvider, \
AgroManagementDataProvider
from pcse.base_classes import ParameterProvider
db_location = os.path.join(settings.PCSE_USER_HOME,"pcse.db")
db_location = os.path.normpath(db_location)
dsn = "sqlite:///" + db_location
db_engine = sa.create_engine(dsn)
db_metadata = sa.MetaData(db_engine)
grid = 31031
crop = 1
year = 2000
# Get input parameters from database
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sited = fetch_sitedata(db_metadata, grid, year)
cropd = fetch_cropdata(db_metadata, grid, year, crop)
soild = fetch_soildata(db_metadata, grid)
parameters = ParameterProvider(sitedata=sited, cropdata=cropd, soildata=soild)
# Get Agromanagement
agromanagement = AgroManagementDataProvider(db_engine, grid, crop, year)
start_date = list(agromanagement[0].keys())[0]
end_date = start_date + dt.timedelta(days=365)
weather = GridWeatherDataProvider(db_engine, grid_no=grid, start_date=start_date, end_date=end_date)
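# Possible next step, sketched here as an addition to the original example: run a
# WOFOST simulation on the inputs assembled above. This assumes Wofost71_WLP_FD is
# exposed under pcse.models in the installed PCSE version.
if __name__ == "__main__":
    from pcse.models import Wofost71_WLP_FD
    # Water-limited production with free drainage, driven by the DB-backed inputs.
    wofost = Wofost71_WLP_FD(parameters, weather, agromanagement)
    wofost.run_till_terminate()
    results = wofost.get_output()
    print("Simulated %d days, last output day: %s" % (len(results), results[-1]["day"]))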
|
py | b417ee380c0519e61d4445092042f5a078665f93 | # encoding: UTF-8
import json
import logging
import requests
########################################################################
class dingApi ():
BASE_API_URL = 'https://oapi.dingtalk.com/robot/send?access_token='
#钉钉token
DEFAULT_TOKEN = ''
JSON_HEADER = {
'Accept': 'application/json',
'Content-Type': 'application/json;charset=utf-8'
}
def __init__(self,token=None):
if token != None:
self.BASE_API_URL = self.BASE_API_URL + token
else:
self.BASE_API_URL = self.BASE_API_URL + self.DEFAULT_TOKEN
def msg(self, content, atAll=True):
if not content or not self.DEFAULT_TOKEN: return
text = {'msgtype': 'text', 'text': {'content':content}, 'at': {'isAtAll':atAll}}
text = json.dumps(text)
response = requests.post(self.BASE_API_URL, data=text, headers=self.JSON_HEADER)
self._response(response)
def data(self, content, atAll=True):
if not content or not self.DEFAULT_TOKEN: return
title = content.get('title', '!!!警告!!!')
msg_str = "# "+title+"\r\n------"
for msg in content:
if msg == 'title':
continue
msg_str = msg_str+"\r\n#### "+str(msg)+" : "+str(content[msg])
text = {'msgtype':'markdown', 'markdown': {'title': title, 'text': msg_str}, 'at': {'isAtAll': atAll}}
text = json.dumps(text)
response = requests.post(self.BASE_API_URL, data=text, headers=self.JSON_HEADER)
self._response(response)
def _response(self,response):
if response.status_code == 200:
try:
text = json.loads(response.text)
if text.get('errmsg') != 'ok':
logging.error('DingApi send failed: '+text['errmsg'])
except:
logging.error('DingApi is fail')
else:
logging.error('DingApi error status_code: '+str(response.status_code))
if __name__ == '__main__':
ding = dingApi()
ding.msg('操作失败')
ding.data({
"msg": "操作失败"
})
|
py | b417ee47a148f106da9e00f952a94ef62a7e793c | from unittest import TestCase
from src.util.get_embeddings import get_embeddings
from src.tests._data_test import DataTest
from scipy import spatial
import random
from src.util.config import *
import logging
class Test(TestCase):
run_configuration()
embeddings_dict = DataTest().embeddings_dict()
def test_parse_embedding_pickle(self):
embeddings_dict = Test.embeddings_dict
if embeddings_dict:
a_word = random.choice(list(embeddings_dict.keys()))
distance = spatial.distance.euclidean(embeddings_dict[a_word], embeddings_dict[a_word])
self.assertEqual(distance, 0)
else:
logging.info('Test Not Fail - Embedding data not found, check embedding data path')
pass
def test_get_embeddings(self):
embeddings_dict = Test.embeddings_dict
if embeddings_dict:
word2idx, idx2word, vectors = get_embeddings(embeddings_dict)
a_word = "king"
idx_of_word = word2idx[a_word]
word_of_idx = idx2word[idx_of_word]
vector_dims = [len(i) for i in vectors]
self.assertEqual(a_word, word_of_idx)
self.assertEqual(vector_dims[0], DataTest().embedding_dim)
self.assertTrue(all(i == DataTest().embedding_dim for i in vector_dims))
self.assertEqual(DataTest().embedding_dim, vectors.size()[1])
else:
logging.info('Test Not Fail - Embedding data not found, check embedding data path')
pass
def test_torch_glove_embeddings(self):
embeddings = DataTest().compare_embedding_dicts()
if embeddings:
embedding_dict_1 = embeddings[0]
embedding_dict_2 = embeddings[1]
a_word = "queen"
self.assertIn(a_word, embedding_dict_2.keys())
a_vector_1 = embedding_dict_1[a_word]
a_vector_2 = embedding_dict_2[a_word]
self.assertEqual(a_vector_1.all(), a_vector_2.all())
else:
logging.info('Test Not Fail - Embedding data not found, check embedding data path')
pass
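# Illustrative helper (a sketch, not one of the project's test cases) showing how the
# embedding dictionary exercised above is typically queried; cosine_similarity is a
# hypothetical name and assumes embeddings_dict maps words to fixed-length vectors.
def cosine_similarity(embeddings_dict, word_a, word_b):
    """Return cosine similarity between two words, or None if either word is missing."""
    if word_a not in embeddings_dict or word_b not in embeddings_dict:
        return None
    # scipy's cosine() returns a distance, so similarity is 1 - distance.
    return 1.0 - spatial.distance.cosine(embeddings_dict[word_a], embeddings_dict[word_b])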
|
py | b417eebc30b578223b40deda80d59947187430d4 |
# python standard library
from collections import OrderedDict
import textwrap
import importlib
# third party
from configobj import ConfigObj
from validate import Validator
# this package
from base_plugin import BasePlugin, BaseConfiguration
from theape.parts.dummy.dummy import DummyClass
from theape.parts.dummy.dummy import CrashDummy
from theape.parts.dummy.dummy import HangingDummy
DESCRIPTION = """{bold}DummyClass{reset} logs its calls and then returns. It is meant to be used as changes are made to the infrastructure so that the {blue}Ape{reset} can be tested without using any other components."""
EXAMPLES = """{bold}dummy(){reset}"""
NOTE = "The {bold}DummyClass{reset} will change as the infrastructure changes. In particular the building and testing of plugins and components will likely evolve once real plugins are created."
output_documentation = __name__ == '__builtin__'
class Dummy(BasePlugin):
"""
A plugin to test the infrastructure (a no-op)
"""
def __init__(self, *args, **kwargs):
super(Dummy, self).__init__(*args, **kwargs)
return
@property
def sections(self):
"""
An ordered dict for the help page
"""
if self._sections is None:
bold = "{bold}"
reset = '{reset}'
self._sections = OrderedDict()
self._sections['Name'] = (bold + 'DummyClass' + reset +
' -- a no-op')
self._sections['Description'] = DESCRIPTION
self._sections['Example'] = EXAMPLES
self._sections['Note'] = NOTE
return self._sections
@property
def product(self):
"""
builds and returns a DummyClass
:precondition: self.configuration map has been set
"""
if self._product is None:
kwargs = self.configuration[self.section_header]
self._product = DummyClass(**kwargs)
return self._product
def fetch_config(self):
"""
prints a sample configuration
"""
print(textwrap.dedent("""
[[DUMMY]]
# the section name is arbitrary but must match the name in the [APE] section
# so the 'plugin' option is what actually specifies the plugin
plugin = Dummy
# the dummy will take anything you set here and log it
any_arbitrary_option = any_arbitrary_value
another_option = another_value"""))
return
# end class Dummy
class CrashTestDummyConstants(object):
__slots__ = ()
error_module_option = 'error_module'
error_option = 'error'
error_message_option = 'error_message'
function_option = 'function'
error_module_default = 'exceptions'
error_default = 'Exception'
error_message_default = 'My work is done, why wait?'
function_default = '__call__'
crash_configspec = """
plugin = option('CrashTestDummy')
error_module = string(default='exceptions')
error = string(default='Exception')
error_message = string(default='My work is done, why wait?')
function = string(default='__call__')
"""
class CrashTestDummyConfiguration(BaseConfiguration):
"""
Translates the configobj configuration to a CrashTestDummy
"""
def __init__(self, *args, **kwargs):
"""
CrashTestDummyConfiguration
:param:
- `section_name`: name in the configuration with settings
- `configuration`: dict of configuration values
"""
super(CrashTestDummyConfiguration, self).__init__(*args, **kwargs)
return
@property
def configspec_source(self):
"""
the configuration specification source
"""
if self._configspec_source is None:
self._configspec_source = crash_configspec
return self._configspec_source
@property
def product(self):
"""
A crash test dummy
"""
if self._product is None:
error_module = self.configuration['error_module']
error = self.configuration['error']
module = importlib.import_module(error_module)
err = getattr(module, error)
self.configuration['error'] = err
self._product = CrashDummy(**self.configuration)
return self._product
# end class CrashTestDummyConfiguration
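# Sketch (not part of the plugin module): validating a configuration section against
# crash_configspec with the ConfigObj/Validator imports above. The sample section
# contents here are hypothetical.
#
# sample = ConfigObj(["plugin = CrashTestDummy", "error = RuntimeError"],
#                    configspec=crash_configspec.splitlines())
# outcome = sample.validate(Validator())  # True, or a per-option result dict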
class CrashTestDummy(BasePlugin):
"""
A plugin to test the infrastructure by crashing
"""
def __init__(self, *args, **kwargs):
super(CrashTestDummy, self).__init__(*args, **kwargs)
self._config_builder = None
return
@property
def config_builder(self):
"""
A CrashTestDummy configuration
"""
if self._config_builder is None:
self._config_builder = CrashTestDummyConfiguration(section_name=self.section_header,
source=self.configuration)
return self._config_builder
@property
def sections(self):
"""
An ordered dict for the help page
"""
if self._sections is None:
bold = "{bold}"
reset = '{reset}'
self._sections = OrderedDict()
self._sections['Name'] = (bold + 'CrashTestDummy' + reset +
' -- a crashing module')
self._sections['Description'] = (DESCRIPTION.replace('DummyClass',
'CrashTestDummy') +
"\It takes an error to raise as an argument and raises it when called.")
self._sections['Example'] = EXAMPLES.replace('dummy',
'crashtestdummy')
self._sections['Note'] = NOTE
return self._sections
@property
def product(self):
"""
builds and returns a CrashTestDummy
:precondition: self.configuration map has been set
:return: CrashTestDummy
"""
if self._product is None:
self._product = self.config_builder.product
return self._product
def fetch_config(self):
"""
prints a message saying there is no configuration
"""
print(textwrap.dedent("""
[[CRASHTESTDUMMY]]
plugin = CrashTestDummy
# the dummy will take anything you set here and log it
any_arbitrary_option = any_arbitrary_value
another_option = another_value
# but these will set the error to raise
# if these are missing will default to Exception
error_message = AUUUUUUUGGGGGHHHHHHH
error_module = exceptions
error = RuntimeError
# to have it crash somewhere specific in the component interface
# can be any attribute you want to call
# e.g. function = umma will crash if dummy.umma() is called or dummy.umma
function = __call__"""))
return
# end class CrashTestDummy
stuck_dummy_configspec = """
plugin = option('StuckDummy')
__many__ = string
"""
class StuckDummyConfiguration(BaseConfiguration):
"""
Configuration builder for the dummy that hangs
:param:
- `section_name`: name in the configuration with settings
- `configuration`: dict of configuration values
"""
def __init__(self, *args, **kwargs):
super(StuckDummyConfiguration, self).__init__(*args, **kwargs)
return
@property
def configspec_source(self):
"""
configuration specification source
"""
if self._configspec_source is None:
self._configspec_source = stuck_dummy_configspec
return self._configspec_source
@property
def product(self):
"""
The HangingDummy callable object
"""
if self._product is None:
self._product = HangingDummy(**self.configuration)
return self._product
# end class StuckDummyConfiguration
class StuckDummy(BasePlugin):
"""
A plugin to test the infrastructure by hanging
"""
def __init__(self, *args, **kwargs):
super(StuckDummy, self).__init__(*args, **kwargs)
self._config_builder = None
return
@property
def config_builder(self):
"""
Stuck Dummy Configuration
"""
if self._config_builder is None:
self._config_builder = StuckDummyConfiguration(source=self.configuration,
section_name=self.section_header)
return self._config_builder
@property
def sections(self):
"""
An ordered dict for the help page
"""
if self._sections is None:
bold = "{bold}"
reset = '{reset}'
self._sections = OrderedDict()
self._sections['Name'] = (bold + 'StuckDummy' + reset +
' -- a stuck (hanging) module')
self._sections['Description'] = (DESCRIPTION.replace('DummyClass',
'StuckDummy') +
"Sleeps for three years at a time in an infinite loop.")
self._sections['Example'] = EXAMPLES.replace('dummy',
'stuckdummy')
self._sections['Note'] = NOTE
return self._sections
@property
def product(self):
"""
builds and returns a HangingDummy
:precondition: self.configuration map has been set
"""
if self._product is None:
# get the random inputs and create a keyword-argument- dictionary
self._product = self.config_builder.product
return self._product
def fetch_config(self):
"""
prints a message saying there is no configuration
"""
print(textwrap.dedent("""
[[HANGINGDUMMY]]
# the dummy will take anything you set here and log it
any_arbitrary_option = any_arbitrary_value
another_option = another_value
"""))
return
# end class HangingDummy
|
py | b417ef46c961c22940926308c8b56bc7b537b2b6 | from .BaseTranslator import BaseTranslator
from .GenericTranslator import GenericTranslator
from .MySQLTranslator import MySQLTranslator
|
py | b417ef77b9bfaf768dc82bdc3e1ce3ce2a8407fc | #!/usr/bin/python3
# INTEL CONFIDENTIAL
# Copyright 2018-2019 Intel Corporation All Rights Reserved.
# The source code contained or described herein and all documents related to the
# source code ("Material") are owned by Intel Corporation or its suppliers or
# licensors. Title to the Material remains with Intel Corporation or its
# suppliers and licensors. The Material may contain trade secrets and proprietary
# and confidential information of Intel Corporation and its suppliers and
# licensors, and is protected by worldwide copyright and trade secret laws and
# treaty provisions. No part of the Material may be used, copied, reproduced,
# modified, published, uploaded, posted, transmitted, distributed, or disclosed
# in any way without Intel's prior express written permission.
# No license under any patent, copyright, trade secret or other intellectual
# property right is granted to or conferred upon you by disclosure or delivery of
# the Materials, either expressly, by implication, inducement, estoppel or
# otherwise. Any license under such intellectual property rights must be express
# and approved by Intel in writing.
# Include any supplier copyright notices as supplier requires Intel to use.
# Include supplier trademarks or logos as supplier requires Intel to use,
# preceded by an asterisk. An asterisked footnote can be added as follows:
# *Third Party trademarks are the property of their respective owners.
# Unless otherwise agreed by Intel in writing, you may not remove or alter
# this notice or any other notice embedded in Materials by Intel or Intel's
# suppliers or licensors in any way.
import requests
import jenkins
import logging
from retrying import retry
# Logging
logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
_RETRY_LIMIT = 3
_RETRY_COOLDOWN_MS = 5000
class JenkinsWrapper:
"""Class wrapping Python-Jenkins API.
The purpose of this class is to wrap methods from Python-Jenkins API used in Watchdog, for less error-prone and
more convenient use. Docs for used API, including wrapped methods can be found at:
https://python-jenkins.readthedocs.io/en/latest/
:param jenkins_token: Token used for Jenkins
:param jenkins_user: Username used to connect to Jenkins
:param jenkins_server: Jenkins server address
:type jenkins_token: String
:type jenkins_user: String
:type jenkins_server: String
"""
def __init__(self, jenkins_token, jenkins_user, jenkins_server):
self.jenkins_server = jenkins_server
self.jenkins = jenkins.Jenkins(jenkins_server, username=jenkins_user,
password=jenkins_token)
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
def get_build_console_output(self, job_name, build_number):
return self.jenkins.get_build_console_output(job_name, build_number)
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
def get_job_info(self, job_name):
return self.jenkins.get_job_info(job_name)
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
def get_build_info(self, job_name, build_number):
return self.jenkins.get_build_info(job_name, build_number)
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
def get_queue_item(self, queue_id):
"""Attempt to retrieve Jenkins job queue item.
Exception communicating queue doesn't exist is expected,
in that case method returns empty dict.
:param queue_id: Jenkins job queue ID number
:type queue_id: int
:return: Dictionary representing Jenkins job queue item
:rtype: dict
"""
try:
return self.jenkins.get_queue_item(queue_id)
except Exception as e:
# Exception 'queue does not exist' is expected behaviour when job is running
if 'queue' in str(e) and 'does not exist' in str(e):
return {}
else:
raise
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
def get_idle_ci_hosts(self):
"""Query Jenkins for idle servers.
Send GET request to Jenkins server, querying for idle servers labeled
for nGraph-ONNX CI job.
:return: Number of idle hosts delegated to nGraph-ONNX CI
:rtype: int
"""
jenkins_request_url = self.jenkins_server + 'label/ci&&onnx/api/json?pretty=true'
try:
log.info('Sending request to Jenkins: %s', jenkins_request_url)
r = requests.Request(method='GET', url=jenkins_request_url, verify=False)
response = self.jenkins.jenkins_request(r).json()
return int(response['totalExecutors']) - int(response['busyExecutors'])
except Exception as e:
log.exception('Failed to send request to Jenkins!\nException message: %s', str(e))
raise
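# Usage sketch (not part of the original module); the token, user, and server URL
# below are placeholder values.
if __name__ == "__main__":
    wrapper = JenkinsWrapper(jenkins_token="<api-token>",
                             jenkins_user="ci-user",
                             jenkins_server="https://jenkins.example.com/")
    log.info("Idle nGraph-ONNX CI hosts: %d", wrapper.get_idle_ci_hosts())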
|
py | b417ef8deb8d7245cab8ff8b0a83f01d9faed210 | #!/usr/bin/python3
# <xbar.title>OddsShark Bar</xbar.title>
# <xbar.version>v1.0</xbar.version>
# <xbar.author>Joel Goodbody</xbar.author>
# <xbar.author.github>jgoodbody</xbar.author.github>
# <xbar.desc>Displays OddsShark Info</xbar.desc>
# <xbar.image>https://www.bigonsports.com/wp-content/uploads/2016/10/sports-betting-odds-explained.jpg</xbar.image>
# <xbar.dependencies>python</xbar.dependencies>
# <xbar.abouturl>https://github.com/jgoodbody/oddsshark-bar</xbar.abouturl>
# <xbar.var>select(VAR_FUTUREODDS="Bovada"): How to sort future odds? [Team, Opening, Bovada, BetOnline, SportsBetting]</xbar.var>
# <xbar.var>boolean(VAR_NFL=true): NFL odds?</xbar.var>
# <xbar.var>boolean(VAR_MLB=true): MLB odds?</xbar.var>
# <xbar.var>boolean(VAR_NBA=true): NBA odds?</xbar.var>
# <xbar.var>boolean(VAR_NHL=true): NHL odds?</xbar.var>
# <xbar.var>boolean(VAR_UFC=true): UFC odds?</xbar.var>
# <xbar.var>boolean(VAR_NCAAF=true): College Football odds?</xbar.var>
# <xbar.var>boolean(VAR_NCAAB=true): College Basketball odds?</xbar.var>
import os
import requests
import collections
from bs4 import BeautifulSoup
# For monospaced font
font = '| font=Courier | size=14'
site = 'https://www.oddsshark.com'
lgs = collections.defaultdict(dict)
lgs['UFC']['odds'] = requests.get(
'https://io.oddsshark.com/ticker/ufc',
headers = {
'referer': site
}
)
for league, active in lgs['UFC']['odds'].json()['leagues'].items():
if os.environ.get('VAR_'+league.upper()) == 'true':
lgs[league.upper()]['futures'] = BeautifulSoup(requests.get(f"{site}/{league}/odds/futures").content, 'html.parser')
if active==True:
lgs[league.upper()]['odds'] = requests.get(
'https://io.oddsshark.com/ticker/' + league,
headers = {
'referer': site
}
).json()['matchups']
class Future_Odds:
def __init__(self, future, team, opening, bovada, betonline, sportsbetting):
self.future = future
self.team = team
self.opening = opening
self.bovada = bovada
self.betonline = betonline
self.sportsbetting = sportsbetting
def __repr__(self):
return repr((self.future, self.team, self.opening, self.bovada, self.betonline, self.sportsbetting))
def sorting_provider(x, provider):
odd = getattr(x, provider)
if odd == '':
return float('inf')
else:
return int(odd)
def simple_odds(odds):
for game in odds:
if game['type'] == 'date':
print('--', game['date']['fullday'], game['date']['month'], game['date']['day'], font)
if game["type"] == "matchup":
if 'matchup_link' in game:
print('----', game['status'], font, '| href=' + site + game['matchup_link'])
else:
print('----', game['status'], font)
if game['status'].startswith('FINAL'):
print('----', game['away_short_name'], game['away_score'], font)
print('----', game['home_short_name'], game['home_score'], font)
else:
print('----', game['away_short_name'], game['away_odds'] if game['away_odds'].startswith('-') else '+' + game['away_odds'], font)
print('----', game['home_short_name'], game['home_odds'] if game['home_odds'].startswith('-') else '+' + game['home_odds'], font)
def ufc_odds(odds):
print('UFC', font)
for fight in odds:
if fight['type'] == 'event':
print('--', fight['event'], font)
if fight["type"] == 'matchup':
if not fight['status']:
print('----', fight['event_date'][11:], font)
print('----', fight['away_name'], fight['away_odds'] if fight['away_odds'].startswith('-') else '+' + fight['away_odds'], font)
print('----', fight['home_name'], fight['home_odds'] if fight['home_odds'].startswith('-') else '+' + fight['home_odds'], font)
else:
print('----', 'FINAL', font)
print('----', fight['away_name'], fight['status'] if fight['away_name'] == fight['winner'] else '', font)
print('----', fight['home_name'], fight['status'] if fight['home_name'] == fight['winner'] else '', font)
def process_odds_section(soup, html_type, html_class):
data = soup.find_all(html_type, class_=html_class)
init_list = []
daysofweek = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
for elem in data:
if not elem.get_text().startswith(tuple(daysofweek)):
init_list.append(elem.get_text().strip())
return init_list
def create_futures_data(teams, opening, currents):
futures_trios = []
for a, b, c in zip(*[iter(currents)]*3):
futures_trios.append([a,b,c])
rows = []
odds_inc = 0
sports = ['MLB','NFL','NHL','NBA','College Basketball','College Football']
for team in teams:
if not team.startswith(tuple(sports)):
rows.append(Future_Odds(future, team,
opening[odds_inc],
futures_trios[odds_inc][0],
futures_trios[odds_inc][1],
futures_trios[odds_inc][2]))
odds_inc += 1
else:
rows.append([team])
future = team
return rows
def print_all_odds(lgs):
for sport in lgs:
if sport not in ['UFC','SOCCER']:
print(sport, font)
if 'odds' in lgs[sport]:
simple_odds(lgs[sport]['odds'])
print('-- Futures', font)
elif sport == 'UFC':
ufc_odds(lgs[sport]['odds'])
opening_odds = process_odds_section(lgs[sport]['futures'], 'div', ['op-item op-future-item', 'op-item op-future-item '])
current_odds = process_odds_section(lgs[sport]['futures'], 'div', 'op-item op-future-item no-vegas')
team_list = process_odds_section(lgs[sport]['futures'], ['div','span'], ['align-bottom',
'op-team baseball op-odd',
'op-team baseball op-even',
'op-team basketball op-odd',
'op-team basketball op-even',
'op-team football op-odd',
'op-team football op-even',
'op-team hockey op-odd',
'op-team hockey op-even'])
all_odds = create_futures_data(team_list, opening_odds, current_odds)
for odds in all_odds:
if type(odds) is list:
print('----', odds[0], font)
print('------ {0:<21}{1:>8}{2:>8}{3:>14}{4:>15}{5}'.format('Team','Opening','Bovada','BetOnline.AG','Sports Betting',font))
futures = [fut for fut in all_odds if hasattr(fut, 'future') and fut.future == odds[0]]
futures = sorted(futures, key=lambda x: sorting_provider(x, os.environ.get('VAR_FUTUREODDS').lower()))
for odds in futures:
print('------ {0:<21}{1:>8}{2:>8}{3:>14}{4:>15}{5}'.format(odds.team, odds.opening, odds.bovada, odds.betonline, odds.sportsbetting, font))
print('OddsShark Bar', font)
print('---')
print_all_odds(lgs)
|
py | b417f00d31353c9ab096d3922cd9a5f2a50cdf80 | import time
from kafka import KafkaProducer
from kafka.errors import KafkaError, NoBrokersAvailable, KafkaTimeoutError
from config import KAFKA_BOOTSTRAP_SERVER
class BasicKafkaProducer(object):
"""
This is a basic Kafka Producer class which can push String messages into kafka
KAFKA_BOOTSTRAP_SERVER holds the connection details for the Kafka broker(s);
it can be a single broker or a list of brokers, e.g. ['localhost:9092']
This producer tries 5 times to push the message into kafka in case of failure
"""
def __init__(self):
try:
print ("Initialising Kafka Producer")
self.producer = KafkaProducer(bootstrap_servers=KAFKA_BOOTSTRAP_SERVER,
retries=5)
except NoBrokersAvailable:
print (u'Kafka Host not available: {}'.format(KAFKA_BOOTSTRAP_SERVER))
self.producer = None
def send_message(self, topic_name, message, key=None):
"""
:param topic_name: topic name
:param key: key to decide partition
:param message: String object to send
:return:
"""
if not self.producer:
print(u'Kafka Host not available: {}'.format(KAFKA_BOOTSTRAP_SERVER))
return
try:
start = time.time()
self.producer.send(topic_name, value=message)
print(u'Time taken to push to Kafka: {}'.format(time.time() - start))
except KafkaTimeoutError as e:
print (u'Message not sent: {}'.format(KAFKA_BOOTSTRAP_SERVER))
print(e)
pass
except Exception as e:
print(u'Message not sent: {}'.format(KAFKA_BOOTSTRAP_SERVER))
print (e)
pass
def close(self):
try:
if not self.producer:
print(u'No active producer: {}'.format(KAFKA_BOOTSTRAP_SERVER))
else:
self.producer.close()
except KafkaError as e:
print(u'Error closing connection to Kafka Host: {}'.format(KAFKA_BOOTSTRAP_SERVER))
print (e)
# if __name__ == "__main__":
# _producer = BasicKafkaProducer()
# _producer.send_message(topic_name='test-topic-example',
# message='{"test_col": "column_value"}'
# )
# _producer.close()
|
py | b417f071318bbfa48e8d4be6f7d315ebc0cadff3 | import argparse
import inspect
import json
import logging
import os
from collections import deque, namedtuple
from enum import Enum
import igibson
import printree
import pyinstrument
from igibson import object_states
from igibson.examples.learning import demo_replaying_example
from igibson.object_states import ROOM_STATES, factory
from igibson.object_states.object_state_base import AbsoluteObjectState, BooleanState, RelativeObjectState
from igibson.robots.behavior_robot import BRBody
from igibson.tasks.bddl_backend import ObjectStateBinaryPredicate, ObjectStateUnaryPredicate
import behavior
StateRecord = namedtuple("StateRecord", ["state_type", "objects", "value"])
StateEntry = namedtuple("StateEntry", ["frame_count", "state_records"])
Segment = namedtuple("DiffEntry", ["start", "duration", "end", "state_records", "sub_segments"])
class SegmentationObjectSelection(Enum):
ALL_OBJECTS = 1
TASK_RELEVANT_OBJECTS = 2
ROBOTS = 3
class SegmentationStateSelection(Enum):
ALL_STATES = 1
GOAL_CONDITION_RELEVANT_STATES = 2
class SegmentationStateDirection(Enum):
BOTH_DIRECTIONS = 1
FALSE_TO_TRUE = 2
TRUE_TO_FALSE = 3
STATE_DIRECTIONS = {
# Note that some of these states already only go False-to-True so they are left as BOTH_DIRECTIONS
# so as not to add filtering work.
object_states.Burnt: SegmentationStateDirection.BOTH_DIRECTIONS,
object_states.Cooked: SegmentationStateDirection.BOTH_DIRECTIONS,
object_states.Dusty: SegmentationStateDirection.BOTH_DIRECTIONS,
object_states.Frozen: SegmentationStateDirection.BOTH_DIRECTIONS,
object_states.InFOVOfRobot: SegmentationStateDirection.FALSE_TO_TRUE,
object_states.InHandOfRobot: SegmentationStateDirection.FALSE_TO_TRUE,
object_states.InReachOfRobot: SegmentationStateDirection.FALSE_TO_TRUE,
object_states.InSameRoomAsRobot: SegmentationStateDirection.FALSE_TO_TRUE,
object_states.Inside: SegmentationStateDirection.FALSE_TO_TRUE,
object_states.NextTo: SegmentationStateDirection.FALSE_TO_TRUE,
# OnFloor: SegmentationStateDirection.FALSE_TO_TRUE,
object_states.OnTop: SegmentationStateDirection.FALSE_TO_TRUE,
object_states.Open: SegmentationStateDirection.BOTH_DIRECTIONS,
object_states.Sliced: SegmentationStateDirection.BOTH_DIRECTIONS,
object_states.Soaked: SegmentationStateDirection.BOTH_DIRECTIONS,
object_states.Stained: SegmentationStateDirection.BOTH_DIRECTIONS,
object_states.ToggledOn: SegmentationStateDirection.BOTH_DIRECTIONS,
# Touching: SegmentationStateDirection.BOTH_DIRECTIONS,
object_states.Under: SegmentationStateDirection.FALSE_TO_TRUE,
}
STATE_DIRECTIONS.update({state: SegmentationStateDirection.FALSE_TO_TRUE for state in ROOM_STATES})
ALLOWED_SUB_SEGMENTS_BY_STATE = {
object_states.Burnt: {object_states.OnTop, object_states.ToggledOn, object_states.Open, object_states.Inside},
object_states.Cooked: {object_states.OnTop, object_states.ToggledOn, object_states.Open, object_states.Inside},
object_states.Dusty: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
object_states.Frozen: {
object_states.InReachOfRobot,
object_states.OnTop,
object_states.ToggledOn,
object_states.Open,
object_states.Inside,
},
object_states.InFOVOfRobot: {},
object_states.InHandOfRobot: {},
object_states.InReachOfRobot: {},
object_states.InSameRoomAsRobot: {},
object_states.Inside: {
object_states.Open,
object_states.InSameRoomAsRobot,
object_states.InReachOfRobot,
object_states.InHandOfRobot,
},
object_states.NextTo: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
# OnFloor: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
object_states.OnTop: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
object_states.Open: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
object_states.Sliced: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
object_states.Soaked: {
object_states.ToggledOn,
object_states.InSameRoomAsRobot,
object_states.InReachOfRobot,
object_states.InHandOfRobot,
},
object_states.Stained: {
object_states.Soaked,
object_states.InSameRoomAsRobot,
object_states.InReachOfRobot,
object_states.InHandOfRobot,
},
object_states.ToggledOn: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot},
# Touching: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
object_states.Under: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot},
}
def process_states(objects, state_types):
"""
Process/Analyze the state of the relevant objects for the segmentation
:param objects: Objects to analyze for the segmentation
:param state_types: State types to analyze for the segmentation
:return: Set of predicates in the form of StateRecords that contain the type of state, the object(s) involved and the value of the state
"""
predicate_states = set()
for obj in objects:
for state_type in state_types:
# If this type of state does not apply to this object (e.g., frozen for a table)
if state_type not in obj.states:
continue
# For some reason, the state types have to be all Booleans
assert issubclass(state_type, BooleanState)
# Get the state of this state type
state = obj.states[state_type]
if issubclass(state_type, AbsoluteObjectState):
# Add only one instance of absolute state
try:
value = bool(state.get_value())
record = StateRecord(state_type, (obj,), value)
predicate_states.add(record)
except ValueError:
pass
elif issubclass(state_type, RelativeObjectState):
# Add one instance per state pair
for other in objects:
try:
value = state.get_value(other)
record = StateRecord(state_type, (obj, other), value)
predicate_states.add(record)
except ValueError:
pass
else:
raise ValueError("Unusable state for segmentation.")
return predicate_states
def _get_goal_condition_states(env):
state_types = set()
q = deque()
q.extend(env.task.goal_conditions)
while q:
pred = q.popleft()
if isinstance(pred, ObjectStateUnaryPredicate) or isinstance(pred, ObjectStateBinaryPredicate):
state_types.add(pred.STATE_CLASS)
q.extend(pred.children)
return state_types
class DemoSegmentationProcessor(object):
"""
Processing object for demos that segments them
"""
def __init__(
self,
state_types=None,
object_selection=SegmentationObjectSelection.TASK_RELEVANT_OBJECTS,
label_by_instance=False,
hierarchical=False,
diff_initial=False,
state_directions=STATE_DIRECTIONS,
profiler=None,
):
# List of StateEntry's
self.state_history = []
self.last_state = None
# List of state types to use for segmentation
self.state_types_option = state_types
# To be populated in initialize(). It will contain the types of states to track for segmenting
self.state_types = None
# Direction we want states to change, e.g., from True to False or vice versa.
# For different state types can be different
self.state_directions = state_directions
# Objects to analyze for the segmentation (e.g., task relevant or all)
self.object_selection = object_selection
self.label_by_instance = label_by_instance
self.hierarchical = hierarchical
self.all_state_types = None
# We save a first empty StateEntry that works as initial one for doing diff
if diff_initial:
self.state_history.append(StateEntry(0, set()))
self.last_state = set()
self.profiler = profiler
def start_callback(self, env, _):
"""
Initial callback to call at the beginning of the segmentation process.
It populates state_types, the types of states to listen to in order to segment the demos.
:param env: Environment to evaluate
:param _: Unused extra callback argument
:return: None
"""
self.all_state_types = [
state
for state in factory.get_all_states()
if (
issubclass(state, BooleanState)
and (issubclass(state, AbsoluteObjectState) or issubclass(state, RelativeObjectState))
)
] # All the state types we use for segmentation
if isinstance(self.state_types_option, list) or isinstance(self.state_types_option, set):
self.state_types = self.state_types_option
elif self.state_types_option == SegmentationStateSelection.ALL_STATES:
self.state_types = self.all_state_types
elif self.state_types_option == SegmentationStateSelection.GOAL_CONDITION_RELEVANT_STATES:
self.state_types = _get_goal_condition_states(env)
else:
raise ValueError("Unknown segmentation state selection.")
if self.state_types is None:
from IPython import embed
embed()
def step_callback(self, env, _):
"""
Callback to call at each step of the segmentation process.
:param env:
:param _:
:return: None
"""
if self.profiler:
self.profiler.start()
# Get the objects to analyze for the segmentation based on the object_selection
if self.object_selection == SegmentationObjectSelection.TASK_RELEVANT_OBJECTS:
objects = [obj for obj in env.task.object_scope.values() if not isinstance(obj, BRBody)]
elif self.object_selection == SegmentationObjectSelection.ROBOTS:
objects = [obj for obj in env.task.object_scope.values() if isinstance(obj, BRBody)]
elif self.object_selection == SegmentationObjectSelection.ALL_OBJECTS:
objects = env.scene.get_objects()
else:
raise ValueError("Incorrect SegmentationObjectSelection %r" % self.object_selection)
# Get the processed state -> states of the state types indicated for the objects indicated
state_types_to_use = self.state_types if not self.hierarchical else self.all_state_types
processed_state = process_states(objects, state_types_to_use)
# If this is the first step (last_state is None) or there is a change in the logic states ("-" between
# sets returns the non-common entries), we add this processed state to the history
if self.last_state is None or (processed_state - self.last_state):
self.state_history.append(StateEntry(env.simulator.frame_count, processed_state))
self.last_state = processed_state
if self.profiler:
self.profiler.stop()
def obj2str(self, obj):
return obj.name if self.label_by_instance else obj.category
def _hierarchical_segments(self, state_entries, state_types):
"""
Create a "hierarchical" list of segments: segments followed by subsegments
So far we only have flat segmentations, so there won't be much recursion, but this function is recursive
:param state_entries: History of states for the relevant objects
:param state_types: State types to analyze for the segmentation
:return:
"""
if not state_types:
print("No state_types. Returning empty list")
return []
segments = []
before_idx = 0
after_idx = 1
# Keep iterating until we reach the end of our state entries.
while after_idx < len(state_entries):
# Get the state entries at these keys.
before = state_entries[before_idx]
after = state_entries[after_idx]
# Check if there is a valid diff at this range.
# A diff is a change in state that is relevant for the segmentation, so it will create a segment
# Check only the states that are not shared between after and state entries ("-" of sets)
diffs = self.filter_diffs(after.state_records - before.state_records, state_types)
if diffs is not None:
# If there is a diff, prepare to do sub-segmentation on the segment.
sub_segment_states = set()
if self.hierarchical:
for state_record in diffs:
corresponding_sub_states = ALLOWED_SUB_SEGMENTS_BY_STATE[state_record.state_type]
sub_segment_states.update(corresponding_sub_states)
sub_segments = self._hierarchical_segments(
state_entries[before_idx : after_idx + 1], sub_segment_states
)
segments.append(
Segment(
before.frame_count,
after.frame_count - before.frame_count,
after.frame_count,
diffs,
sub_segments,
)
)
# Continue segmentation by moving the before_idx to start here.
before_idx = after_idx
# Increase the range of elements we're looking at by one.
after_idx += 1
return segments
def get_segments(self):
"""
Gets the segmentation as a list of segments
:return: Single segment representing the entire demo, with subsegments for each of the found segments
"""
segments = self._hierarchical_segments(self.state_history, self.state_types)
if len(segments) > 0:
return Segment(segments[0].start, segments[-1].end - segments[0].start, segments[-1].end, [], segments)
else:
            # When there are no segments, e.g., when we do a room-location segmentation but the robot never changes rooms
return Segment(self.state_history[0].frame_count, 0, self.state_history[0].frame_count, [], [])
def filter_diffs(self, state_records, state_types):
"""
Filter the segments so that only objects in the given state directions are monitored.
:param state_records: Record of organized states on the relevant state types for the segmentation
:param state_types: Relevant state types to segment
        :return: The records from the input that match the monitored state types and change directions, or None if none match
"""
new_records = set()
# Go through the records in the segment.
for state_record in state_records:
# Check if the state type is on our list
if state_record.state_type not in state_types:
continue
logging.debug(
"state_type: {}, objects: {}, value: {}".format(state_record[0], state_record[1], state_record[2])
)
# Check if any object in the record is on our list.
# Mode is the type of state change direction we are interested in, e.g., inHandOfRobot from False to True
mode = self.state_directions[state_record.state_type]
accept = True
if mode == SegmentationStateDirection.FALSE_TO_TRUE:
accept = state_record.value
elif mode == SegmentationStateDirection.TRUE_TO_FALSE:
accept = not state_record.value
            # If the state change matches the direction we monitor, keep the record.
if accept:
new_records.add(state_record)
# If we haven't kept any of this segment's records, drop the segment.
if not new_records:
return None
return new_records
def _serialize_segment(self, segment):
"""
Serializes each segment of a segmentation
The serialization includes the information of the segment such as start/end time, duration, state changes, and any subsegments
        :param segment: Segment to serialize
:return: Serialized segment in the form of a dictionary
"""
stringified_entries = [
{
"name": state_record.state_type.__name__,
"objects": [self.obj2str(obj) for obj in state_record.objects],
"value": state_record.value,
}
for state_record in segment.state_records
]
return {
"start": segment.start,
"end": segment.end,
"duration": segment.duration,
"state_records": stringified_entries,
"sub_segments": [self._serialize_segment(sub_segment) for sub_segment in segment.sub_segments],
}
def _segment_to_dict_tree(self, segment, output_dict):
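        """
        Recursively fills output_dict with a nested mapping from a human-readable
        segment description ("start-end: state changes") to the dict of its sub-segments.
        Used by __str__ to pretty-print the segmentation as a tree.
        """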
stringified_entries = [
(
state_record.state_type.__name__,
", ".join(obj.category for obj in state_record.objects),
state_record.value,
)
for state_record in segment.state_records
]
entry_strs = ["%s(%r) = %r" % entry for entry in stringified_entries]
key = "%d-%d: %s" % (segment.start, segment.end, ", ".join(entry_strs))
sub_segments = {}
for sub in segment.sub_segments:
self._segment_to_dict_tree(sub, sub_segments)
output_dict[key] = sub_segments
def serialize_segments(self):
# Make the root call to recursive function.
return self._serialize_segment(self.get_segments())
def __str__(self):
out = ""
out += "---------------------------------------------------\n"
out += "Segmentation of %s\n" % self.object_selection.name
out += "Considered states: %s\n" % ", ".join(x.__name__ for x in self.state_types)
out += "---------------------------------------------------\n"
output = {}
self._segment_to_dict_tree(self.get_segments(), output)
out += printree.ftree(output) + "\n"
out += "---------------------------------------------------\n"
return out
def parse_args(defaults=False):
args_dict = dict()
args_dict["demo_file"] = os.path.join(
igibson.ig_dataset_path,
"tests",
"cleaning_windows_0_Rs_int_2021-05-23_23-11-46.hdf5",
)
args_dict["out_dir"] = os.path.join(behavior.examples_path, "data")
args_dict["replay_demo_file"] = os.path.splitext(args_dict["demo_file"])[0] + "_segm_replay.json"
args_dict["profile"] = False
args_dict["check_determinism"] = False
if not defaults:
parser = argparse.ArgumentParser(description="Run segmentation on an ATUS demo.")
parser.add_argument(
"--demo_file", type=str, help="Path (and filename) of demo to replay. If empty, test demo will be used."
)
parser.add_argument(
"--out_dir", type=str, help="Directory to store results in. If empty, test directory will be used."
)
parser.add_argument(
"--replay_demo_file", type=str, help="Path (and filename) of demo to save from the replay (for debugging)."
)
parser.add_argument(
"--profile",
action="store_true",
help="Whether to profile the segmentation, outputting a profile HTML in the out path.",
)
parser.add_argument(
"--check_determinism",
action="store_true",
help="Whether to check for determinism in the replay after segmenting.",
)
args = parser.parse_args()
args_dict["demo_file"] = args.demo_file
args_dict["out_dir"] = args.out_dir
args_dict["replay_demo_file"] = args.replay_demo_file
args_dict["profile"] = args.profile
args_dict["check_determinism"] = args.check_determinism
return args_dict
def get_default_segmentation_processors(profiler=None):
"""
Create a set of callbacks to process demos when replaying and perform segmentation
It returns two types of processors for segmentation: flat and per room (allows to see what room the agent is in)
:param profiler: Profiler to measure performance when segmenting
:return: A dictionary with two sets of segmentation processors: flat and per room
"""
    # This applies a "flat" (i.e., non-hierarchical) segmentation using only the states supported by our simple action
    # primitives, i.e., states whose changes can be caused by those action primitives
flat_states = [
object_states.Open,
object_states.OnTop,
object_states.Inside,
object_states.InHandOfRobot,
object_states.InReachOfRobot,
] # States that can be achieved by the robot through action primitives
flat_object_segmentation = DemoSegmentationProcessor(
flat_states, SegmentationObjectSelection.TASK_RELEVANT_OBJECTS, label_by_instance=True, profiler=profiler
)
# This applies a hierarchical segmentation based on goal condition states. It's WIP and currently unused.
goal_segmentation = DemoSegmentationProcessor(
SegmentationStateSelection.GOAL_CONDITION_RELEVANT_STATES,
SegmentationObjectSelection.TASK_RELEVANT_OBJECTS,
hierarchical=True,
label_by_instance=True,
profiler=profiler,
)
# This applies a flat segmentation that allows us to see what room the agent is in during which frames.
room_presence_segmentation = DemoSegmentationProcessor(
ROOM_STATES, SegmentationObjectSelection.ROBOTS, diff_initial=True, profiler=profiler
)
return {
# "goal": goal_segmentation,
"flat": flat_object_segmentation,
"room": room_presence_segmentation,
}
def main(selection="user", headless=False, short_exec=False):
"""
Segment a given demo into a sequence of predefined action primitives
It assumes a predefined map of logic changes to action primitives that cause them
"""
print("*" * 80 + "\nDescription:" + main.__doc__ + "\n" + "*" * 80)
defaults = selection == "random" and headless and short_exec
args_dict = parse_args(defaults=defaults)
# Create output directory if needed.
if not os.path.exists(args_dict["out_dir"]):
        os.makedirs(args_dict["out_dir"])
# Set up the profiler
profiler = None
if args_dict["profile"]:
profiler = pyinstrument.Profiler()
# Create default segmentation processors.
segmentation_processors = get_default_segmentation_processors(profiler)
# Run the segmentations.
print("Run segmentation")
if args_dict["check_determinism"]:
demo_replaying_example.replay_demo_with_determinism_check(
args_dict["demo_file"],
            replay_demo_file=args_dict["replay_demo_file"],
start_callbacks=[sp.start_callback for sp in segmentation_processors.values()],
step_callbacks=[sp.step_callback for sp in segmentation_processors.values()],
)
else:
demo_replaying_example.replay_demo(
args_dict["demo_file"],
replay_demo_file=args_dict["replay_demo_file"],
start_callbacks=[sp.start_callback for sp in segmentation_processors.values()],
step_callbacks=[sp.step_callback for sp in segmentation_processors.values()],
)
print("Save segmentation")
demo_basename = os.path.splitext(os.path.basename(args_dict["demo_file"]))[0]
for segmentation_name, segmentation_processor in segmentation_processors.items():
json_file = "%s_%s_segm.json" % (demo_basename, segmentation_name)
json_fullpath = os.path.join(args_dict["out_dir"], json_file)
with open(json_fullpath, "w") as f:
json.dump(segmentation_processor.serialize_segments(), f)
# Print the segmentations.
combined_output = ""
for segmentation_processor in segmentation_processors.values():
combined_output += str(segmentation_processor) + "\n"
print(combined_output)
# Save profiling information.
if args_dict["profile"]:
print("Save profiling")
html = profiler.output_html()
html_file = demo_basename + "_segm_profile.html"
html_path = os.path.join(args_dict["out_dir"], html_file)
with open(html_path, "w") as f:
f.write(html)
RUN_AS_TEST = True  # Change to False to run this example interactively instead of in test mode
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
if RUN_AS_TEST:
main(selection="random", headless=True, short_exec=True)
else:
main()
|
py | b417f16613038813f7eeff7655051d1b1f022074 | #This is only CNN, with SMOTE oversampling
#Python imports
import numpy as np
import random
import codecs
from nltk.corpus import stopwords
from os import listdir
#Keras imports
from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, Dropout, Dense, Flatten
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
#Scikit learn imports
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import f1_score
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
#SMOTE for oversampling
from imblearn.over_sampling import SMOTE
def try_cnn(X, Y):
np.random.seed(7)
top_words = 5000
max_review_length = 150
#Use one-hot encoding to encode the sentences
encoded_sentences = [one_hot(d, top_words) for d in X]
    #Pad sentences shorter than maxlen and truncate those longer than maxlen
padded_sentences = pad_sequences(encoded_sentences, maxlen = max_review_length, padding='post')
#Perform oversampling using SMOTE
sm = SMOTE(ratio = 'minority', random_state=1, kind='regular', k_neighbors = 5)
X_res, Y_res = sm.fit_sample(padded_sentences, Y)
    #Split the dataset for training and testing using a 67-33 train-test split
X_train, X_test, y_train, y_test = train_test_split(X_res, Y_res, test_size = 0.33, random_state = 1, shuffle = True)
    #Calculate class weights to use in the loss function
myclass_weight = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)
class_weight_dict = dict(enumerate(myclass_weight))
# create the model
    #Use 100-dimensional word embeddings; fewer dimensions can help here due to the smaller dataset
    embedding_vector_length = 100
    model = Sequential()
    #Add the embedding layer at the beginning to accept input of max length
    model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
#Create a CNN architecture with 2 1D convolutional layers with 128 filters and filter size 5. Relu activation is used
model.add(Conv1D(128, 5, activation='relu', padding='same'))
model.add(Conv1D(128, 5, activation='relu', padding='same'))
#Add a maxpooling layer with size 5
model.add(MaxPooling1D(5))
#Add dropout with rate 0.5 to prevent overfitting
model.add(Dropout(0.5))
model.add(Flatten())
#Add two Dense NN layers with 128 units and 1 units for classification with a dropout layer to prevent overfitting
model.add(Dense(128, activation='relu', name='dense1'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
#Train the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(X_train, y_train, epochs=3, batch_size=64, class_weight=class_weight_dict)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
#Predict the classes and the print Precision, Recall, Fscore and Accuracy
a = model.predict_classes(X_test, verbose=1)
print(precision_recall_fscore_support(y_test, a))
print("F-Score: ", f1_score(y_test, a))
print("Accuracy: %.2f%%" % (scores[1]*100))
def generateData(dataFolder):
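    """
    Reads every annotated file in dataFolder (one token per line, near-empty lines
    separating sentences) and returns the sentences X (stop words and punctuation
    removed) together with binary relevance labels Y (1 if any token in the
    sentence carries an annotation other than 'O').
    """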
stops = set(stopwords.words("english"))
X = []
Y = []
sentence = ''
relevance = 0
for fileName in listdir(dataFolder):
f = codecs.open(dataFolder + fileName, encoding='utf-8')
#f = open(dataFolder + fileName, 'r')
#with open(dataFolder + fileName, 'r') as f:
for line in f:
if '\n' in line and len(line) == 2: #End of a Sentence
if sentence != '':
X.append(sentence)
Y.append(relevance)
#Reset sentence and relevance for next line
sentence = ''
relevance = 0
else: #Same sentence still being continued
token = line.split(' ')[0].lower()
#Keep only words of letters and not stop words
if token not in stops and token not in ",!.:<>":
if sentence == '':
sentence = token
else:
sentence += ' ' + token
if line[:-1].split(' ')[-1] != 'O': #if it is annotated, then it is relevant
relevance = 1
return X,Y
#Get the training data
X, Y = generateData("training_material/data/tokenized/")
try_cnn(X, Y)
|
py | b417f26cc70d67016498624c240af56877f4f6a9 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from msrest import Deserializer, Serializer
from ._configuration import AppPlatformManagementClientConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class AppPlatformManagementClient(MultiApiClientMixin, _SDKClient):
"""REST API for Azure Spring Cloud.
    This client contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param api_version: API version to use if no profile is provided, or if missing in profile.
:type api_version: str
:param base_url: Service URL
:type base_url: str
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2020-07-01'
_PROFILE_TAG = "azure.mgmt.appplatform.AppPlatformManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
'sku': '2019-05-01-preview',
}},
_PROFILE_TAG + " latest"
)
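    # The profile pins the 'sku' operation group to the 2019-05-01-preview API version,
    # since that is the only API version shown here that exposes it (see the `sku` property below).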
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
api_version=None, # type: Optional[str]
base_url=None, # type: Optional[str]
profile=KnownProfiles.default, # type: KnownProfiles
**kwargs # type: Any
):
if not base_url:
base_url = 'https://management.azure.com'
self._config = AppPlatformManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(AppPlatformManagementClient, self).__init__(
api_version=api_version,
profile=profile
)
@classmethod
def _models_dict(cls, api_version):
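        # Collect the model classes of the requested API version's models module;
        # used below to build the Serializer/Deserializer for each operation group.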
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2019-05-01-preview: :mod:`v2019_05_01_preview.models<azure.mgmt.appplatform.v2019_05_01_preview.models>`
* 2020-07-01: :mod:`v2020_07_01.models<azure.mgmt.appplatform.v2020_07_01.models>`
* 2020-11-01-preview: :mod:`v2020_11_01_preview.models<azure.mgmt.appplatform.v2020_11_01_preview.models>`
* 2021-06-01-preview: :mod:`v2021_06_01_preview.models<azure.mgmt.appplatform.v2021_06_01_preview.models>`
"""
if api_version == '2019-05-01-preview':
from .v2019_05_01_preview import models
return models
elif api_version == '2020-07-01':
from .v2020_07_01 import models
return models
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview import models
return models
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview import models
return models
raise ValueError("API version {} is not available".format(api_version))
@property
def apps(self):
"""Instance depends on the API version:
* 2019-05-01-preview: :class:`AppsOperations<azure.mgmt.appplatform.v2019_05_01_preview.operations.AppsOperations>`
* 2020-07-01: :class:`AppsOperations<azure.mgmt.appplatform.v2020_07_01.operations.AppsOperations>`
* 2020-11-01-preview: :class:`AppsOperations<azure.mgmt.appplatform.v2020_11_01_preview.operations.AppsOperations>`
* 2021-06-01-preview: :class:`AppsOperations<azure.mgmt.appplatform.v2021_06_01_preview.operations.AppsOperations>`
"""
api_version = self._get_api_version('apps')
if api_version == '2019-05-01-preview':
from .v2019_05_01_preview.operations import AppsOperations as OperationClass
elif api_version == '2020-07-01':
from .v2020_07_01.operations import AppsOperations as OperationClass
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview.operations import AppsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import AppsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'apps'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def bindings(self):
"""Instance depends on the API version:
* 2019-05-01-preview: :class:`BindingsOperations<azure.mgmt.appplatform.v2019_05_01_preview.operations.BindingsOperations>`
* 2020-07-01: :class:`BindingsOperations<azure.mgmt.appplatform.v2020_07_01.operations.BindingsOperations>`
* 2020-11-01-preview: :class:`BindingsOperations<azure.mgmt.appplatform.v2020_11_01_preview.operations.BindingsOperations>`
* 2021-06-01-preview: :class:`BindingsOperations<azure.mgmt.appplatform.v2021_06_01_preview.operations.BindingsOperations>`
"""
api_version = self._get_api_version('bindings')
if api_version == '2019-05-01-preview':
from .v2019_05_01_preview.operations import BindingsOperations as OperationClass
elif api_version == '2020-07-01':
from .v2020_07_01.operations import BindingsOperations as OperationClass
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview.operations import BindingsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import BindingsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'bindings'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def certificates(self):
"""Instance depends on the API version:
* 2019-05-01-preview: :class:`CertificatesOperations<azure.mgmt.appplatform.v2019_05_01_preview.operations.CertificatesOperations>`
* 2020-07-01: :class:`CertificatesOperations<azure.mgmt.appplatform.v2020_07_01.operations.CertificatesOperations>`
* 2020-11-01-preview: :class:`CertificatesOperations<azure.mgmt.appplatform.v2020_11_01_preview.operations.CertificatesOperations>`
* 2021-06-01-preview: :class:`CertificatesOperations<azure.mgmt.appplatform.v2021_06_01_preview.operations.CertificatesOperations>`
"""
api_version = self._get_api_version('certificates')
if api_version == '2019-05-01-preview':
from .v2019_05_01_preview.operations import CertificatesOperations as OperationClass
elif api_version == '2020-07-01':
from .v2020_07_01.operations import CertificatesOperations as OperationClass
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview.operations import CertificatesOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import CertificatesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'certificates'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def config_servers(self):
"""Instance depends on the API version:
* 2020-07-01: :class:`ConfigServersOperations<azure.mgmt.appplatform.v2020_07_01.operations.ConfigServersOperations>`
* 2020-11-01-preview: :class:`ConfigServersOperations<azure.mgmt.appplatform.v2020_11_01_preview.operations.ConfigServersOperations>`
* 2021-06-01-preview: :class:`ConfigServersOperations<azure.mgmt.appplatform.v2021_06_01_preview.operations.ConfigServersOperations>`
"""
api_version = self._get_api_version('config_servers')
if api_version == '2020-07-01':
from .v2020_07_01.operations import ConfigServersOperations as OperationClass
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview.operations import ConfigServersOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import ConfigServersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'config_servers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def custom_domains(self):
"""Instance depends on the API version:
* 2019-05-01-preview: :class:`CustomDomainsOperations<azure.mgmt.appplatform.v2019_05_01_preview.operations.CustomDomainsOperations>`
* 2020-07-01: :class:`CustomDomainsOperations<azure.mgmt.appplatform.v2020_07_01.operations.CustomDomainsOperations>`
* 2020-11-01-preview: :class:`CustomDomainsOperations<azure.mgmt.appplatform.v2020_11_01_preview.operations.CustomDomainsOperations>`
* 2021-06-01-preview: :class:`CustomDomainsOperations<azure.mgmt.appplatform.v2021_06_01_preview.operations.CustomDomainsOperations>`
"""
api_version = self._get_api_version('custom_domains')
if api_version == '2019-05-01-preview':
from .v2019_05_01_preview.operations import CustomDomainsOperations as OperationClass
elif api_version == '2020-07-01':
from .v2020_07_01.operations import CustomDomainsOperations as OperationClass
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview.operations import CustomDomainsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import CustomDomainsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'custom_domains'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def deployments(self):
"""Instance depends on the API version:
* 2019-05-01-preview: :class:`DeploymentsOperations<azure.mgmt.appplatform.v2019_05_01_preview.operations.DeploymentsOperations>`
* 2020-07-01: :class:`DeploymentsOperations<azure.mgmt.appplatform.v2020_07_01.operations.DeploymentsOperations>`
* 2020-11-01-preview: :class:`DeploymentsOperations<azure.mgmt.appplatform.v2020_11_01_preview.operations.DeploymentsOperations>`
* 2021-06-01-preview: :class:`DeploymentsOperations<azure.mgmt.appplatform.v2021_06_01_preview.operations.DeploymentsOperations>`
"""
api_version = self._get_api_version('deployments')
if api_version == '2019-05-01-preview':
from .v2019_05_01_preview.operations import DeploymentsOperations as OperationClass
elif api_version == '2020-07-01':
from .v2020_07_01.operations import DeploymentsOperations as OperationClass
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview.operations import DeploymentsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import DeploymentsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'deployments'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def monitoring_settings(self):
"""Instance depends on the API version:
* 2020-07-01: :class:`MonitoringSettingsOperations<azure.mgmt.appplatform.v2020_07_01.operations.MonitoringSettingsOperations>`
* 2020-11-01-preview: :class:`MonitoringSettingsOperations<azure.mgmt.appplatform.v2020_11_01_preview.operations.MonitoringSettingsOperations>`
* 2021-06-01-preview: :class:`MonitoringSettingsOperations<azure.mgmt.appplatform.v2021_06_01_preview.operations.MonitoringSettingsOperations>`
"""
api_version = self._get_api_version('monitoring_settings')
if api_version == '2020-07-01':
from .v2020_07_01.operations import MonitoringSettingsOperations as OperationClass
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview.operations import MonitoringSettingsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import MonitoringSettingsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'monitoring_settings'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def operations(self):
"""Instance depends on the API version:
* 2019-05-01-preview: :class:`Operations<azure.mgmt.appplatform.v2019_05_01_preview.operations.Operations>`
* 2020-07-01: :class:`Operations<azure.mgmt.appplatform.v2020_07_01.operations.Operations>`
* 2020-11-01-preview: :class:`Operations<azure.mgmt.appplatform.v2020_11_01_preview.operations.Operations>`
* 2021-06-01-preview: :class:`Operations<azure.mgmt.appplatform.v2021_06_01_preview.operations.Operations>`
"""
api_version = self._get_api_version('operations')
if api_version == '2019-05-01-preview':
from .v2019_05_01_preview.operations import Operations as OperationClass
elif api_version == '2020-07-01':
from .v2020_07_01.operations import Operations as OperationClass
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview.operations import Operations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'operations'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def runtime_versions(self):
"""Instance depends on the API version:
* 2019-05-01-preview: :class:`RuntimeVersionsOperations<azure.mgmt.appplatform.v2019_05_01_preview.operations.RuntimeVersionsOperations>`
* 2020-07-01: :class:`RuntimeVersionsOperations<azure.mgmt.appplatform.v2020_07_01.operations.RuntimeVersionsOperations>`
* 2020-11-01-preview: :class:`RuntimeVersionsOperations<azure.mgmt.appplatform.v2020_11_01_preview.operations.RuntimeVersionsOperations>`
* 2021-06-01-preview: :class:`RuntimeVersionsOperations<azure.mgmt.appplatform.v2021_06_01_preview.operations.RuntimeVersionsOperations>`
"""
api_version = self._get_api_version('runtime_versions')
if api_version == '2019-05-01-preview':
from .v2019_05_01_preview.operations import RuntimeVersionsOperations as OperationClass
elif api_version == '2020-07-01':
from .v2020_07_01.operations import RuntimeVersionsOperations as OperationClass
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview.operations import RuntimeVersionsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import RuntimeVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'runtime_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def services(self):
"""Instance depends on the API version:
* 2019-05-01-preview: :class:`ServicesOperations<azure.mgmt.appplatform.v2019_05_01_preview.operations.ServicesOperations>`
* 2020-07-01: :class:`ServicesOperations<azure.mgmt.appplatform.v2020_07_01.operations.ServicesOperations>`
* 2020-11-01-preview: :class:`ServicesOperations<azure.mgmt.appplatform.v2020_11_01_preview.operations.ServicesOperations>`
* 2021-06-01-preview: :class:`ServicesOperations<azure.mgmt.appplatform.v2021_06_01_preview.operations.ServicesOperations>`
"""
api_version = self._get_api_version('services')
if api_version == '2019-05-01-preview':
from .v2019_05_01_preview.operations import ServicesOperations as OperationClass
elif api_version == '2020-07-01':
from .v2020_07_01.operations import ServicesOperations as OperationClass
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview.operations import ServicesOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import ServicesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'services'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def sku(self):
"""Instance depends on the API version:
* 2019-05-01-preview: :class:`SkuOperations<azure.mgmt.appplatform.v2019_05_01_preview.operations.SkuOperations>`
"""
api_version = self._get_api_version('sku')
if api_version == '2019-05-01-preview':
from .v2019_05_01_preview.operations import SkuOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'sku'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def skus(self):
"""Instance depends on the API version:
* 2020-07-01: :class:`SkusOperations<azure.mgmt.appplatform.v2020_07_01.operations.SkusOperations>`
* 2020-11-01-preview: :class:`SkusOperations<azure.mgmt.appplatform.v2020_11_01_preview.operations.SkusOperations>`
* 2021-06-01-preview: :class:`SkusOperations<azure.mgmt.appplatform.v2021_06_01_preview.operations.SkusOperations>`
"""
api_version = self._get_api_version('skus')
if api_version == '2020-07-01':
from .v2020_07_01.operations import SkusOperations as OperationClass
elif api_version == '2020-11-01-preview':
from .v2020_11_01_preview.operations import SkusOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import SkusOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'skus'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details)
|
py | b417f2e2bce07acd35029832c7bce7f7b8bdfdcd | import os
import sys
from utils.submitter import Submitter
def make_dir(dir):
if not os.path.exists(dir):
os.makedirs(dir, exist_ok=True)
def main(argv):
sbatch_cfg = {
# Account name
'account': 'rrg-whitem',
# Job name
'job-name': 'catcher',
# Job time
'time': '0-10:00:00',
        # CPUs per task
'--cpus-per-task': 1,
# Memory
'mem-per-cpu': '2000M',
# Email address
'mail-user': '[email protected]'
}
# sbatch configs backup for different games
# sbatch_cfg['job-name'], sbatch_cfg['time'], sbatch_cfg['mem-per-cpu'] = 'catcher', '0-10:00:00', '2000M'
# sbatch_cfg['job-name'], sbatch_cfg['time'], sbatch_cfg['mem-per-cpu'] = 'copter', '0-05:00:00', '2000M'
# sbatch_cfg['job-name'], sbatch_cfg['time'], sbatch_cfg['mem-per-cpu'] = 'lunar', '0-07:00:00', '2000M'
# sbatch_cfg['job-name'], sbatch_cfg['time'], sbatch_cfg['mem-per-cpu'] = 'minatar', '1-08:00:00', '4000M'
general_cfg = {
# User name
'user': 'qlan3',
# Sbatch script path
'script-path': './sbatch.sh',
# Check time interval in minutes
'check-time-interval': 5,
# Clusters info: {name: capacity}
'clusters': {'Cedar': 3000},
# Job indexes list
'job-list': list(range(1, 30+1))
}
make_dir(f"output/{sbatch_cfg['job-name']}")
submitter = Submitter(general_cfg, sbatch_cfg)
submitter.submit()
if __name__=='__main__':
main(sys.argv) |
py | b417f3b634257f7ddcec64027b9374c05167ffc8 | import random
import haravasto
import json
import time
tiedoston_nimi = "tulokset.txt"
tila = {
"kentta": [],
"jaljella": [],
"nakyva_kentta": [],
"tutkittava_kentta": []
}
kentan_speksit = {
"leveys": 0,
"korkeus": 0,
"miinat_kpl": 0
}
pelin_tiedot = {
"pvm": 0,
"leveys": 0,
"korkeus": 0,
"miinat": 0,
"kesto": 0,
"siirrot": 0,
"tulos": 0
}
"""
At the start of a game this must first be reset,
and then the game's data is stored here
so that it can be saved when the game ends
"""
def alkuvalikko():
"""
    Main menu, where the player chooses what they want to do with their life
"""
print("Halluukko nää pelata miinaharavaa, sillee")
print("""
1. Uusi peli
2. Quit
3. Highscores
""")
while True:
valinta = input("Valinta 1, 2 tai 3: ").strip()
if valinta == "1":
main()
print(" ")
break
            #starts a new game
elif valinta == "2":
print("aijjaa")
break
            #exits the program
elif valinta == "3":
tulostusfunktioxd(tulos_lataus(tiedoston_nimi))
            #prints the results from the file
def aikatiedot():
'''
    Records the date and the start time (in seconds) of the game that was just started
'''
paiva = time.localtime()[2]
kuukausi = time.localtime()[1]
vuosi = time.localtime()[0]
pelin_tiedot["pvm"] = "{}.{}.{}".format(paiva, kuukausi, vuosi)
pelin_tiedot["kesto"] = time.time()
def nollaa_tiedot():
'''
    Resets the data of the current game
'''
pelin_tiedot["pvm"] = 0
pelin_tiedot["leveys"] = 0
pelin_tiedot["korkeus"] = 0
pelin_tiedot["miinat"] = 0
pelin_tiedot["kesto"] = 0
pelin_tiedot["siirrot"] = 0
pelin_tiedot["tulos"] = 0
def tallennus(lista, tiedosto):
"""
    Saves the game stats to a file.
    The list contains the results to be saved
"""
tuloksia = []
try:
with open(tiedosto, "r+") as lahde:
data = json.load(lahde)
data.append(lista)
lahde.seek(0)
json.dump(data, lahde)
            #Adds the data of the game that was just played to the previously stored games
except IOError:
with open(tiedosto, "w") as lahde:
tuloksia.append(lista)
json.dump(tuloksia, lahde)
            #Creates a new file for the results and saves the first game into it
def tulos_lataus(tiedosto):
"""
    Loads the results from the file
"""
try:
with open(tiedosto, "r") as lahde:
tulostaulu = json.load(lahde)
return tulostaulu
except (IOError, json.JSONDecodeError):
print("Homma ei niin sanotusti pelitä")
def tulostusfunktioxd(tulostettavat):
"""
    Prints the player's _magnificent_ achievements
"""
try:
for i, tulos in enumerate(tulostettavat):
if tulos == None:
continue
else:
print("pelikerta #{}. pvm: {}\nkenttä {}x{}, miinojen lkm: {}kpl\nkesto: {}s, siirrot: {}, lopputulos: {}\n".format(
i + 1,
tulos["pvm"],
tulos["leveys"],
tulos["korkeus"],
tulos["miinat"],
tulos["kesto"],
tulos["siirrot"],
tulos["tulos"]
))
except TypeError:
return
def kasittele_hiiri(x, y, painike, muokkaus):
"""
    This function is called when the user clicks the application window with the mouse.
    Contains the actions for the left and right mouse buttons
"""
if painike == haravasto.HIIRI_VASEN:
painike = "vasen"
tulvataytto(tila["kentta"], x // 40, y // 40)
pelin_tiedot["siirrot"] += 1
if tila["tutkittava_kentta"] == tila["kentta"]:
print("Voitit pelin")
haravasto.aseta_hiiri_kasittelija(kasittele_hiiri_loppu)
pelin_tiedot["kesto"] = round(time.time() - pelin_tiedot["kesto"], 1)
pelin_tiedot["tulos"] = "Voitto"
tallennus(pelin_tiedot, tiedoston_nimi)
            #Checks whether every square on the field that has no mine has been opened
            #If so, the game is considered won
            #Also collects the finished game's data and saves it
elif painike == haravasto.HIIRI_OIKEA:
painike = "oikea"
pelin_tiedot["siirrot"] += 1
if tila["nakyva_kentta"][y // 40][x // 40] == " ":
tila["nakyva_kentta"][y // 40][x // 40] = "f"
            #Places a flag on an unopened square
elif tila["nakyva_kentta"][y // 40][x // 40] == "f":
tila["nakyva_kentta"][y // 40][x // 40] = " "
            #Removes the flag from the square
def kasittele_hiiri_loppu(x, y, painike, muokkaus):
'''
    Disables the mouse actions.
    Used when the game has ended (lost or won)
'''
if painike == haravasto.HIIRI_VASEN:
painike = None
elif painike == haravasto.HIIRI_OIKEA:
painike = None
def maaraa_kentta():
'''
    The player defines the width and height of the playing field and the number of mines
'''
while True:
leveys = input("Anna kentän leveys: ")
try:
leveys = int(leveys)
kentan_speksit["leveys"] = leveys
break;
except ValueError:
print("Anna kokonaisluku")
    #The player defines the width of the playing field
while True:
korkeus = input("Anna kentän korkeus: ")
try:
korkeus = int(korkeus)
kentan_speksit["korkeus"] = korkeus
break;
except ValueError:
print("Anna kokonaisluku")
    #The player defines the height of the playing field
while True:
miinat_kpl = input("Anna miinojen lukumäärä: ")
try:
miinat_kpl = int(miinat_kpl)
if miinat_kpl > leveys * korkeus:
print("Miinojen lukumäärä ei voi olla isompi kuin kentän ruutujen määrä.")
continue
kentan_speksit["miinat_kpl"] = miinat_kpl
break;
except ValueError:
print("Anna kokonaisluku")
    #The player defines the number of mines on the playing field
pelin_tiedot["leveys"] = kentan_speksit["leveys"]
pelin_tiedot["korkeus"] = kentan_speksit["korkeus"]
pelin_tiedot["miinat"] = kentan_speksit["miinat_kpl"]
    #Updates the current game's data for the results
def miinoita(kentta, jaljella, miinat_kpl):
"""
    Places N mines at random positions on the field.
"""
kerrat = 0
while kerrat < miinat_kpl:
miina = random.choice(jaljella)
x = miina[0]
y = miina[1]
kentta[y][x] = "x"
tila["tutkittava_kentta"][y][x] = "x"
jaljella.remove(miina)
kerrat += 1
def luo_kentta():
maaraa_kentta()
kentta = []
for rivi in range(kentan_speksit["korkeus"]):
kentta.append([])
for sarake in range(kentan_speksit["leveys"]):
kentta[-1].append(" ")
tila["kentta"] = kentta
    #creates the actual field
kentta2 = []
for rivi in range(kentan_speksit["korkeus"]):
kentta2.append([])
for sarake in range(kentan_speksit["leveys"]):
kentta2[-1].append(" ")
tila["nakyva_kentta"] = kentta2
    #creates the field visible to the player
kentta3 = []
for rivi in range(kentan_speksit["korkeus"]):
kentta3.append([])
for sarake in range(kentan_speksit["leveys"]):
kentta3[-1].append(" ")
tila["tutkittava_kentta"] = kentta3
    #Creates the field used to check whether the game has ended
jaljella = []
for x in range(kentan_speksit["leveys"]):
for y in range(kentan_speksit["korkeus"]):
jaljella.append((x, y))
tila["jaljella"] = jaljella
    #creates a list that tells the program which tiles have not yet been given a mine in the miinoita function
miinoita(tila["kentta"], tila["jaljella"], kentan_speksit["miinat_kpl"])
numerot()
def piirra_kentta():
"""
    Handler function that draws the squares of the minefield, described as a
    two-dimensional list, into the game window. The function is called whenever
    the game engine requests a refresh of the view.
"""
haravasto.tyhjaa_ikkuna()
haravasto.aloita_ruutujen_piirto()
for j, rivi in enumerate(tila["nakyva_kentta"]):
for i, ruutu in enumerate(rivi):
haravasto.lisaa_piirrettava_ruutu(ruutu, i * 40, j * 40)
haravasto.piirra_ruudut()
def tulvataytto(kentta, sx, sy):
"""
    Flood fill: contains the actions that happen when different squares on the field are clicked
"""
safe = []
    safe.append((sx, sy)) #Adds the coordinates given as arguments to the list
if "x" in kentta[sy][sx]:
tila["nakyva_kentta"] = tila["kentta"]
print("Hävisit pelin")
haravasto.aseta_hiiri_kasittelija(kasittele_hiiri_loppu)
pelin_tiedot["kesto"] = round(time.time() - pelin_tiedot["kesto"], 1)
pelin_tiedot["tulos"] = "Häviö"
tallennus(pelin_tiedot, tiedoston_nimi)
        #Marks the game as lost if a square containing a mine is clicked
        #Collects the game's data and saves it
return
for rr in range(1, 8):
if "{}".format(rr) in kentta[sy][sx]:
tila["nakyva_kentta"][sy][sx] = "{}".format(rr)
tila["tutkittava_kentta"][sy][sx] = "{}".format(rr)
return
            #if a number square was clicked, reveal it
            #the square is also added to the tracking field used to check whether the game has ended
alkio = 0
while alkio < len(safe):
x, y = safe[alkio]
        tila["nakyva_kentta"][y][x] = "0" #Marks the square as safe
tila["tutkittava_kentta"][y][x] = "0"
        for i in range(y - 1, y + 2): #Goes through the squares adjacent to the starting coordinates (8 of them)
for j in range(x - 1, x + 2):
try:
if kentta[i][j] == "0" and (j, i) not in safe and i >= 0 and j >= 0:
safe.append((j, i))
                        #Adds to the list of opened tiles the squares that
                        #1. are not bombs or numbers, 2. are not already in the list, 3. are not outside the field
for rr in range(1, 8):
if "{}".format(rr) in kentta[i][j] and i >= 0 and j >= 0:
tila["nakyva_kentta"][i][j] = "{}".format(rr)
tila["tutkittava_kentta"][i][j] = "{}".format(rr)
                            #Adds the number square to the list of opened tiles
except IndexError:
continue
alkio += 1
    #if an empty square was clicked, the whole area of empty squares bounded by numbers is opened
    #the squares are also added to the tracking field used to check whether the game has ended
safe = []
def main():
"""
    Loads the game graphics, resets the game data and records the new game's time info,
    creates the game window and attaches the draw handler to it.
"""
haravasto.lataa_kuvat("spritet")
nollaa_tiedot()
luo_kentta()
haravasto.luo_ikkuna(kentan_speksit["leveys"] * 40, kentan_speksit["korkeus"] * 40)
haravasto.aseta_piirto_kasittelija(piirra_kentta)
haravasto.aseta_hiiri_kasittelija(kasittele_hiiri)
aikatiedot()
haravasto.aloita()
def numerot():
'''
    Places a number in each square according to how many bombs surround it.
'''
pommit = 0
for a, rivi in enumerate(tila["kentta"]):
for b, ruutu in enumerate(rivi):
            #goes through every square
for i in range(a - 1, a + 2):
for j in range(b - 1, b + 2):
                    #checks whether there are bombs around the square
try:
if tila["kentta"][i][j] == "x" and i >= 0 and j >= 0:
pommit += 1
except IndexError:
continue
else:
continue
if tila["kentta"][a][b] != "x":
tila["kentta"][a][b] = "{}".format(pommit)
                #places a number in the square if the square does not contain a bomb
if tila["kentta"][a][b] == "x":
pommit = 0
                #if the square contains a bomb, reset the count of surrounding bombs
for tt in range(1, 8):
if tila["kentta"][a][b] == "{}".format(tt):
pommit = 0
                    #if a number has been placed in the square, reset the count of surrounding bombs
if __name__ == "__main__":
alkuvalikko()
|
py | b417f4461d1834b296b2decc090add75a19882d5 | import numpy as np
import math
from tetris import Shape, ShapeKind
# NOTE: Suffix IP indicates the operation modifies the data in place
def dropDownByDist_IP(board, shape, direction, x0, dist):
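    # Stamp the shape's cells onto the board, shifted down by `dist` rows.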
for x, y in shape.getCoords(direction, x0, 0):
board[y + dist, x] = shape.kind
def dropDown_IP(board, shape, direction, x0):
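    # Compute the largest distance the whole piece can fall without colliding
    # (the minimum over its cells), then stamp it onto the board at that position.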
ht = board.shape[0]
dy = ht - 1
for x, y in shape.getCoords(direction, x0, 0):
yy = 0
while yy + y < ht and (yy + y < 0 or board[(y + yy), x] == ShapeKind.NONE.value):
yy += 1
yy -= 1
if yy < dy:
dy = yy
# print("dropDown: shape {0}, direction {1}, x0 {2}, dy {3}".format(shape.shape, direction, x0, dy))
dropDownByDist_IP(board, shape, direction, x0, dy)
def calcNextDropDist(board, nextShape, d0, xRange):
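    # For each candidate column x0, compute how far the next shape (in orientation d0)
    # would fall; returns a dict mapping x0 -> drop distance.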
ht = board.shape[0]
res = {}
for x0 in xRange:
if x0 not in res:
res[x0] = ht - 1
for x, y in nextShape.getCoords(d0, x0, 0):
yy = 0
while yy + y < ht and (yy + y < 0 or board[(y + yy), x] == ShapeKind.NONE.value):
yy += 1
yy -= 1
if yy < res[x0]:
res[x0] = yy
return res
def calculateScore(step1Board, nextShape, d1, x1, dropDist):
height,width = step1Board.shape
dropDownByDist_IP(step1Board, nextShape, d1, x1, dropDist[x1])
# Term 1: lines to be removed
fullLines, nearFullLines = 0, 0
roofY = [0] * width
holeCandidates = [0] * width
holeConfirm = [0] * width
vHoles, vBlocks = 0, 0
for y in range(height - 1, -1, -1):
hasHole = False
hasBlock = False
for x in range(width):
if step1Board[y, x] == ShapeKind.NONE.value:
hasHole = True
holeCandidates[x] += 1
else:
hasBlock = True
roofY[x] = height - y
if holeCandidates[x] > 0:
holeConfirm[x] += holeCandidates[x]
holeCandidates[x] = 0
if holeConfirm[x] > 0:
vBlocks += 1
if not hasBlock:
break
if not hasHole and hasBlock:
fullLines += 1
vHoles = sum([x ** .7 for x in holeConfirm])
maxHeight = max(roofY) - fullLines
roofDy = [roofY[i] - roofY[i+1] for i in range(len(roofY) - 1)]
if len(roofY) <= 0:
stdY = 0
else:
stdY = math.sqrt(sum([y ** 2 for y in roofY]) / len(roofY) - (sum(roofY) / len(roofY)) ** 2)
if len(roofDy) <= 0:
stdDY = 0
else:
stdDY = math.sqrt(sum([y ** 2 for y in roofDy]) / len(roofDy) - (sum(roofDy) / len(roofDy)) ** 2)
absDy = sum([abs(x) for x in roofDy])
maxDy = max(roofY) - min(roofY)
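    # Hand-tuned linear heuristic: reward completed lines; penalise holes (vHoles),
    # blocks stacked above holes (vBlocks), overall stack height, and surface
    # roughness (differences between adjacent roof heights).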
score = fullLines * 1.8 - vHoles * 1.0 - vBlocks * 0.5 - maxHeight ** 1.5 * 0.02 \
- stdY * 0.0 - stdDY * 0.01 - absDy * 0.2 - maxDy * 0.3
# print(score, fullLines, vHoles, vBlocks, maxHeight, stdY, stdDY, absDy, roofY, d0, x0, d1, x1)
return score
|
py | b417f4ee0b2135b6e5fc425d599a46b3ffcc8983 | # proxy module
from __future__ import absolute_import
from apptools.preferences.preference_binding import *
|
py | b417f4f7a890ef1e671fb368ed4ddbf076f8e090 | from math import prod
from operator import eq, ne, gt
from numbers import Number
from copy import deepcopy
import sos4hjb.polynomials as poly
class Polynomial:
'''
Polynomial expressed as the linear combination of basis vectors. Written in
such a way that coef_dict never contains an item with value equal to zero.
Attributes
----------
coef_dict : dict (key : BasisVector, value : float)
Dictionary that maps each basis vector to its coefficient.
'''
def __init__(self, coef_dict):
self._verify_vectors(coef_dict.keys())
self.coef_dict = {v: c for v, c in coef_dict.items() if optimistic(c, ne, 0)}
def __getitem__(self, vector):
return self.coef_dict[vector] if vector in self.coef_dict else 0
def __setitem__(self, vector, coef):
if pessimistic(coef, eq, 0):
self.coef_dict.pop(vector, None)
else:
self.coef_dict[vector] = coef
self._verify_vectors(self.vectors())
def __eq__(self, other):
        # Comparison with 0 (float) is needed, e.g., in assertAlmostEqual(p, q)
        # where p and q are two polynomials, since what the unittest library does
        # is round(p - q, 7) == 0.
other = Polynomial({}) if other == 0 else other
return self.coef_dict == other.coef_dict
def __ne__(self, other):
return not self == other
def __len__(self):
return len(self.coef_dict)
def __iter__(self):
return iter(self.coef_dict.items())
def __call__(self, evaluation_dict):
return sum(v(evaluation_dict) * c for v, c in self)
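    # Unlike __call__, substitute performs a symbolic substitution and returns a
    # Polynomial rather than a plain number.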
def substitute(self, evaluation_dict):
return sum([v.substitute(evaluation_dict) * c for v, c in self], Polynomial({}))
def __pos__(self):
return deepcopy(self)
def __neg__(self):
return Polynomial({v: - c for v, c in self})
def __abs__(self):
return Polynomial({v: abs(c) for v, c in self})
def __round__(self, digits=0):
return Polynomial({v: round(c, digits) for v, c in self})
def __add__(self, other):
if isinstance(other, Polynomial):
vectors = set(self.vectors() + other.vectors())
return Polynomial({v: self[v] + other[v] for v in vectors})
else:
return NotImplemented
def __iadd__(self, other):
if isinstance(other, Polynomial):
for v, c in other:
self[v] += c
return self
else:
return NotImplemented
def __radd__(self, other):
# Defines 0 + self. Useful to use sum() on a list of polynomials.
if pessimistic(other, eq, 0):
return deepcopy(self)
else:
return NotImplemented
def __sub__(self, other):
# Does not use __add__ to avoid the overhead of __neg__.
if isinstance(other, Polynomial):
vectors = set(self.vectors() + other.vectors())
return Polynomial({v: self[v] - other[v] for v in vectors})
else:
return NotImplemented
def __isub__(self, other):
if isinstance(other, Polynomial):
for v, c in other:
self[v] -= c
return self
else:
return NotImplemented
def __mul__(self, other):
if isinstance(other, Polynomial):
return sum([(vs * vo) * (cs * co) for vs, cs in self for vo, co in other], Polynomial({}))
else:
# Tries to treat other as a scalar (allows, e.g., symbolic coefficients).
return Polynomial({v: c * other for v, c in self})
def __imul__(self, other):
return self * other
def __rmul__(self, other):
return self * other
def __pow__(self, power):
# poly ** 0 = 1, the case 0 ** 0 is left undefined.
if power == 0:
if len(self) == 0:
raise ValueError('Undefined result for 0 ** 0.')
vector_type = type(self.vectors()[0])
return Polynomial({vector_type({}): 1})
# Fall back to the multiplication method.
return prod([self] * (power - 1), start=self)
def derivative(self, variable):
return sum([v.derivative(variable) * c for v, c in self], Polynomial({}))
def jacobian(self, variables):
return [self.derivative(v) for v in variables]
def integral(self, variable):
return sum([v.integral(variable) * c for v, c in self], Polynomial({}))
def definite_integral(self, variables, lbs, ubs):
if not len(variables) == len(lbs) == len(ubs):
raise ValueError(f'integration range and variables have different lenghts.')
integral = self
for v, lb, ub in zip(variables, lbs, ubs):
integral = integral.integral(v)
integral = integral.substitute({v: ub}) - integral.substitute({v: lb})
return integral
def in_chebyshev_basis(self):
return sum([v.in_chebyshev_basis() * c for v, c in self], Polynomial({}))
def in_monomial_basis(self):
return sum([v.in_monomial_basis() * c for v, c in self], Polynomial({}))
def __repr__(self):
# Represent polynomial as 0 if all the coefficients are 0.
if len(self) == 0:
return '0'
# Add one addend per time to the representation.
r = ''
for vector, coef in self:
# Do not represent plus sign if first addend or negative sign.
if len(r) > 0 and optimistic(coef, gt, 0):
r += '+'
# Just represent the coefficient if vector is 1.
if len(vector) == 0:
r += str(coef)
else:
# Just represent - if coefficient is -1.
if pessimistic(coef, eq, - 1):
r += '-'
# Represent coefficient if different from +1.
elif optimistic(coef, ne, 1):
r += str(coef)
# Add representation of vector.
r += vector.__repr__()
return r
def _repr_latex_(self):
return '$' + self.__repr__() + '$'
def vectors(self):
return list(self.coef_dict)
def variables(self):
return list(set(var for vec in self.vectors() for var in vec.variables()))
def coefficients(self):
return list(self.coef_dict.values())
def degree(self):
return max(v.degree() for v in self.vectors()) if len(self) > 0 else 0
def is_odd(self):
return all(v.is_odd() for v in self.vectors()) if len(self) > 0 else False
def is_even(self):
return all(v.is_even() for v in self.vectors())
# ToDo: find a way to get rid of this method (currently needed bcs of definite_integral).
def to_scalar(self):
if self.degree() > 0:
raise RuntimeError(f'polynomial cannot be converted to scalar, it has degree {self.degree()}.')
return 0 if len(self) == 0 else self.coefficients()[0]
@classmethod
def quadratic_form(cls, basis, Q):
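        # Build the polynomial basis^T Q basis, assuming Q is symmetric: only the
        # upper triangle of Q is visited and off-diagonal terms are doubled.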
p = cls({})
for i, bi in enumerate(basis):
for j, bj in enumerate(basis[i:]):
j += i
coef = 1 if i == j else 2
p += (bi * bj) * (Q[i, j] * coef)
return p
@staticmethod
def _verify_vectors(vectors):
vector_types = set(type(v) for v in vectors)
if len(vector_types) > 1:
            raise TypeError(f'basis vectors must have the same type, got {[t.__name__ for t in vector_types]}.')
elif len(vector_types) == 1:
vector_type = list(vector_types)[0]
if not issubclass(vector_type, poly.BasisVector):
raise TypeError(f'basis vectors must be subclasses of BasisVector, got {vector_type.__name__}')
def pessimistic(a, op, b):
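    # True only if both operands are plain numbers and the comparison holds;
    # symbolic coefficients therefore never satisfy a "pessimistic" comparison.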
return isinstance(a, Number) and isinstance(b, Number) and op(a, b)
def optimistic(a, op, b):
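    # True if either operand is symbolic (non-numeric) or the comparison holds,
    # i.e. the comparison is assumed true unless numbers prove otherwise.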
return not isinstance(a, Number) or not isinstance(b, Number) or op(a, b)
|
py | b417f7461523fdfaf87f59f77e1905c16d77419e | from functools import wraps
from json import dumps
from typing import Collection, Optional
from .ast import Node, OperationType
from .visitor import visit, Visitor
from .block_string import print_block_string
__all__ = ["print_ast"]
def print_ast(ast: Node) -> str:
"""Convert an AST into a string.
The conversion is done using a set of reasonable formatting rules.
"""
return visit(ast, PrintAstVisitor())
def add_description(method):
"""Decorator adding the description to the output of a visitor method."""
@wraps(method)
def wrapped(self, node, *args):
return join([node.description, method(self, node, *args)], "\n")
return wrapped
# noinspection PyMethodMayBeStatic
class PrintAstVisitor(Visitor):
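    # Each leave_* method returns the printed string for its node; `visit` assembles
    # the final document text bottom-up from these return values.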
def leave_name(self, node, *_args):
return node.value
def leave_variable(self, node, *_args):
return f"${node.name}"
# Document
def leave_document(self, node, *_args):
return join(node.definitions, "\n\n") + "\n"
def leave_operation_definition(self, node, *_args):
name, op, selection_set = node.name, node.operation, node.selection_set
var_defs = wrap("(", join(node.variable_definitions, ", "), ")")
directives = join(node.directives, " ")
# Anonymous queries with no directives or variable definitions can use the
# query short form.
return (
join([op.value, join([name, var_defs]), directives, selection_set], " ")
if (name or directives or var_defs or op != OperationType.QUERY)
else selection_set
)
def leave_variable_definition(self, node, *_args):
return (
f"{node.variable}: {node.type}"
f"{wrap(' = ', node.default_value)}"
f"{wrap(' ', join(node.directives, ' '))}"
)
def leave_selection_set(self, node, *_args):
return block(node.selections)
def leave_field(self, node, *_args):
return join(
[
wrap("", node.alias, ": ")
+ node.name
+ wrap("(", join(node.arguments, ", "), ")"),
join(node.directives, " "),
node.selection_set,
],
" ",
)
def leave_argument(self, node, *_args):
return f"{node.name}: {node.value}"
# Fragments
def leave_fragment_spread(self, node, *_args):
return f"...{node.name}{wrap(' ', join(node.directives, ' '))}"
def leave_inline_fragment(self, node, *_args):
return join(
[
"...",
wrap("on ", node.type_condition),
join(node.directives, " "),
node.selection_set,
],
" ",
)
def leave_fragment_definition(self, node, *_args):
# Note: fragment variable definitions are experimental and may be changed or
# removed in the future.
return (
f"fragment {node.name}"
f"{wrap('(', join(node.variable_definitions, ', '), ')')}"
f" on {node.type_condition}"
f" {wrap('', join(node.directives, ' '), ' ')}"
f"{node.selection_set}"
)
# Value
def leave_int_value(self, node, *_args):
return node.value
def leave_float_value(self, node, *_args):
return node.value
def leave_string_value(self, node, key, *_args):
if node.block:
return print_block_string(node.value, "" if key == "description" else " ")
return dumps(node.value)
def leave_boolean_value(self, node, *_args):
return "true" if node.value else "false"
def leave_null_value(self, _node, *_args):
return "null"
def leave_enum_value(self, node, *_args):
return node.value
def leave_list_value(self, node, *_args):
return f"[{join(node.values, ', ')}]"
def leave_object_value(self, node, *_args):
return f"{{{join(node.fields, ', ')}}}"
def leave_object_field(self, node, *_args):
return f"{node.name}: {node.value}"
# Directive
def leave_directive(self, node, *_args):
return f"@{node.name}{wrap('(', join(node.arguments, ', '), ')')}"
# Type
def leave_named_type(self, node, *_args):
return node.name
def leave_list_type(self, node, *_args):
return f"[{node.type}]"
def leave_non_null_type(self, node, *_args):
return f"{node.type}!"
# Type System Definitions
def leave_schema_definition(self, node, *_args):
return join(
["schema", join(node.directives, " "), block(node.operation_types)], " "
)
def leave_operation_type_definition(self, node, *_args):
return f"{node.operation.value}: {node.type}"
@add_description
def leave_scalar_type_definition(self, node, *_args):
return join(["scalar", node.name, join(node.directives, " ")], " ")
@add_description
def leave_object_type_definition(self, node, *_args):
return join(
[
"type",
node.name,
wrap("implements ", join(node.interfaces, " & ")),
join(node.directives, " "),
block(node.fields),
],
" ",
)
@add_description
def leave_field_definition(self, node, *_args):
args = node.arguments
args = (
wrap("(\n", indent(join(args, "\n")), "\n)")
if has_multiline_items(args)
else wrap("(", join(args, ", "), ")")
)
directives = wrap(" ", join(node.directives, " "))
return f"{node.name}{args}: {node.type}{directives}"
@add_description
def leave_input_value_definition(self, node, *_args):
return join(
[
f"{node.name}: {node.type}",
wrap("= ", node.default_value),
join(node.directives, " "),
],
" ",
)
@add_description
def leave_interface_type_definition(self, node, *_args):
return join(
[
"interface",
node.name,
wrap("implements ", join(node.interfaces, " & ")),
join(node.directives, " "),
block(node.fields),
],
" ",
)
@add_description
def leave_union_type_definition(self, node, *_args):
return join(
[
"union",
node.name,
join(node.directives, " "),
"= " + join(node.types, " | ") if node.types else "",
],
" ",
)
@add_description
def leave_enum_type_definition(self, node, *_args):
return join(
["enum", node.name, join(node.directives, " "), block(node.values)], " "
)
@add_description
def leave_enum_value_definition(self, node, *_args):
return join([node.name, join(node.directives, " ")], " ")
@add_description
def leave_input_object_type_definition(self, node, *_args):
return join(
["input", node.name, join(node.directives, " "), block(node.fields)], " "
)
@add_description
def leave_directive_definition(self, node, *_args):
args = node.arguments
args = (
wrap("(\n", indent(join(args, "\n")), "\n)")
if has_multiline_items(args)
else wrap("(", join(args, ", "), ")")
)
repeatable = " repeatable" if node.repeatable else ""
locations = join(node.locations, " | ")
return f"directive @{node.name}{args}{repeatable} on {locations}"
def leave_schema_extension(self, node, *_args):
return join(
["extend schema", join(node.directives, " "), block(node.operation_types)],
" ",
)
def leave_scalar_type_extension(self, node, *_args):
return join(["extend scalar", node.name, join(node.directives, " ")], " ")
def leave_object_type_extension(self, node, *_args):
return join(
[
"extend type",
node.name,
wrap("implements ", join(node.interfaces, " & ")),
join(node.directives, " "),
block(node.fields),
],
" ",
)
def leave_interface_type_extension(self, node, *_args):
return join(
[
"extend interface",
node.name,
wrap("implements ", join(node.interfaces, " & ")),
join(node.directives, " "),
block(node.fields),
],
" ",
)
def leave_union_type_extension(self, node, *_args):
return join(
[
"extend union",
node.name,
join(node.directives, " "),
"= " + join(node.types, " | ") if node.types else "",
],
" ",
)
def leave_enum_type_extension(self, node, *_args):
return join(
["extend enum", node.name, join(node.directives, " "), block(node.values)],
" ",
)
def leave_input_object_type_extension(self, node, *_args):
return join(
["extend input", node.name, join(node.directives, " "), block(node.fields)],
" ",
)
def join(strings: Optional[Collection[str]], separator: str = "") -> str:
"""Join strings in a given collection.
Return an empty string if it is None or empty, otherwise join all items together
separated by separator if provided.
"""
return separator.join(s for s in strings if s) if strings else ""
def block(strings: Collection[str]) -> str:
"""Return strings inside a block.
Given a collection of strings, return a string with each item on its own line,
wrapped in an indented "{ }" block.
"""
return "{\n" + indent(join(strings, "\n")) + "\n}" if strings else ""
def wrap(start: str, string: str, end: str = "") -> str:
"""Wrap string inside other strings at start and end.
If the string is not None or empty, then wrap with start and end, otherwise return
an empty string.
"""
return f"{start}{string}{end}" if string else ""
def indent(string: str) -> str:
"""Indent string with two spaces.
If the string is not None or empty, add two spaces at the beginning of every line
inside the string.
"""
return " " + string.replace("\n", "\n ") if string else string
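# Illustration added for clarity (not part of the original module): the helpers
# above compose all printed output, e.g.
#   join(["a", "", "b"], " ")  -> "a b"
#   wrap("(", "", ")")         -> ""
#   block(["a", "b"])          -> "{\n  a\n  b\n}"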
def is_multiline(string: str) -> bool:
"""Check whether a string consists of multiple lines."""
return "\n" in string
def has_multiline_items(maybe_list: Optional[Collection[str]]):
"""Check whether one of the items in the list has multiple lines."""
return maybe_list and any(is_multiline(item) for item in maybe_list)
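# Hedged usage sketch (added): assuming this file is the printer module of a
# graphql-core style package whose sibling parser exposes `parse`, printing a
# parsed document round-trips simple queries:
#   from graphql.language import parse, print_ast   # assumed import path
#   print_ast(parse("{ hero { name } }"))
#   # -> "{\n  hero {\n    name\n  }\n}\n"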
|
py | b417f80a0fa0af2351c66cf57e8f2b7ecee87af4 | import hashlib
import gi
gi.require_version('Gdk', '3.0')
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.CopyToClipboardAction import CopyToClipboardAction
class Hash2Extension(Extension):
def __init__(self):
super(Hash2Extension, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
class KeywordQueryEventListener(EventListener):
def on_event(self, event, extension):
items = []
argument = (event.get_argument() or '').encode('utf-8')
keyword = event.get_keyword()
# Find the keyword id using the keyword (since the keyword can be changed by users)
for kwId, kw in extension.preferences.items():
if kw == keyword:
keywordId = kwId
# Show the algorithm specified as keyword, or all if the keyword was "hash"
algos = hashlib.algorithms_guaranteed if keywordId == 'hash' else [keywordId]
for algo in algos:
try:
seed = hashlib.new(algo)
seed.update(argument)
hash = seed.hexdigest()
items.append(ExtensionResultItem(icon='icon.svg', name=hash, description=algo, on_enter=CopyToClipboardAction(hash), highlightable=False))
            except Exception:
                # some algorithms (e.g. shake_*) need extra arguments for
                # hexdigest(), and others may be unavailable; skip them quietly
                pass
return RenderResultListAction(items)
if __name__ == '__main__':
Hash2Extension().run()
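# Hedged illustration (added): stripped of the ulauncher plumbing, the listener
# above reduces to hashlib.new(algo) over the query text, e.g.
#   import hashlib
#   h = hashlib.new('sha256')
#   h.update('hello'.encode('utf-8'))
#   h.hexdigest()
#   # -> '2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824'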
|
py | b417f8577aa9b1b067b868b89be06f28b58f613a | from __future__ import unicode_literals
from .common import InfoExtractor
from .gigya import GigyaBaseIE
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
clean_html,
extract_attributes,
float_or_none,
get_element_by_class,
int_or_none,
merge_dicts,
str_or_none,
strip_or_none,
url_or_none,
urlencode_postdata
)
class CanvasIE(InfoExtractor):
_VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrt(?:video|nieuws)|sporza|dako)/assets/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
'md5': '37b2b7bb9b3dcaa05b67058dc3a714a9',
'info_dict': {
'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
'ext': 'mp4',
'title': 'Nachtwacht: De Greystook',
'description': 'Nachtwacht: De Greystook',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 1468.02,
},
'expected_warnings': ['is not a supported codec'],
}, {
'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
'only_matching': True,
}]
_GEO_BYPASS = False
_HLS_ENTRY_PROTOCOLS_MAP = {
'HLS': 'm3u8_native',
'HLS_AES': 'm3u8',
}
_REST_API_BASE = 'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v1'
def _real_extract(self, url):
mobj = self._match_valid_url(url)
site_id, video_id = mobj.group('site_id'), mobj.group('id')
data = None
if site_id != 'vrtvideo':
# Old API endpoint, serves more formats but may fail for some videos
data = self._download_json(
'https://mediazone.vrt.be/api/v1/%s/assets/%s'
% (site_id, video_id), video_id, 'Downloading asset JSON',
'Unable to download asset JSON', fatal=False)
# New API endpoint
if not data:
headers = self.geo_verification_headers()
headers.update({'Content-Type': 'application/json'})
token = self._download_json(
'%s/tokens' % self._REST_API_BASE, video_id,
'Downloading token', data=b'', headers=headers)['vrtPlayerToken']
data = self._download_json(
'%s/videos/%s' % (self._REST_API_BASE, video_id),
video_id, 'Downloading video JSON', query={
'vrtPlayerToken': token,
'client': '%s@PROD' % site_id,
}, expected_status=400)
if not data.get('title'):
code = data.get('code')
if code == 'AUTHENTICATION_REQUIRED':
self.raise_login_required()
elif code == 'INVALID_LOCATION':
self.raise_geo_restricted(countries=['BE'])
raise ExtractorError(data.get('message') or code, expected=True)
title = data['title']
description = data.get('description')
formats = []
subtitles = {}
for target in data['targetUrls']:
format_url, format_type = url_or_none(target.get('url')), str_or_none(target.get('type'))
if not format_url or not format_type:
continue
format_type = format_type.upper()
if format_type in self._HLS_ENTRY_PROTOCOLS_MAP:
fmts, subs = self._extract_m3u8_formats_and_subtitles(
format_url, video_id, 'mp4', self._HLS_ENTRY_PROTOCOLS_MAP[format_type],
m3u8_id=format_type, fatal=False)
formats.extend(fmts)
subtitles = self._merge_subtitles(subtitles, subs)
elif format_type == 'HDS':
formats.extend(self._extract_f4m_formats(
format_url, video_id, f4m_id=format_type, fatal=False))
elif format_type == 'MPEG_DASH':
fmts, subs = self._extract_mpd_formats_and_subtitles(
format_url, video_id, mpd_id=format_type, fatal=False)
formats.extend(fmts)
subtitles = self._merge_subtitles(subtitles, subs)
elif format_type == 'HSS':
fmts, subs = self._extract_ism_formats_and_subtitles(
format_url, video_id, ism_id='mss', fatal=False)
formats.extend(fmts)
subtitles = self._merge_subtitles(subtitles, subs)
else:
formats.append({
'format_id': format_type,
'url': format_url,
})
self._sort_formats(formats)
subtitle_urls = data.get('subtitleUrls')
if isinstance(subtitle_urls, list):
for subtitle in subtitle_urls:
subtitle_url = subtitle.get('url')
if subtitle_url and subtitle.get('type') == 'CLOSED':
subtitles.setdefault('nl', []).append({'url': subtitle_url})
return {
'id': video_id,
'display_id': video_id,
'title': title,
'description': description,
'formats': formats,
'duration': float_or_none(data.get('duration'), 1000),
'thumbnail': data.get('posterImageUrl'),
'subtitles': subtitles,
}
class CanvasEenIE(InfoExtractor):
IE_DESC = 'canvas.be and een.be'
_VALID_URL = r'https?://(?:www\.)?(?P<site_id>canvas|een)\.be/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
'md5': 'ed66976748d12350b118455979cca293',
'info_dict': {
'id': 'mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
'display_id': 'de-afspraak-veilt-voor-de-warmste-week',
'ext': 'flv',
'title': 'De afspraak veilt voor de Warmste Week',
'description': 'md5:24cb860c320dc2be7358e0e5aa317ba6',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 49.02,
},
'expected_warnings': ['is not a supported codec'],
}, {
# with subtitles
'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
'info_dict': {
'id': 'mz-ast-5240ff21-2d30-4101-bba6-92b5ec67c625',
'display_id': 'pieter-0167',
'ext': 'mp4',
'title': 'Pieter 0167',
'description': 'md5:943cd30f48a5d29ba02c3a104dc4ec4e',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 2553.08,
'subtitles': {
'nl': [{
'ext': 'vtt',
}],
},
},
'params': {
'skip_download': True,
},
'skip': 'Pagina niet gevonden',
}, {
'url': 'https://www.een.be/thuis/emma-pakt-thilly-aan',
'info_dict': {
'id': 'md-ast-3a24ced2-64d7-44fb-b4ed-ed1aafbf90b8',
'display_id': 'emma-pakt-thilly-aan',
'ext': 'mp4',
'title': 'Emma pakt Thilly aan',
'description': 'md5:c5c9b572388a99b2690030afa3f3bad7',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 118.24,
},
'params': {
'skip_download': True,
},
'expected_warnings': ['is not a supported codec'],
}, {
'url': 'https://www.canvas.be/check-point/najaar-2016/de-politie-uw-vriend',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
site_id, display_id = mobj.group('site_id'), mobj.group('id')
webpage = self._download_webpage(url, display_id)
title = strip_or_none(self._search_regex(
r'<h1[^>]+class="video__body__header__title"[^>]*>(.+?)</h1>',
webpage, 'title', default=None) or self._og_search_title(
webpage, default=None))
video_id = self._html_search_regex(
r'data-video=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
group='id')
return {
'_type': 'url_transparent',
'url': 'https://mediazone.vrt.be/api/v1/%s/assets/%s' % (site_id, video_id),
'ie_key': CanvasIE.ie_key(),
'id': video_id,
'display_id': display_id,
'title': title,
'description': self._og_search_description(webpage),
}
class VrtNUIE(GigyaBaseIE):
IE_DESC = 'VrtNU.be'
_VALID_URL = r'https?://(?:www\.)?vrt\.be/vrtnu/a-z/(?:[^/]+/){2}(?P<id>[^/?#&]+)'
_TESTS = [{
# Available via old API endpoint
'url': 'https://www.vrt.be/vrtnu/a-z/postbus-x/1989/postbus-x-s1989a1/',
'info_dict': {
'id': 'pbs-pub-e8713dac-899e-41de-9313-81269f4c04ac$vid-90c932b1-e21d-4fb8-99b1-db7b49cf74de',
'ext': 'mp4',
'title': 'Postbus X - Aflevering 1 (Seizoen 1989)',
'description': 'md5:b704f669eb9262da4c55b33d7c6ed4b7',
'duration': 1457.04,
'thumbnail': r're:^https?://.*\.jpg$',
'series': 'Postbus X',
'season': 'Seizoen 1989',
'season_number': 1989,
'episode': 'De zwarte weduwe',
'episode_number': 1,
'timestamp': 1595822400,
'upload_date': '20200727',
},
'skip': 'This video is only available for registered users',
'params': {
'username': '<snip>',
'password': '<snip>',
},
'expected_warnings': ['is not a supported codec'],
}, {
# Only available via new API endpoint
'url': 'https://www.vrt.be/vrtnu/a-z/kamp-waes/1/kamp-waes-s1a5/',
'info_dict': {
'id': 'pbs-pub-0763b56c-64fb-4d38-b95b-af60bf433c71$vid-ad36a73c-4735-4f1f-b2c0-a38e6e6aa7e1',
'ext': 'mp4',
'title': 'Aflevering 5',
'description': 'Wie valt door de mand tijdens een missie?',
'duration': 2967.06,
'season': 'Season 1',
'season_number': 1,
'episode_number': 5,
},
'skip': 'This video is only available for registered users',
'params': {
'username': '<snip>',
'password': '<snip>',
},
'expected_warnings': ['Unable to download asset JSON', 'is not a supported codec', 'Unknown MIME type'],
}]
_NETRC_MACHINE = 'vrtnu'
_APIKEY = '3_qhEcPa5JGFROVwu5SWKqJ4mVOIkwlFNMSKwzPDAh8QZOtHqu6L4nD5Q7lk0eXOOG'
_CONTEXT_ID = 'R3595707040'
def _real_initialize(self):
self._login()
def _login(self):
username, password = self._get_login_info()
if username is None:
return
auth_info = self._download_json(
'https://accounts.vrt.be/accounts.login', None,
note='Login data', errnote='Could not get Login data',
headers={}, data=urlencode_postdata({
'loginID': username,
'password': password,
'sessionExpiration': '-2',
'APIKey': self._APIKEY,
'targetEnv': 'jssdk',
}))
# Sometimes authentication fails for no good reason, retry
login_attempt = 1
while login_attempt <= 3:
try:
self._request_webpage('https://token.vrt.be/vrtnuinitlogin',
None, note='Requesting XSRF Token', errnote='Could not get XSRF Token',
query={'provider': 'site', 'destination': 'https://www.vrt.be/vrtnu/'})
post_data = {
'UID': auth_info['UID'],
'UIDSignature': auth_info['UIDSignature'],
'signatureTimestamp': auth_info['signatureTimestamp'],
'client_id': 'vrtnu-site',
'_csrf': self._get_cookies('https://login.vrt.be').get('OIDCXSRF').value,
}
self._request_webpage(
'https://login.vrt.be/perform_login',
None, note='Requesting a token', errnote='Could not get a token',
headers={}, data=urlencode_postdata(post_data))
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
login_attempt += 1
self.report_warning('Authentication failed')
self._sleep(1, None, msg_template='Waiting for %(timeout)s seconds before trying again')
else:
raise e
else:
break
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
attrs = extract_attributes(self._search_regex(
r'(<nui-media[^>]+>)', webpage, 'media element'))
video_id = attrs['videoid']
publication_id = attrs.get('publicationid')
if publication_id:
video_id = publication_id + '$' + video_id
page = (self._parse_json(self._search_regex(
            r'digitalData\s*=\s*({.+?});', webpage, 'digital data',
default='{}'), video_id, fatal=False) or {}).get('page') or {}
info = self._search_json_ld(webpage, display_id, default={})
return merge_dicts(info, {
'_type': 'url_transparent',
'url': 'https://mediazone.vrt.be/api/v1/vrtvideo/assets/%s' % video_id,
'ie_key': CanvasIE.ie_key(),
'id': video_id,
'display_id': display_id,
'season_number': int_or_none(page.get('episode_season')),
})
class DagelijkseKostIE(InfoExtractor):
IE_DESC = 'dagelijksekost.een.be'
_VALID_URL = r'https?://dagelijksekost\.een\.be/gerechten/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://dagelijksekost.een.be/gerechten/hachis-parmentier-met-witloof',
'md5': '30bfffc323009a3e5f689bef6efa2365',
'info_dict': {
'id': 'md-ast-27a4d1ff-7d7b-425e-b84f-a4d227f592fa',
'display_id': 'hachis-parmentier-met-witloof',
'ext': 'mp4',
'title': 'Hachis parmentier met witloof',
'description': 'md5:9960478392d87f63567b5b117688cdc5',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 283.02,
},
'expected_warnings': ['is not a supported codec'],
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
title = strip_or_none(get_element_by_class(
'dish-metadata__title', webpage
) or self._html_search_meta(
'twitter:title', webpage))
description = clean_html(get_element_by_class(
'dish-description', webpage)
) or self._html_search_meta(
('description', 'twitter:description', 'og:description'),
webpage)
video_id = self._html_search_regex(
r'data-url=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
group='id')
return {
'_type': 'url_transparent',
'url': 'https://mediazone.vrt.be/api/v1/dako/assets/%s' % video_id,
'ie_key': CanvasIE.ie_key(),
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
}
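# Hedged usage sketch (added): these extractor classes are normally driven by
# the yt-dlp front end rather than instantiated directly; assuming this file
# lives under yt_dlp/extractor/, a call looks roughly like
#   from yt_dlp import YoutubeDL
#   with YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info(
#           'https://dagelijksekost.een.be/gerechten/hachis-parmentier-met-witloof')
#       print(info.get('title'))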
|
py | b417f85e7c432ea88fe8dffced53ac23e103eb8c | # OpenStreetMap Networkx library to download data from OpenStretMap
import osmnx as ox
# Matplotlib-related stuff, for drawing
from matplotlib.path import Path
from matplotlib import pyplot as plt
from matplotlib.patches import PathPatch
# CV2 & Scipy & Numpy & Pandas
import numpy as np
from numpy.random import choice
# Shapely
from shapely.geometry import *
from shapely.affinity import *
# Geopandas
from geopandas import GeoDataFrame
# etc
import pandas as pd
from functools import reduce
from tabulate import tabulate
from IPython.display import Markdown, display
from collections.abc import Iterable
# Fetch
from .fetch import *
# Helper functions
def get_hash(key):
return frozenset(key.items()) if type(key) == dict else key
# Drawing functions
def show_palette(palette, description = ''):
'''
Helper to display palette in Markdown
'''
colorboxes = [
f''
for c in palette
]
display(Markdown((description)))
display(Markdown(tabulate(pd.DataFrame(colorboxes), showindex = False)))
def get_patch(shape, **kwargs):
'''
Convert shapely object to matplotlib patch
'''
#if type(shape) == Path:
# return patches.PathPatch(shape, **kwargs)
if type(shape) == Polygon and shape.area > 0:
return PolygonPatch(list(zip(*shape.exterior.xy)), **kwargs)
else:
return None
# Plot a single shape
def plot_shape(shape, ax, vsketch = None, **kwargs):
'''
Plot shapely object
'''
if isinstance(shape, Iterable) and type(shape) != MultiLineString:
for shape_ in shape:
plot_shape(shape_, ax, vsketch = vsketch, **kwargs)
else:
if not shape.is_empty:
if vsketch is None:
ax.add_patch(PolygonPatch(shape, **kwargs))
else:
if ('draw' not in kwargs) or kwargs['draw']:
if 'stroke' in kwargs:
vsketch.stroke(kwargs['stroke'])
else:
vsketch.stroke(1)
if 'penWidth' in kwargs:
vsketch.penWidth(kwargs['penWidth'])
else:
vsketch.penWidth(0.3)
if 'fill' in kwargs:
vsketch.fill(kwargs['fill'])
else:
vsketch.noFill()
vsketch.geometry(shape)
# Plot a collection of shapes
def plot_shapes(shapes, ax, vsketch = None, palette = None, **kwargs):
'''
Plot collection of shapely objects (optionally, use a color palette)
'''
if not isinstance(shapes, Iterable):
shapes = [shapes]
for shape in shapes:
if palette is None:
plot_shape(shape, ax, vsketch = vsketch, **kwargs)
else:
plot_shape(shape, ax, vsketch = vsketch, fc = choice(palette), **kwargs)
# Parse query (by coordinates, OSMId or name)
def parse_query(query):
if type(query) == tuple:
return 'coordinates'
    elif False:
        # OSM id detection is disabled in this version; any non-tuple query
        # falls through and is treated as an address below
        return 'osmid'
else:
return 'address'
# Apply transformation (translation & scale) to layers
def transform(layers, x, y, scale_x, scale_y, rotation):
# Transform layers (translate & scale)
k, v = zip(*layers.items())
v = GeometryCollection(v)
if (x is not None) and (y is not None):
v = translate(v, *(np.array([x, y]) - np.concatenate(v.centroid.xy)))
if scale_x is not None:
v = scale(v, scale_x, 1)
if scale_y is not None:
v = scale(v, 1, scale_y)
if rotation is not None:
v = rotate(v, rotation)
layers = dict(zip(k, v))
return layers
# Plot
def plot(
# Address
query,
# Whether to use a backup for the layers
backup = None,
# Custom postprocessing function on layers
postprocessing = None,
# Radius (in case of circular plot)
radius = None,
# Which layers to plot
layers = {'perimeter': {}},
# Drawing params for each layer (matplotlib params such as 'fc', 'ec', 'fill', etc.)
drawing_kwargs = {},
# Figure parameters
figsize = (10, 10), ax = None, title = None,
# Vsketch parameters
vsketch = None,
# Transform (translation & scale) params
x = None, y = None, scale_x = None, scale_y = None, rotation = None,
):
# Interpret query
query_mode = parse_query(query)
# Save maximum dilation for later use
dilations = [kwargs['dilate'] for kwargs in layers.values() if 'dilate' in kwargs]
max_dilation = max(dilations) if len(dilations) > 0 else 0
####################
### Fetch Layers ###
####################
# Use backup if provided
if backup is not None:
layers = backup
# Otherwise, fetch layers
else:
# Define base kwargs
if radius:
base_kwargs = {
'point': query if query_mode == 'coordinates' else ox.geocode(query),
'radius': radius
}
else:
by_osmid = False
base_kwargs = {
'perimeter': get_perimeter(query, by_osmid = by_osmid)
}
# Fetch layers
layers = {
layer: get_layer(
layer,
**base_kwargs,
**(kwargs if type(kwargs) == dict else {})
)
for layer, kwargs in layers.items()
}
# Apply transformation to layers (translate & scale)
layers = transform(layers, x, y, scale_x, scale_y, rotation)
# Apply postprocessing step to layers
if postprocessing is not None:
layers = postprocessing(layers)
############
### Plot ###
############
# Matplot-specific stuff (only run if vsketch mode isn't activated)
if vsketch is None:
# Ajust axis
ax.axis('off')
ax.axis('equal')
ax.autoscale()
# Plot background
if 'background' in drawing_kwargs:
xmin, ymin, xmax, ymax = layers['perimeter'].bounds
geom = scale(Polygon([
(xmin, ymin),
(xmin, ymax),
(xmax, ymax),
(xmax, ymin)
]), 2, 2)
if vsketch is None:
ax.add_patch(PolygonPatch(geom, **drawing_kwargs['background']))
else:
vsketch.geometry(geom)
# Adjust bounds
xmin, ymin, xmax, ymax = layers['perimeter'].buffer(max_dilation).bounds
if vsketch is None:
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
# Draw layers
for layer, shapes in layers.items():
kwargs = drawing_kwargs[layer] if layer in drawing_kwargs else {}
if 'hatch_c' in kwargs:
# Draw hatched shape
plot_shapes(shapes, ax, vsketch = vsketch, lw = 0, ec = kwargs['hatch_c'], **{k:v for k,v in kwargs.items() if k not in ['lw', 'ec', 'hatch_c']})
# Draw shape contour only
plot_shapes(shapes, ax, vsketch = vsketch, fill = False, **{k:v for k,v in kwargs.items() if k not in ['hatch_c', 'hatch', 'fill']})
else:
# Draw shape normally
plot_shapes(shapes, ax, vsketch = vsketch, **kwargs)
# Return perimeter
return layers
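# Hedged usage sketch (added): a typical call, with matplotlib supplying the
# axes; the layer names ('streets', 'building') and their options are
# assumptions about what get_layer() in .fetch accepts:
#   fig, ax = plt.subplots(figsize=(10, 10))
#   plot(
#       'Porto Alegre, Brazil',
#       ax=ax,
#       layers={'perimeter': {}, 'streets': {'dilate': 5}, 'building': {}},
#       drawing_kwargs={'perimeter': {'fc': '#F2F4CB', 'ec': '#2F3737', 'lw': 1}},
#   )
#   plt.show()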
|
py | b417f887e9d3c8d4e6ad5a86b1d0213ce7aa40ac | #!/usr/bin/env python
# To use:
# python setup.py install
from setuptools import setup
setup(name = 'picmistandard',
version = '0.0.14',
description = 'Python base classes for PICMI standard',
platforms = 'any',
packages = ['picmistandard'],
package_dir = {'picmistandard': '.'},
url = 'https://github.com/picmi-standard/picmi'
)
|
py | b417f9c13f855701aeef8119a1976207b63a1d89 | import pyb
class Matrix8x8:
"""
Driver for AdaFruit 8x8 LED Matrix display with HT16K33 backpack.
Example of use:
display = Matrix8x8()
display.set(b'\xFF' * 8) # turn on all LEDs
display.clear() # turn off all LEDs
display.set_row(2, 0xFF) # turn on all LEDs in row 2
display.set_column(3, 0xFF) # turn on all LEDs in column 3
display.set_pixel(7, 6) # turn on LED at row 7, column 6
"""
row_addr = (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E)
def __init__(self, i2c_bus=1, addr=0x70, brightness=15, i2c=None):
"""
Params:
* i2c_bus = I2C bus ID (1 or 2) or None (if param 'i2c' is provided)
* addr = I2C address of connected display
* brightness = display brightness (0 - 15)
* i2c = initialised instance of pyb.I2C object
"""
self._blinking = 0
self.addr = addr
self.buf = bytearray(8)
# I2C init
if i2c:
self.i2c = i2c
else:
self.i2c = pyb.I2C(i2c_bus, pyb.I2C.MASTER, baudrate=400000)
# set HT16K33 oscillator on
self._send(0x21)
self.set_brightness(brightness)
self.clear()
self.on()
def _send(self, data):
"""
Send data over I2C.
"""
self.i2c.send(data, self.addr)
def _send_row(self, row):
"""
Send single row over I2C.
"""
data = bytes((self.row_addr[row], rotate_right(self.buf[row])))
self._send(data)
def _send_buf(self):
"""
Send buffer over I2C.
"""
data = bytearray(16)
i = 1
for byte in self.buf:
data[i] = rotate_right(byte)
i += 2
self._send(data)
def _clear_column(self, column):
"""
Clear column in buffer (set it to 0).
"""
mask = 0x80 >> column
for row in range(8):
if self.buf[row] & mask:
self.buf[row] ^= mask
def _set_column(self, column, byte):
"""
Set column in buffer by byte.
"""
self._clear_column(column)
if byte == 0:
return
mask = 0x80
for row in range(8):
shift = column - row
if shift >= 0:
self.buf[row] |= (byte & mask) >> shift
else:
self.buf[row] |= (byte & mask) << abs(shift)
mask >>= 1
def on(self):
"""
Turn on display.
"""
self.is_on = True
self._send(0x81 | self._blinking << 1)
def off(self):
"""
Turn off display. You can controll display when it's off (change image,
brightness, blinking, ...).
"""
self.is_on = False
self._send(0x80)
def set_brightness(self, value):
"""
Set display brightness. Value from 0 (min) to 15 (max).
"""
self._send(0xE0 | value)
def set_blinking(self, mode):
"""
Set blinking. Modes:
0 - blinking off
1 - blinking at 2Hz
2 - blinking at 1Hz
3 - blinking at 0.5Hz
"""
self._blinking = mode
if self.is_on:
self.on()
def set(self, bitmap):
"""
Show bitmap on display. Bitmap should be 8 bytes/bytearray object or any
iterable object containing 8 bytes (one byte per row).
"""
self.buf = bytearray(bitmap)
self._send_buf()
def clear(self):
"""
Clear display.
"""
for i in range(8):
self.buf[i] = 0
self._send_buf()
def set_row(self, row, byte):
"""
Set row by byte.
"""
self.buf[row] = byte
self._send_row(row)
def clear_row(self, row):
"""
Clear row.
"""
self.set_row(row, 0)
def set_column(self, column, byte):
"""
Set column by byte.
"""
self._set_column(column, byte)
self._send_buf()
def clear_column(self, column):
"""
Clear column.
"""
self._clear_column(column)
self._send_buf()
def set_pixel(self, row, column):
"""
Set (turn on) pixel.
"""
self.buf[row] |= (0x80 >> column)
self._send_row(row)
def clear_pixel(self, row, column):
"""
Clear pixel.
"""
self.buf[row] &= ~(0x80 >> column)
self._send_row(row)
def rotate_right(byte):
"""
Rotate bits right.
"""
byte &= 0xFF
bit = byte & 0x01
byte >>= 1
    if bit:
byte |= 0x80
return byte
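# Illustration added for clarity: rotate_right() moves bit 0 into bit 7, e.g.
#   rotate_right(0b00000001) == 0b10000000  # 0x80
#   rotate_right(0b10000000) == 0b01000000  # 0x40
# presumably to match the column ordering of the HT16K33 backpack wiring.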
|
py | b417fa05e636025bb6f2747ca3e28f6d3dcd43e2 |
import json
ONE_MONTH = 24*30
class Construct_Lacima_Redis_Monitoring(object):
def __init__(self,bc,cd):
bc.add_header_node("REDIS_MONITORING","nano_data_center",properties={})
cd.construct_package("REDIS_MONITORING")
cd.add_redis_stream("REDIS_MONITOR_KEY_STREAM")
cd.add_redis_stream("REDIS_MONITOR_CLIENT_STREAM")
cd.add_redis_stream("REDIS_MONITOR_MEMORY_STREAM")
cd.add_redis_stream("REDIS_MONITOR_CALL_STREAM")
cd.add_redis_stream("REDIS_MONITOR_CMD_TIME_STREAM")
cd.add_redis_stream("REDIS_MONITOR_SERVER_TIME")
cd.close_package_contruction()
bc.end_header_node("REDIS_MONITORING")
#
#
# Add other processes if desired
#
|
py | b417fa3ee6730c84d4e8ef0e3bba9ccca3439089 | import tensorflow as tf
from capsLayer2 import CapsLayer
import math
class Caps2NE(object):
def __init__(self, sequence_length, embedding_size, vocab_size, iter_routing, vec_len_firstCapsLayer,
initialization=[], batch_size=256, num_sampled=256):
# Placeholders for input, output
self.input_x = tf.placeholder(tf.int32, [batch_size, sequence_length], name="input_x")
self.input_y = tf.placeholder(tf.int32, [batch_size, 1], name="input_y")
self.sequence_length = sequence_length
self.embedding_size = embedding_size
self.iter_routing = iter_routing
self.num_outputs_firstCapsLayer = sequence_length
self.vec_len_firstCapsLayer = vec_len_firstCapsLayer
self.num_outputs_secondCapsLayer = 1
self.vec_len_secondCapsLayer = embedding_size
self.batch_size = batch_size
self.vocab_size = vocab_size
self.num_sampled = num_sampled
# Embedding layer
with tf.name_scope("input_feature"):
if initialization != []:
self.input_feature = tf.get_variable(name="input_feature_1", initializer=initialization, trainable=False)
else:
self.input_feature = tf.Variable(
tf.random_uniform([vocab_size, vec_len_firstCapsLayer], -math.sqrt(1.0 / vec_len_firstCapsLayer),
math.sqrt(1.0 / vec_len_firstCapsLayer), seed=1234), name="input_feature_2")
self.embedded_chars = tf.nn.embedding_lookup(self.input_feature, self.input_x)
self.X = tf.expand_dims(self.embedded_chars, -1)
self.build_arch()
self.loss()
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=500)
        tf.logging.info('Setting up the main structure')
def build_arch(self):
# The first capsule layer
with tf.variable_scope('FirstCaps_layer'):
self.primaryCaps = CapsLayer(num_outputs_firstCapsLayer=self.num_outputs_firstCapsLayer,
num_outputs_secondCapsLayer=self.num_outputs_secondCapsLayer,
vec_len_firstCapsLayer=self.vec_len_firstCapsLayer, vec_len_secondCapsLayer=self.vec_len_secondCapsLayer,
layer_type='FirstCapsule', embedding_size=self.embedding_size,
batch_size=self.batch_size, iter_routing=self.iter_routing)
self.caps1 = self.primaryCaps(self.X)
# assert caps1.get_shape() == [self.batch_size, num_outputs_firstCapsLayer, vec_len_firstCapsLayer, 1]
# The second capsule layer
with tf.variable_scope('SecondCaps_layer'):
self.digitCaps = CapsLayer(num_outputs_firstCapsLayer=self.num_outputs_firstCapsLayer,
num_outputs_secondCapsLayer=self.num_outputs_secondCapsLayer,
vec_len_firstCapsLayer=self.vec_len_firstCapsLayer, vec_len_secondCapsLayer=self.vec_len_secondCapsLayer,
layer_type='NextCapsule', embedding_size=self.embedding_size,
batch_size=self.batch_size, iter_routing=self.iter_routing)
self.caps2 = self.digitCaps(self.caps1)
def loss(self):
self.caps2reshape = tf.reshape(self.caps2, (self.batch_size, self.embedding_size))
with tf.name_scope("embedding"):
self.embedding_matrix = tf.get_variable(
"W", shape=[self.vocab_size, self.embedding_size],
initializer=tf.contrib.layers.xavier_initializer(seed=1234))
self.softmax_biases = tf.Variable(tf.zeros([self.vocab_size]))
self.total_loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(weights=self.embedding_matrix, biases=self.softmax_biases, inputs=self.caps2reshape,
labels=self.input_y, num_sampled=self.num_sampled, num_classes=self.vocab_size))
|
py | b417fac7da311ae01ff071ad19aa389ba76f61ef | import pytest
import numpy as np
from dmtools.arrange import image_grid, border
# -----------
# TEST IMAGES
# -----------
# single white pixel
WHITE_PIXEL = np.array([[1]])
# single white pixel with 2 pixel black border
WHITE_PIXEL_BLACK_BORDER = \
np.array([[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 1, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0]])
# four white pixels in 2x2 grid with 1 pixel black border
FOUR_WHITE_PIXEL_GRID = \
np.array([[ 0, 0, 0, 0, 0],
[ 0, 1, 0, 1, 0],
[ 0, 0, 0, 0, 0],
[ 0, 1, 0, 1, 0],
[ 0, 0, 0, 0, 0]])
# single color pixel
COLOR_PIXEL = np.array([[[0.1, 0.5, 0.8]]])
# single color pixel with 1 pixel black border
COLOR_PIXEL_BLACK_BORDER = \
np.array([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.1, 0.5, 0.8], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
# opaque pixel
OPAQUE_PIXEL = np.array([[[0.1, 0.5, 0.8, 0.5]]])
# single opaque pixel with 1 pixel black border
OPAQUE_PIXEL_BLACK_BORDER = \
np.array([[[1, 1, 1, 1], [1.0, 1.0, 1.0, 1.0], [1, 1, 1, 1]],
[[1, 1, 1, 1], [0.1, 0.5, 0.8, 0.5], [1, 1, 1, 1]],
[[1, 1, 1, 1], [1.0, 1.0, 1.0, 1.0], [1, 1, 1, 1]]])
@pytest.mark.parametrize("images,w,h,b,color,result",[
([WHITE_PIXEL], 1, 1, 2, np.array([0]), WHITE_PIXEL_BLACK_BORDER),
([WHITE_PIXEL]*4, 2, 2, 1, np.array([0]), FOUR_WHITE_PIXEL_GRID),
([COLOR_PIXEL], 1, 1, 1, np.array([0,0,0]), COLOR_PIXEL_BLACK_BORDER),
([OPAQUE_PIXEL], 1, 1, 1, None, OPAQUE_PIXEL_BLACK_BORDER)])
def test_image_grid(images, w, h, b, color, result):
assert np.array_equal(result, image_grid(images, w, h, b, color))
@pytest.mark.parametrize("image,b,color,result",[
(WHITE_PIXEL, 2, np.array([0]), WHITE_PIXEL_BLACK_BORDER),
(COLOR_PIXEL, 1, np.array([0,0,0]), COLOR_PIXEL_BLACK_BORDER),
(OPAQUE_PIXEL, 1, None, OPAQUE_PIXEL_BLACK_BORDER)])
def test_border(image, b, color, result):
assert np.array_equal(result, border(image, b, color))
|
py | b417fad09614c258e7b439d95a3248bbe19a522d | # model settings
model = dict(
type='MaskRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=81,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100,
mask_thr_binary=0.5))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric=['bbox', 'segm'])
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[32, 44])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 48
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/mask_rcnn_r50_fpn_instaboost_4x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
py | b417fadb38c583568d7a8303fae15139319a7c46 | import sys
from pathlib import Path
if str(Path.cwd()) not in sys.path:
sys.path.insert(0, str(Path.cwd()))
from envs import physics_sim
import numpy as np
import argparse
from cswm.utils import save_list_dict_h5py
parser = argparse.ArgumentParser()
parser.add_argument('--fname', type=str,
default='data',
help='File name / path.')
parser.add_argument('--num-episodes', type=int, default=1000,
help='Number of episodes to generate.')
parser.add_argument('--seed', type=int, default=0,
help='Random seed.')
parser.add_argument('--eval', action='store_true', default=False,
help='Create evaluation set.')
args = parser.parse_args()
np.random.seed(args.seed)
physics_sim.generate_3_body_problem_dataset(
dest=args.fname + '.npz',
train_set_size=args.num_episodes,
valid_set_size=2,
test_set_size=2,
seq_len=12,
img_size=[50, 50],
dt=2.0,
vx0_max=0.5,
vy0_max=0.5,
color=True,
seed=args.seed
)
# data shape: (num_samples, num_steps, x_shape, y_shape, num_channels)
data = np.load(args.fname + '.npz')
train_x = np.concatenate(
(data['train_x'][:, :-1], data['train_x'][:, 1:]), axis=-1)
train_x = np.transpose(train_x, (0, 1, 4, 2, 3)) / 255.
replay_buffer = []
for idx in range(data['train_x'].shape[0]):
sample = {
'obs': train_x[idx, :-1],
'next_obs': train_x[idx, 1:],
'action': np.zeros((train_x.shape[1] - 1), dtype=np.int64)
}
replay_buffer.append(sample)
save_list_dict_h5py(replay_buffer, args.fname)
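# Hedged usage note (added): run as a script, e.g.
#   python envs/generate_physics_data.py --fname data/balls_train --num-episodes 1000 --seed 0
# (the script path/name is an assumption; only the flags defined above exist).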
|
py | b417faff71275174c5bc1f36ae24be9ce74f9b78 | # ===============================================================================================================
# Copyright (c) 2019, Cornell University. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
#  * Redistributions of source code must retain the above copyright notice, this list of conditions and
# the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# * Neither the name of Cornell University nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# Author: Kai Zhang ([email protected])
#
# The research is based upon work supported by the Office of the Director of National Intelligence (ODNI),
# Intelligence Advanced Research Projects Activity (IARPA), via DOI/IBC Contract Number D17PC00287.
# The U.S. Government is authorized to reproduce and distribute copies of this work for Governmental purposes.
# ===============================================================================================================
from xml.etree.ElementTree import ElementTree
import dateutil.parser
def parse_meta(xml_file):
rpc_dict = {}
tree = ElementTree()
tree.parse(xml_file)
b = tree.find('IMD/IMAGE/SATID') # WorldView
if b.text not in ['WV01', 'WV02', 'WV03']:
raise ValueError('not a WorldView satellite!')
im = tree.find('RPB/IMAGE')
l = im.find('LINENUMCOEFList/LINENUMCOEF')
rpc_dict['rowNum']= [float(c) for c in l.text.split()]
l = im.find('LINEDENCOEFList/LINEDENCOEF')
rpc_dict['rowDen'] = [float(c) for c in l.text.split()]
l = im.find('SAMPNUMCOEFList/SAMPNUMCOEF')
rpc_dict['colNum'] = [float(c) for c in l.text.split()]
l = im.find('SAMPDENCOEFList/SAMPDENCOEF')
rpc_dict['colDen'] = [float(c) for c in l.text.split()]
# self.inverseBias = float(im.find('ERRBIAS').text)
# scale and offset
rpc_dict['rowOff'] = float(im.find('LINEOFFSET').text)
rpc_dict['rowScale'] = float(im.find('LINESCALE').text)
rpc_dict['colOff'] = float(im.find('SAMPOFFSET').text)
rpc_dict['colScale'] = float(im.find('SAMPSCALE').text)
rpc_dict['latOff'] = float(im.find('LATOFFSET').text)
rpc_dict['latScale'] = float(im.find('LATSCALE').text)
rpc_dict['lonOff'] = float(im.find('LONGOFFSET').text)
rpc_dict['lonScale'] = float(im.find('LONGSCALE').text)
rpc_dict['altOff'] = float(im.find('HEIGHTOFFSET').text)
rpc_dict['altScale'] = float(im.find('HEIGHTSCALE').text)
# meta dict
meta_dict = {'rpc': rpc_dict}
# image dimensions
meta_dict['height'] = int(tree.find('IMD/NUMROWS').text)
meta_dict['width'] = int(tree.find('IMD/NUMCOLUMNS').text)
# date string is in ISO format
meta_dict['capTime'] = dateutil.parser.parse(tree.find('IMD/IMAGE/TLCTIME').text)
# sun direction
meta_dict['sunAzim'] = float(tree.find('IMD/IMAGE/MEANSUNAZ').text)
meta_dict['sunElev'] = float(tree.find('IMD/IMAGE/MEANSUNEL').text)
# satellite direction
meta_dict['satAzim'] = float(tree.find('IMD/IMAGE/MEANSATAZ').text)
meta_dict['satElev'] = float(tree.find('IMD/IMAGE/MEANSATEL').text)
# cloudless or not
meta_dict['cloudCover'] = float(tree.find('IMD/IMAGE/CLOUDCOVER').text)
meta_dict['sensor_id'] = tree.find('IMD/IMAGE/SATID').text
return meta_dict
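# Hedged usage sketch added for illustration: parse a WorldView .XML metadata
# file given on the command line and print the non-RPC fields.
if __name__ == '__main__':
    import sys
    import pprint
    meta = parse_meta(sys.argv[1])
    pprint.pprint({k: v for k, v in meta.items() if k != 'rpc'})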
|
py | b417fbbd245d1f6872a5928eb1d791a938a5e0ff | # Author: Christian Brodbeck <[email protected]>
"""Connect Experiment and Serve
Notes
=====
Experiment:
.locate_trf: return filename & whether it exists or instruction
(func, kwargs) to make it
.make_trf: makes trf-file based on filename and kwargs or sends to
dispatcher
.locate_trfs: find list of TRFs that still have to be made
.make_trf_report: makes report assuming filenames exist
Dispatcher:
.add_job - scans for which trfs still have to be made (requirement)
- saves {report: requirement}
- stores requirement jobs
threaded: - adds jobs to server (loader thread)
- gets results from server, checks off requirements, runs
make_report once requirements are fulfilled
"""
import atexit
from collections import Counter
import fnmatch
import logging
from collections import deque
from os.path import commonprefix, exists
from queue import Queue, Empty
import shutil
import sys
from threading import Lock, Thread
from time import sleep, time
from eelbrain import fmtxt
from eelbrain._utils.com import Notifier
from eelbrain._utils import ask
from eelfarm.server import JobServer, JobServerTerminated
from . import read_job_file
from ._jobs import FuncIterJob
MIN_IO_CYCLE_TIME = 5 # I/O thread
TRF_IRRELEVANT_STATE_KEYS = ('group', 'test', 'test_desc', 'model')
def dict_difference(name1, d1, name2, d2):
if d1 == d2:
return {}
diff = {}
for k in set(d1).union(d2):
if k not in d1:
diff[k] = "only in %s: %r" % (name2, d2[k])
elif k not in d2:
diff[k] = "only in %s: %r" % (name1, d1[k])
elif d1[k] != d2[k]:
diff[k] = "%r != %r" % (d1[k], d2[k])
return diff
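# Illustration added for clarity:
#   dict_difference('new', {'a': 1}, 'old', {'a': 2, 'b': 3})
#   -> {'a': '1 != 2', 'b': "only in old: 3"}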
def assert_trf_jobs_equal(new_job, old_job):
    "List of problem descriptions (or None if there is none)"
if new_job == old_job:
return
problems = []
# since job[0] is the key it is equal
for name, new, old in zip(('fields', 'field_values', 'params'),
new_job.state, old_job.state):
if name == 'field_values':
continue
s_diff = dict_difference('new', new, 'old', old)
if s_diff:
for k, v in s_diff.items():
if k in TRF_IRRELEVANT_STATE_KEYS:
continue
problems.append(" state(%s): %s %s" % (name, k, v))
if new_job.args != old_job.args:
problems.append("args unequal:\n%r\n%r" % (new_job.args, old_job.args))
return problems
def print_traceback(exc_info):
# call_pdb does not work properly from a different thread. Instead, can
# I store the traceback and call ipdb form the main thread, e.g. with
# Dispatcher.debug()?
if not hasattr(print_traceback, 'TRACEBACK_PRINTER'):
from IPython.core.ultratb import VerboseTB
print_traceback.TRACEBACK_PRINTER = VerboseTB()
print_traceback.TRACEBACK_PRINTER(*exc_info)
class Dispatcher:
"""Dispatch jobs to Eelfarm server"""
def __init__(self, host=None, job_queue_length=5, notify=False):
self.server = JobServer(host, job_queue_length)
self.e_lock = Lock() # access to experiment
self.logger = logging.getLogger('eelfarm.dispatcher')
# queues
self._request_queue = Queue()
self._user_jobs = [] # jobs added by the user
self._report_jobs = {} # jobs with unique target report file
self._trf_job_queue = deque()
self._trf_jobs = {}
self._job_finalize_queue = deque() # once all TRFs are present
self._report_queue = Queue() # schedule for report (manual)
# flags
self._auto_make_reports = False
self._shutdown = False
self.terminated = False
# initiate threads
self._thread = Thread(target=self._local_io, name="dispatcher")
self._requeue_thread = Thread(target=self._requeue_failed_jobs, name="requeuer")
if notify:
self._notifier = Notifier(notify, 'Dispatcher')
else:
self._notifier = None
atexit.register(self.shutdown, True)
# make client methods available
self.show_workers = self.server.show_workers
self.blacklist_worker = self.server.blacklist_worker
self.shutdown_worker = self.server.shutdown_worker
def start(self):
self.server.start()
self._thread.start()
self._requeue_thread.start()
if self._notifier:
self.logger.info("Notification to %s", self._notifier.to)
def __repr__(self):
if self.terminated:
info = "successfully terminated"
elif self._shutdown:
info = f"shutting down, waiting for {self.server.n_pending_jobs()} pending TRFs..."
else:
items = [f"{len(self._trf_jobs)} pending TRFs"]
n_reports = self._report_queue.qsize()
if n_reports:
items.append(f"{n_reports} reports ready")
info = ', '.join(items)
return f"<Dispatcher: {info}>"
def add_jobs_from_file(self, filename, priority=False):
"""Add jobs from a job-file to the queue
Parameters
----------
filename : str
Path to the job file.
priority : bool
Insert the jobs at the beginning of the queue (default ``False``).
"""
if self._shutdown:
raise RuntimeError("Dispatcher is shutting down.")
n = 0
with self.e_lock:
for job in read_job_file(filename):
if priority:
job.priority = True
self._request_queue.put(job)
n += 1
self.logger.info("%i jobs requested", n)
def add_jobs_from_iter(self, name, job_factory, priority=False):
"""Add jobs by providing an iterator over ``(path, func)`` pairs
Parameters
----------
name : str
Name by which the job will be known.
job_factory : iterator over (path_like, callable)
Iterator over ``(path, job_loader)`` tuples. A ``job_loader`` is a
function which will load the required data and return the ``job``.
The ``job`` itself is a function, whose return value will be saved at
``path`` (see example). Make sure that most of the data is not
loaded until ``job`` is called.
priority : bool
Insert the jobs at the beginning of the queue (default ``False``).
Examples
--------
Generate a job for each of several subjects::
from functools import partial
import os
from eelbrain import boosting
import trftools
def load_job(subject):
y = load_data_for(subject)
x = load_predictors_for(subject)
return partial(boosting, y, x, tstart=0, tstop=0.500)
def job_factory():
for subject in range(1, 10):
path = f'save/to-{subject}.pickle'
if os.path.exists(path):
continue
job_loader = partial(load_job, subject)
yield path, job_loader
dispatcher = trftools.start_dispatcher()
dispatcher.add_jobs_from_iter('my_job', job_factory())
An example that works, turning strings into uppercase::
def load_job(string):
return partial(str.upper, string)
def job_factory():
for string in ['test', 'this']:
path = f'{string}.pickle'
if os.path.exists(path):
continue
job_loader = partial(load_job, string)
yield path, job_loader
dispatcher.add_jobs_from_iter('my_job', job_factory())
"""
self._request_queue.put(FuncIterJob(name, job_factory, priority))
def _local_io(self):
n_exceptions = n_trf_exceptions = 0
while True:
cycle_start_time = time()
new_results = False
# schedule new jobs (on the same thread to make sure we don't miss
# incoming result files)
n_reports_requested = n_trfs_requested = jobs_processed = n_pending = 0
while not self._shutdown:
try:
job = self._request_queue.get(block=False)
except Empty:
if n_reports_requested:
new_results = True
if n_reports_requested or n_trfs_requested:
self.logger.info("%i requests processed, added %i reports and %i TRFs to queue", jobs_processed, n_reports_requested, n_trfs_requested)
if n_pending:
self.logger.warning("%i TRFs already pending", n_pending)
elif jobs_processed:
self.logger.info("%i requests processed, no new jobs", jobs_processed)
break
jobs_processed += 1
# check whether job already exists
if any(job.is_same(j) for j in self._user_jobs):
continue
# initialize job
try:
with self.e_lock:
job.init_sub_jobs()
# check whether all target files already exist
if not job.test_path and not job.missing_trfs and not job.has_followup_jobs():
continue
except Exception:
n_exceptions += 1
if n_exceptions == 1:
self.logger.error("Error initializing job %r", job)
print_traceback(sys.exc_info())
continue
# file the job for secondary tasks once TRFs are done
self._user_jobs.append(job)
if job.test_path:
# file report job, using the test-path to uniquely identify it
self._report_jobs[job.test_path] = job
if not exists(job.test_path):
n_reports_requested += job.report
# if all TRFs are already available
if not job.trf_jobs:
self._job_finalize_queue.append(job)
# schedule all TRF requests
for trfjob in job.trf_jobs:
if trfjob.path in self._trf_jobs:
problems = assert_trf_jobs_equal(trfjob, self._trf_jobs[trfjob.path])
if problems:
self.logger.warning("Mismatching jobs for %s:\n%s", trfjob.path, '\n'.join(problems))
else:
self._trf_jobs[trfjob.path] = trfjob
if self.server.job_exists(trfjob.path):
n_pending += 1
elif job.priority:
self._trf_job_queue.appendleft(trfjob.path)
else:
self._trf_job_queue.append(trfjob.path)
n_trfs_requested += 1
if n_exceptions:
self.logger.error(f"Ignored {n_exceptions} faulty jobs")
n_exceptions = 0
# put a new TRF-job into the server queue
if self._trf_job_queue and not self._shutdown and not self.server.full():
path = self._trf_job_queue.popleft()
func = trfjob = None
if path not in self._trf_jobs:
self.logger.error("Trying to queue non-existing job: %s", path)
else:
trfjob = self._trf_jobs[path]
try:
with self.e_lock:
func = trfjob.generate_job()
except Exception:
n_trf_exceptions += 1
if n_trf_exceptions == 1:
self.logger.error("Error processing trf-job: %s", path)
print_traceback(sys.exc_info())
# remove jobs related to the failed TRF
del self._trf_jobs[path]
for job in reversed(self._user_jobs):
if path in job.missing_trfs:
self._user_jobs.remove(job)
if job.test_path in self._report_jobs:
del self._report_jobs[job.test_path]
if func is not None:
try:
self.server.put(path, func)
except JobServerTerminated:
self.logger.info("Request rejected, server terminated")
except Exception:
self.logger.exception("Request rejected by server: %s", trfjob.desc)
else:
self.logger.info("Request %s", trfjob.desc)
elif n_trf_exceptions:
self.logger.error(f"Ignored {n_trf_exceptions} faulty trf-jobs")
n_trf_exceptions = 0
# receive all available results
while True:
try:
trf_path = self.server.get(block=False)
except Empty:
break
else:
new_results = True
if trf_path in self._trf_jobs:
trfjob = self._trf_jobs.pop(trf_path)
self.logger.info("Received %s", trfjob.desc)
else:
self.logger.info("Received orphan %s", trf_path)
for job in self._user_jobs:
if job.missing_trfs:
job.missing_trfs.discard(trf_path)
if not job.missing_trfs:
self._job_finalize_queue.append(job)
# finalize jobs which received all TRFs
while self._job_finalize_queue:
job = self._job_finalize_queue.popleft()
if job.test_path:
if job.report and not exists(job.test_path):
self._report_queue.put(job)
if job.has_followup_jobs():
with self.e_lock:
for new_job in job.get_followup_jobs(self.logger):
self._request_queue.put(new_job)
elif self._notifier:
message = fmtxt.FMText([f"All TRFs received for {job.name}.", fmtxt.linebreak])
message.append(self.show_jobs())
if not self._request_queue.empty():
message.append(fmtxt.linebreak)
message.append(fmtxt.linebreak)
message.append("Processing new requests...")
self._notifier.send(f'Job done: {job.name}', message)
if self.server.terminated:
return
# make sure we don't keep checking empty queues
if self.server.full() or not self._trf_job_queue:
cycle_time = time() - cycle_start_time
if cycle_time < MIN_IO_CYCLE_TIME:
sleep(MIN_IO_CYCLE_TIME - cycle_time)
def _find_jobs(self, model: str):
return [job for job in self._user_jobs if fnmatch.fnmatch(job.name, model)]
def cancel_job(self, model: str):
"""Cancel one or several jobs
Parameters
----------
model
Pattern to match jobs by model name. For example, a job's full
model, or '*' to match all jobs.
"""
jobs = self._find_jobs(model)
if not jobs:
raise ValueError(f"{model=}: no job with this model name")
n_removed = 0
for job in jobs:
if job.trf_jobs is None:
                continue  # (previously canceled)
for trf_job in job.trf_jobs:
if trf_job.path in self._trf_jobs:
del self._trf_jobs[trf_job.path]
if trf_job.path in self._trf_job_queue:
self._trf_job_queue.remove(trf_job.path)
n_removed += 1
job.cancel()
print(f"{len(jobs)} jobs with {n_removed} TRF-jobs canceled")
def clear_report_queue(self):
"Remove all report requests (but leave TRF-requests)"
while True:
try:
job = self._report_queue.get(False)
if job.test_path in self._report_jobs:
del self._report_jobs[job.test_path]
job.test_path = False
except Empty:
break
def flush(self):
"""Remove finished jobs from list
See Also
--------
clear_report_queue : skip queued report
"""
for job in reversed(self._user_jobs):
if job.missing_trfs:
continue
if job.test_path and not exists(job.test_path):
continue
if job.test_path:
del self._report_jobs[job.test_path]
self._user_jobs.remove(job)
def info(self):
out = fmtxt.Report(f"{self.server.host} ({self.server.ip})", date='%c')
out.append(self.show_workers())
out.append(fmtxt.linebreak)
out.append(self.show_jobs(True))
print(out)
def make_reports(self, block=False, notify=False):
"Make reports with calling thread"
if notify and not self._notifier:
raise ValueError("Can't notify because no notifier is available. Set the notify parameter when initializing the Dispatcher.")
if block:
print("Make all incoming reports; ctrl-c to stop...")
n_made = 0
while True:
try:
job = self._report_queue.get(block, 1000)
except Empty:
if not block:
print("Report queue empty")
if notify and n_made:
self._notifier.send("All queued reports are done.", f"{n_made} reports created.")
return
except KeyboardInterrupt:
break
else:
with self.e_lock:
job.make_test_report()
n_made += 1
sleep(2) # make lock available
def prioritize(self, model: str = None, priority: bool = True):
"""Set the priority of jobs with name matching ``model``
Currently this only affects scheduling of new TRFs, i.e. a change only
takes effect when a model is reduced.
"""
priority = bool(priority)
jobs = self._find_jobs(model)
for job in jobs:
job.priority = priority
def _requeue_failed_jobs(self):
while True:
key = self.server.get_failed(True)
if key is None:
break
elif key in self._trf_jobs:
self._trf_job_queue.appendleft(key)
def remove_broken_worker(self, worker, blacklist=False):
"""Move jobs sent to this worker back into the queue"""
keys = [str(job.path) for job in self.server.remove_broken_worker(worker, blacklist)]
if not keys:
print(f"No jobs found for worker {worker}")
return
n_added = 0
n_skipped = 0
for key in keys:
if key in self._trf_jobs:
self._trf_job_queue.appendleft(key)
n_added += 1
else:
n_skipped += 1
msg = f"{n_added} jobs added back into queue"
if n_skipped:
msg += f"; {n_skipped} unknown jobs skipped"
print(msg)
def remove_broken_jobs(self, pattern):
"""Re-queue jobs for which processing failed
Parameters
----------
pattern : int | str | list
Job model or comparison, job path pattern, or one or more job IDs.
Notes
-----
Move jobs back into the queue based on target filename pattern. Assumes
that the corresponding jobs are not being worked on anymore. Otherwise
they will be received as orphans and overwrite the re-queued jobs' results.
"""
# check if pattern is a model
if isinstance(pattern, str):
model_jobs = [job for job in self._user_jobs if job.trf_jobs and fnmatch.fnmatch(job.name, pattern)]
else:
model_jobs = None
# find TRF-job keys
if model_jobs:
keys = {trfjob.path for job in model_jobs for trfjob in job.trf_jobs}
keys.intersection_update(job.path for job in self.server.pending_jobs())
keys = list(keys)
else:
keys = self.server.find_jobs(pattern, 'pending')
if not keys:
print("No jobs match pattern")
return
prefix = commonprefix(keys)
t = fmtxt.Table('lll')
t.cells("Job", "Worker", "Orphan")
t.midrule()
t.caption("Common prefix: %r" % (prefix,))
n_prefix = len(prefix)
for key in keys:
desc = key[n_prefix:]
desc = desc if len(desc) < 100 else desc[:97] + '...'
orphan = '' if key in self._trf_jobs else 'x'
t.cells(desc, self.server._jobs[key].worker, orphan)
print(t)
command = ask(f"Remove {len(keys)} jobs?", {'requeue': 'requeue jobs', 'drop': 'drop jobs', 'abort': "don't do anything (default)"}, allow_empty=True)
if command in ('requeue', 'drop'):
n_skipped = n_restarted = 0
self.server.remove_broken_job(keys)
for key in keys:
if key in self._trf_jobs:
if command == 'requeue':
self._trf_job_queue.appendleft(key)
n_restarted += 1
else:
pass # FIXME: remove job properly
else:
n_skipped += 1
print(f"{n_restarted} restarted, {n_skipped} skipped")
def show_jobs(self, trfs=False):
pending_jobs = {job.path: job for job in self.server.pending_jobs()}
priority = len({job.priority for job in self._user_jobs}) > 1
t = fmtxt.Table('lllrrl' + 'l' * priority)
t.cells("Exp.", "Epoch", "Model", "TRFs", "Pending")
if priority:
t.cell("Priority")
t.cell('Report')
t.midrule()
for job in self._user_jobs:
if job.trf_jobs is None:
n_trfs = '<not'
n_missing = 'initialized>'
report = ''
else:
n_trfs = len(job.trf_jobs)
n_missing = len(job.missing_trfs)
if job.test_path:
if exists(job.test_path):
report = '\u2611'
else:
report = '\u2610'
elif job.test_path is False:
report = '\u2612'
else:
report = ''
if isinstance(job, FuncIterJob):
t.cells('<iter>', '')
else:
t.cell(job.experiment.__class__.__name__) # Exp
t.cell(job.options.get('epoch', '')) # Epoch
t.cell(job.name)
t.cell(n_trfs) # TRFs
t.cell(n_missing) # Pending
if priority:
t.cell(job.priority)
t.cell(report)
# TRFs currently being processed
if trfs and job.trf_jobs:
trf_jobs = [j for j in job.trf_jobs if j.path in pending_jobs]
n = Counter(pending_jobs[j.path].worker or 'requested' for j in trf_jobs)
for worker in sorted(n):
t.cells('', '')
t.cell(self.server._worker_info.get(worker, worker), just='r')
t.cells('', n[worker])
t.endline()
return t
def shutdown(self, block=True):
"Schedule regular shutdown, waiting for outstanding results"
if not self._shutdown:
self.logger.info("Initiating shutdown...")
self._shutdown = True
# empty queue
try:
while True:
self._request_queue.get(block=False)
except Empty:
pass
self.server.shutdown(True) # need to join, otherwise will hang
if block:
self.join()
def join(self):
if not self._shutdown:
raise RuntimeError("Can not join before shutting down")
self.server.join()
self._thread.join()
self._requeue_thread.join()
self.terminated = True
self.logger.info("Dispatcher successfully terminated.")
def __del__(self):
if not self.terminated:
self.shutdown(True)
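# --- Illustrative usage sketch (added for clarity; not part of the original class) ---
# A rough sketch of how the public dispatcher API shown above might be driven from an
# interactive session. The `dispatcher` name and the model pattern are assumptions;
# only methods defined in this class are used.
#
#   dispatcher.info()                     # show workers and the job table
#   dispatcher.prioritize('model-*')      # prefer matching jobs when scheduling new TRFs
#   dispatcher.make_reports(block=False)  # build any queued reports in this thread
#   dispatcher.cancel_job('model-x')      # drop a job and its queued TRF requests
#   dispatcher.shutdown(block=True)       # wait for outstanding results, then join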
|
py | b417fea3ad14e09cc80a15e4d5b4c0ba24f038fc | import os
import torch
import torch.nn as nn
import onmt.inputters
from collections import deque
from onmt.utils.logging import logger
def build_model_saver(model_opt, opt, model, fields, optim):
model_saver = ModelSaver(opt.save_model,
model,
model_opt,
fields,
optim,
opt.save_checkpoint_steps,
opt.keep_checkpoint)
return model_saver
class ModelSaverBase(object):
"""
Base class for model saving operations
Inherited classes must implement private methods:
* `_save`
* `_rm_checkpoint`
"""
def __init__(self, base_path, model, model_opt, fields, optim,
save_checkpoint_steps, keep_checkpoint=-1):
self.base_path = base_path
self.model = model
self.model_opt = model_opt
self.fields = fields
self.optim = optim
self.keep_checkpoint = keep_checkpoint
self.save_checkpoint_steps = save_checkpoint_steps
if keep_checkpoint > 0:
self.checkpoint_queue = deque([], maxlen=keep_checkpoint)
def maybe_save(self, step, ppl, acc):
"""
Main entry point for model saver
It wraps the `_save` method with checks and apply `keep_checkpoint`
related logic
"""
if self.keep_checkpoint == 0:
return
if step % self.save_checkpoint_steps != 0:
return
chkpt, chkpt_name = self._save(step, ppl, acc)
if self.keep_checkpoint > 0:
if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:
todel = self.checkpoint_queue.popleft()
self._rm_checkpoint(todel)
self.checkpoint_queue.append(chkpt_name)
def _save(self, step, ppl, acc):
""" Save a resumable checkpoint.
Args:
step (int): step number
Returns:
checkpoint: the saved object
checkpoint_name: name (or path) of the saved checkpoint
"""
raise NotImplementedError()
def _rm_checkpoint(self, name):
"""
Remove a checkpoint
Args:
name(str): name that identifies the checkpoint
(it may be a filepath)
"""
raise NotImplementedError()
class ModelSaver(ModelSaverBase):
"""
Simple model saver to filesystem
"""
def __init__(self, base_path, model, model_opt, fields, optim,
save_checkpoint_steps, keep_checkpoint=0):
super(ModelSaver, self).__init__(
base_path, model, model_opt, fields, optim,
save_checkpoint_steps, keep_checkpoint)
def _save(self, step, ppl, acc):
real_model = (self.model.module
if isinstance(self.model, nn.DataParallel)
else self.model)
real_generator = (real_model.generator.module
if isinstance(real_model.generator, nn.DataParallel)
else real_model.generator)
model_state_dict = real_model.state_dict()
model_state_dict = {k: v for k, v in model_state_dict.items()
if 'generator' not in k}
generator_state_dict = real_generator.state_dict()
checkpoint = {
'model': model_state_dict,
'generator': generator_state_dict,
'vocab': onmt.inputters.save_fields_to_vocab(self.fields),
'opt': self.model_opt,
'optim': self.optim,
}
checkpoint_path = '%s_step_%d_ppl_%.2f_acc_%.2f.pt' % \
(self.base_path, step, ppl, acc)
logger.info("Saving checkpoint %s" % (checkpoint_path))
torch.save(checkpoint, checkpoint_path)
return checkpoint, checkpoint_path
def _rm_checkpoint(self, name):
os.remove(name)
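# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of how the saver is typically wired into a training loop. The
# surrounding names (`model_opt`, `opt`, `model`, `fields`, `optim`, `train_steps`,
# `ppl`, `acc`) are assumed to come from the usual OpenNMT-py setup and are not
# defined here.
#
#   model_saver = build_model_saver(model_opt, opt, model, fields, optim)
#   for step in range(1, train_steps + 1):
#       # ... run one training step, producing ppl and acc ...
#       model_saver.maybe_save(step, ppl, acc)  # no-op unless step hits save_checkpoint_steps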
|
py | b41800d6af3e7e72c79c39042d11609f3da8fbdb | from discord.ext import commands
class LocaleStore(commands.Cog):
def __init__(self, bot):
self._db = bot.get_cog("PostgreSQL")
async def get(self, ctx: commands.Context):
"""|coro|
Returns a valid locale when given a context.
Parameters
----------
ctx: commands.Context
Context to return a locale for.
Returns
-------
str
Locale for the given context.
"""
async with self._db.pool.acquire() as conn:
locale = await conn.fetchval(
"SELECT locale FROM userdata WHERE id=$1",
ctx.message.author.id,
)
return locale
async def set(self, ctx: commands.Context, locale: str):
"""|coro|
Sets a valid locale for a given context.
Parameters
----------
ctx: commands.Context
Context to set a locale for.
locale: str
Locale to store for the context's author.
"""
async with self._db.pool.acquire() as conn:
await conn.execute(
"""
INSERT INTO userdata (id, locale) VALUES ($1, $2)
ON CONFLICT (id) DO UPDATE SET (id, locale) = ($1, $2);
""",
ctx.author.id,
locale,
)
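# --- Illustrative usage sketch (added for clarity; not part of the original cog) ---
# Assumes a bot that has already loaded a "PostgreSQL" cog exposing an asyncpg pool,
# and a command context `ctx`; the variable names are placeholders.
#
#   store = LocaleStore(bot)
#   locale = await store.get(ctx)    # e.g. "en_US", or None if unset
#   await store.set(ctx, "de_DE")    # upsert the author's locale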
|
py | b41800eebcea7cff60e4e44c47ea96cec32d8c06 | # model settings
model = dict(
type='FasterRCNN',
pretrained='open-mmlab://resnext101_32x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[2],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
use_sigmoid_cls=True,
use_focal_loss=True),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=5,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=128,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=True),
allowed_border=0,
pos_weight=-1,
smoothl1_beta=1 / 9.0,
debug=False),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/data0/qilei_chen/AI_EYE/BostonAI4DB7/'
img_norm_cfg = dict(
mean=[122.5, 122.5, 122.5], std=[122.5, 122.5, 122.5], to_rgb=True)
img_scale_ = (2048, 2048)
data = dict(
imgs_per_gpu=1,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2014.json',
img_prefix=data_root + 'train2014/',
img_scale=img_scale_,
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2014.json',
img_prefix=data_root + 'val2014/',
img_scale=img_scale_,
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2014.json',
img_prefix=data_root + 'val2014/',
img_scale=img_scale_,
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 20
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = data_root+'work_dirs/faster_rcnn_r50_fpn_1x_with_focal_loss_smallset_advance_optdataset'
load_from = None
resume_from = work_dir+'/epoch_2.pth'#'/data0/qilei_chen/AI_EYE/BostonAI4DB9/work_dirs/faster_rcnn_r50_fpn_1x/epoch_2.pth'
workflow = [('train', 1)]
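# Hedged note (added; not part of the original config): configs in this old-style
# mmdetection format are normally passed to the training entry point, e.g.
#   python tools/train.py path/to/this_config.py
# with the work_dir / resume_from values above controlling checkpointing.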
|
py | b418010c3b682019a0b1f11f6e3402dc521bbd2e | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import subprocess
import sys
from typing import List
import pytest
import composer
@pytest.mark.daily
@pytest.mark.parametrize("args", [
["composer", "--version"],
[sys.executable, "-m", "composer", "--version"],
[sys.executable, "-m", "composer.cli", "--version"],
[sys.executable, "-m", "composer.cli.launcher", "--version"],
])
@pytest.mark.timeout(5) # spawning a subprocess is slow
def test_cli_version(args: List[str]):
version_str = subprocess.check_output(args, text=True)
assert version_str == f"composer {composer.__version__}\n"
|
py | b41803ad234e9c8f9b275145a54036769b0710aa | #!/usr/bin/env python
import csv
import sys
import logging
import os
import re
import multiprocessing
from os import environ
from queue import Queue
from runners.helpers import db
def pull_aws_data():
finished = False
offset = 0
limit = 1000000
finished = False
aws_writer = None
with open('aws_inventory.csv', 'w') as fou:
while not finished:
data = db.fetch(
f'''SELECT distinct INSTANCE:InstanceId::string as instance_id
, min(distinct case when value:"Key"='SFROLE' then value:"Value" else NULL end ) as role FROM (
SELECT distinct INSTANCE FROM SNOWALERT.BASE_DATA.EC2_INSTANCE_SNAPSHOTS_T where timestamp > dateadd(day,-30,current_timestamp)
and INSTANCE:"Tags" not like '%{{"Key":"SFROLE","Value":"XP"}}%'
and INSTANCE:"Tags" not like '%{{"Key":"SFROLE","Value":"IMS_PENDING_SHUTDOWN"}}%'
), lateral flatten(input=>INSTANCE:"Tags")
group by instance_id having ROLE != 'XP' AND ROLE != 'IMS_PENDING_SHUTDOWN' limit {limit} offset {offset}'''
)
num_results = 0
for row in data:
num_results += 1
if aws_writer is None:
aws_writer = csv.DictWriter(fou, row.keys())
aws_writer.writeheader()
aws_writer.writerow(row)
if num_results < limit:
finished = True
offset += limit
def grab_osquery_details(deployments):
osquery_schema = environ.get('SECURITY_SCHEMA')
osquery_query = db.fetch("SHOW VIEWS LIKE 'osquery_v' IN {}".format(osquery_schema))
query_text = None
for row in osquery_query:
query_text = row["text"]
query_text = query_text.split('union all')
for query in query_text:
deployments.append(re.findall('from (.*)', query)[0])
def query_snowflake(query):
global writer # , lock
finished = False
offset = 0
limit = 10000000
while not finished:
num_results = 0
query_with_limit = query + " limit %s offset %s" % (limit, offset)
data = db.fetch(query_with_limit)
for row in data:
num_results += 1
# with lock:
if writer is None:
writer = csv.DictWriter(sys.stdout, row.keys())
writer.writeheader()
writer.writerow(row)
if num_results < limit:
finished = True
offset += limit
pull_aws_data()
deployments = []
grab_osquery_details(deployments)
queries = []
for i in deployments:
queries.append(
"select raw:\"columns\":\"path\"::string as process, date_trunc(day, event_time) as day, raw:\"instance_id\" as instance_id, count(*) as hits from {} where event_time >= dateadd(day,-35,current_timestamp) AND event_time < dateadd(minute,-60,current_timestamp) AND NAME like 'process_events' group by 1,2,3 order by DAY, PROCESS, INSTANCE_ID".format(
i
)
)
def init(l):
global lock, writer
lock = l
writer = None
# l = multiprocessing.Lock()
# pool = multiprocessing.Pool(len(deployments),initializer=init, initargs=(l,))
# results = pool.map(query_snowflake, queries)
# pool.close()
# pool.join()
writer = None
for query in queries:
query_snowflake(query)
|
py | b418045a44d681555f5451111c3fef94bf8f3de9 | #-------------------------------------------------------------------------------
# Name: CONNECT 4 GAME
# Purpose: PROJECT FOR GAME DEVELOPMENT IN OOP
#
# Author: GROUP CATACUTAN, PASCUAL, LAURENT, VENERACION
#
# Created: 30/10/2019
# Copyright: (c) XENON_XEIN_XENLY 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
import pygame as pg
from pygame.locals import *
from connect_4_images import * # BLIT, RECT, SCREEN
from connect_4_scene_switch import *
from _connect4_logic import *
class OPTIONS1():
def __init__(self):
self.gear_bottom_right = ROTATE(image.GEAR,(1336,610),1.5)
self.gear_bottom = ROTATE(image.GEAR,(1030,740),-1.5)
self.__counter1 = 0
self.options_panelX = 400
self.SOUND = False
self.FULL_SCREEN = False
self.FULL_SCREEN_DISABLED = False
self.SOUND_DISABLED = False
self.click_sound, self.click_full = 0,0
self.BACK = False
self.CURSOR_AVAILABLE = False
self.STARTING = True
self.ENDING = False
self.soundX = 400
self.fullX = 400
self.played_once = 0
self.GEAR_START = False
def reset(self):
self.gear_bottom_right = ROTATE(image.GEAR,(1336,610),1.5)
self.gear_bottom = ROTATE(image.GEAR,(1030,740),-1.5)
self.__counter1 = 0
self.options_panelX = 400
self.SOUND = False
self.FULL_SCREEN = False
self.FULL_SCREEN_DISABLED = False
self.SOUND_DISABLED = False
self.click_sound, self.click_full = 0,0
self.BACK = False
self.CURSOR_AVAILABLE = False
self.STARTING = True
self.ENDING = False
self.soundX = 400
self.fullX = 400
self.played_once = 0
self.GEAR_START = False
def SHOW_OPTIONS_BLUR(self,n):
image.OPTIONS_BLUR.set_alpha(n)
BLIT(image.OPTIONS_BLUR,image.ORIGIN)
def OPTIONS1_event_handler(self):
if self.CURSOR_AVAILABLE:
##----------------------------------------------- checking mouse position
if MOUSE_inside((1087,1219),(243,274)):
self.SOUND = True
else: self.SOUND = False
if MOUSE_inside((1111,1192),(325,403)):
self.FULL_SCREEN = True
else: self.FULL_SCREEN = False
if MOUSE_inside((0,965),(0,718)):
self.BACK = True
else: self.BACK = False
##------------------------------------------------- events
for event in pg.event.get():
if event.type == QUIT:
scenes.create_scene('EXIT DIALOG')
elif event.type == MOUSEBUTTONDOWN:
click = get_MOUSECLICK()
if click[0]:
if self.SOUND:
self.click_sound+=1
self.click_sound%=2
if self.click_sound == 1:
self.SOUND_DISABLED = True
else: self.SOUND_DISABLED = False; print(True)
sounds.toggle_mute('HOME')
elif self.FULL_SCREEN:
self.click_full+=1
self.click_full%=2
if self.click_full == 0:
self.FULL_SCREEN_DISABLED = True
else: self.FULL_SCREEN_DISABLED = False
game_window.toggle_fullscreen()
elif self.BACK:
self.CURSOR_AVAILABLE = False
self.GEAR_START = False
self.STARTING = False; self.ENDING = True
'''elif click[2]:
self.start_OPTIONS1()'''
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
self.CURSOR_AVAILABLE = False
self.GEAR_START = False
self.STARTING = False; self.ENDING = True
def start_OPTIONS1(self):
self.reset()
while scenes.scene == 'OPTIONS1':
if self.__counter1 < 300:
if self.STARTING:
self.__counter1 += 10
self.SHOW_OPTIONS_BLUR(self.__counter1)
if self.__counter1 == 300:
self.GEAR_START= True
if self.STARTING:
if sounds.GLOBAL_SOUND:
if self.played_once == 1: pass
else: sounds.MOVING_PANEL_SOUND(); self.played_once = 1
else: self.played_once = 1
if self.options_panelX > 0:
self.options_panelX -= 10
if self.soundX > 0:
self.soundX -= 10
if self.fullX > 0:
self.fullX -= 10
if self.ENDING:
if sounds.GLOBAL_SOUND:
if self.played_once == 1: sounds.MOVING_PANEL_SOUND(); self.played_once = 0
else: pass
if self.options_panelX < 400:
self.options_panelX += 10
else:
if scenes.get_previous_SCENE() == 'HOME' or scenes.get_previous_SCENE() == 'EXIT DIALOG': fade_out.start_fade_out(); scenes.create_scene('HOME'); game_window.MAIN_WINDOW.fill(BLACK)
else: fade_out.start_fade_out(); scenes.create_scene(scenes.get_previous_SCENE()); game_window.MAIN_WINDOW.fill(BLACK)
if self.options_panelX == 0: self.CURSOR_AVAILABLE = True
BLIT(image.OPTIONS_PANEL,(self.options_panelX,image.ORIGIN[1]))
if self.SOUND:
BLIT(image.SOUND_GLOW,image.ORIGIN)
if self.FULL_SCREEN:
BLIT(image.FULL_SCREEN_GLOW,image.ORIGIN)
if sounds.get_sound_condition():
if self.ENDING:
if self.soundX < 400:
self.soundX += 10
BLIT(image.SOUND_NON_ACTIVE,(self.soundX,image.ORIGIN[1]))
if game_window.get_screen_condition():
if self.ENDING:
if self.fullX < 400:
self.fullX += 10
BLIT(image.FULL_SCREEN_NON_ACTIVE,(self.fullX,image.ORIGIN[1]))
if self.GEAR_START:
self.gear_bottom_right.show_rotation()
self.gear_bottom.show_rotation()
image._SHADOW()
if self.BACK:
if self.CURSOR_AVAILABLE:
image._CURSOR_BACK()
else:
if self.CURSOR_AVAILABLE:
image._CURSOR_MAIN()
self.OPTIONS1_event_handler()
#print_current_mouse_position()
UPDATE()
def main():
pass
#GEAR POSITION 1: 176,63
options1 = OPTIONS1()
if __name__ == '__main__':
scenes.scene = 'OPTIONS1'
options1.start_OPTIONS1()
|
py | b41804c313e7afc88a7d248490b09cf28416eb92 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterpolargl.marker"
_path_str = "scatterpolargl.marker.line"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorscale",
"colorsrc",
"reversescale",
"width",
"widthsrc",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is true, the
default palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.line.color`) or the
bounds set in `marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be equidistant
to this point. Has an effect only if in `marker.line.color`is
set to a numerical array. Value should have the same units as
in `marker.line.color`. Has no effect when `marker.line.cauto`
is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
Sets the marker.line color. It accepts either a specific color or
an array of numbers that are mapped to the colorscale relative
to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to scatterpolargl.marker.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The colorscale
must be an array containing arrays mapping a normalized value
to an rgb, rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and highest (1) values
are required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the colorscale in
color space, use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Vi
ridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.line.color`is set to a numerical array. If true,
`marker.line.cmin` will correspond to the last color in the
array and `marker.line.cmax` will correspond to the first
color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the lines bounding the marker points.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# widthsrc
# --------
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for width .
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color`is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets the marker.line color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color`is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
width .
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorscale=None,
colorsrc=None,
reversescale=None,
width=None,
widthsrc=None,
**kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolargl.marker.Line`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color`is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets the marker.line color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color`is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
width .
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterpolargl.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.marker.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("cauto", None)
_v = cauto if cauto is not None else _v
if _v is not None:
self["cauto"] = _v
_v = arg.pop("cmax", None)
_v = cmax if cmax is not None else _v
if _v is not None:
self["cmax"] = _v
_v = arg.pop("cmid", None)
_v = cmid if cmid is not None else _v
if _v is not None:
self["cmid"] = _v
_v = arg.pop("cmin", None)
_v = cmin if cmin is not None else _v
if _v is not None:
self["cmin"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
_v = arg.pop("widthsrc", None)
_v = widthsrc if widthsrc is not None else _v
if _v is not None:
self["widthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
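# --- Illustrative usage sketch (added; not part of the generated module) ---
# Hedged example of constructing this object through the public graph_objects API;
# the attribute values are arbitrary.
#
#   import plotly.graph_objects as go
#   line = go.scatterpolargl.marker.Line(color="rgb(60,60,60)", width=1.5)
#   marker = go.scatterpolargl.Marker(line=line, size=6)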
|
py | b418051bc165161665e42e967d2972009c9dba2f | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.ops import DeformConv
from mmdet.core import multi_apply, multiclass_nms
from ..builder import build_loss
from ..registry import HEADS
from ..utils import bias_init_with_prob, ConvModule
INF = 1e8
class FeatureAlign(nn.Module):
"""Feature Alignment Module.
Feature Alignment Module is implemented based on DCN v1.
It uses anchor shape prediction rather than feature map to
predict offsets of deformable conv layer.
Args:
in_channels (int): Number of channels in the input feature map.
out_channels (int): Number of channels in the output feature map.
kernel_size (int): Deformable conv kernel size.
deformable_groups (int): Deformable conv group size.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
deformable_groups=4):
super(FeatureAlign, self).__init__()
offset_channels = kernel_size * kernel_size * 2
self.conv_offset = nn.Conv2d(4,
deformable_groups * offset_channels,
1,
bias=False)
self.conv_adaption = DeformConv(in_channels,
out_channels,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
deformable_groups=deformable_groups)
self.relu = nn.ReLU(inplace=True)
def init_weights(self):
normal_init(self.conv_offset, std=0.1)
normal_init(self.conv_adaption, std=0.01)
def forward(self, x, shape):
offset = self.conv_offset(shape)
x = self.relu(self.conv_adaption(x, offset))
return x
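# --- Illustrative usage sketch for FeatureAlign (added; not part of the original) ---
# Hedged example; tensor shapes are assumptions. `feat` is one FPN level of shape
# (N, 256, H, W) and `bbox_pred` is the corresponding 4-channel box prediction, as
# used in forward_single() below.
#
#   align = FeatureAlign(in_channels=256, out_channels=256)
#   align.init_weights()
#   aligned = align(feat, bbox_pred.exp())   # offsets derived from the predicted box shape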
@HEADS.register_module
class PPDetHead(nn.Module):
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
strides=(4, 8, 16, 32, 64),
base_edge_list=(16, 32, 64, 126, 256),
scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)),
sigma = 0.4,
with_deform=False,
deformable_groups=4,
loss_cls=None,
loss_bbox=None,
conv_cfg=None,
norm_cfg=None):
super(PPDetHead, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.base_edge_list = base_edge_list
self.scale_ranges = scale_ranges
self.sigma = sigma
self.with_deform = with_deform
self.deformable_groups = deformable_groups
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
# box branch
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.ppdet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
# cls branch
if not self.with_deform:
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.ppdet_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
else:
self.cls_convs.append(
ConvModule(
self.feat_channels,
(self.feat_channels * 4),
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.cls_convs.append(
ConvModule(
(self.feat_channels * 4),
(self.feat_channels * 4),
1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.feature_adaption = FeatureAlign(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deformable_groups=self.deformable_groups)
self.ppdet_cls = nn.Conv2d(
int(self.feat_channels * 4), self.cls_out_channels, 3, padding=1)
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.ppdet_cls, std=0.01, bias=bias_cls)
normal_init(self.ppdet_reg, std=0.01)
if self.with_deform:
self.feature_adaption.init_weights()
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def forward_single(self, x):
cls_feat = x
reg_feat = x
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
bbox_pred = self.ppdet_reg(reg_feat)
if self.with_deform:
cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp())
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.ppdet_cls(cls_feat)
return cls_score, bbox_pred
def get_points(self, featmap_sizes, dtype, device, flatten=False):
points = []
for featmap_size in featmap_sizes:
x_range = torch.arange(featmap_size[1], dtype=dtype, device=device) + 0.5
y_range = torch.arange(featmap_size[0], dtype=dtype, device=device) + 0.5
y, x = torch.meshgrid(y_range, x_range)
if flatten:
points.append((y.flatten(), x.flatten()))
else:
points.append((y, x))
return points
def loss(self,
cls_scores,
bbox_preds,
gt_bbox_list,
gt_label_list,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in
cls_scores]
points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
all_gt_label_list = torch.cat([x for x in gt_label_list])
temp = torch.tensor(0).cuda()
label_size_list = []
for x in gt_label_list:
label_size_list.append(temp)
temp = torch.tensor(x.size()).cuda() + label_size_list[-1]
label_list, bbox_target_list, gt_ids_list = multi_apply(
self.ppdet_target_single,
gt_bbox_list,
gt_label_list,
label_size_list,
featmap_size_list=featmap_sizes,
point_list=points)
flatten_labels = [
torch.cat([labels_level_img.flatten()
for labels_level_img in labels_level])
for labels_level in zip(*label_list)
]
flatten_bbox_targets = [
torch.cat([bbox_targets_level_img.reshape(-1, 4)
for bbox_targets_level_img in bbox_targets_level])
for bbox_targets_level in zip(*bbox_target_list)
]
flatten_ids = [
torch.cat([gt_ids_level_img.flatten()
for gt_ids_level_img in gt_ids_level])
for gt_ids_level in zip(*gt_ids_list)
]
flatten_labels = torch.cat(flatten_labels)
flatten_bbox_targets = torch.cat(flatten_bbox_targets)
flatten_ids = torch.cat(flatten_ids)
num_imgs = cls_scores[0].size(0)
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
pos_inds = (flatten_labels > 0).nonzero().view(-1)
num_pos = len(pos_inds)
neg_inds = (flatten_labels <= 0).nonzero().view(-1)
agg_labels = flatten_labels[neg_inds]
agg_cls_scores = flatten_cls_scores[neg_inds]
num_agg_pos = 0
for i, class_id in enumerate(all_gt_label_list):
aggregation_indices = (flatten_ids == i).nonzero()
if flatten_labels[aggregation_indices].size()[0] != 0:
agg_labels = torch.cat((all_gt_label_list[i:i+1], agg_labels))
agg_cls_scores = torch.cat((flatten_cls_scores[aggregation_indices].mean(dim=0), agg_cls_scores))
num_agg_pos +=1
loss_cls = self.loss_cls(agg_cls_scores, agg_labels, avg_factor=num_agg_pos + num_imgs)
if num_pos > 0:
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_weights = pos_bbox_targets.new_zeros(pos_bbox_targets.size())+1.0
loss_bbox = self.loss_bbox(pos_bbox_preds,
pos_bbox_targets, pos_weights, avg_factor = num_pos)
else:
loss_bbox = torch.tensor([0], dtype=flatten_bbox_preds.dtype, device=flatten_bbox_preds.device)
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox)
def ppdet_target_single(self,
gt_bboxes_raw,
gt_labels_raw,
label_size_list_raw,
featmap_size_list=None,
point_list=None):
gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) * (
gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1]))
label_list = []
bbox_target_list = []
ids_list = []
for base_len, (lower_bound, upper_bound), stride, featmap_size, (y, x) \
in zip(self.base_edge_list, self.scale_ranges, self.strides, featmap_size_list, point_list):
labels = gt_labels_raw.new_zeros(featmap_size)
bbox_targets = gt_bboxes_raw.new(featmap_size[0], featmap_size[1], 4) + 1
gt_ids = gt_labels_raw.new_zeros(featmap_size) - 1
hit_indices = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten()
if len(hit_indices) == 0:
label_list.append(labels)
bbox_target_list.append(torch.log(bbox_targets))
ids_list.append(gt_ids)
continue
_, hit_index_order = torch.sort(-gt_areas[hit_indices])
hit_indices = hit_indices[hit_index_order]
gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride
gt_labels = gt_labels_raw[hit_indices]
half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0])
half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1])
pos_left = torch.ceil(gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long().\
clamp(0, featmap_size[1] - 1)
pos_right = torch.floor(gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long().\
clamp(0, featmap_size[1] - 1)
pos_top = torch.ceil(gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long().\
clamp(0, featmap_size[0] - 1)
pos_down = torch.floor(gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long().\
clamp(0, featmap_size[0] - 1)
for px1, py1, px2, py2, label, gt_id, (gt_x1, gt_y1, gt_x2, gt_y2) in \
zip(pos_left, pos_top, pos_right, pos_down, gt_labels, hit_indices,
gt_bboxes_raw[hit_indices, :]):
labels[py1:py2 + 1, px1:px2 + 1] = label
gt_ids[py1:py2 + 1, px1:px2 + 1] = gt_id + label_size_list_raw
bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = (stride * x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len
bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = (stride * y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len
bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = (gt_x2 - stride * x[py1:py2 + 1, px1:px2 + 1]) / base_len
bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = (gt_y2 - stride * y[py1:py2 + 1, px1:px2 + 1]) / base_len
bbox_targets = bbox_targets.clamp(min=1./16, max=16.)
label_list.append(labels)
bbox_target_list.append(torch.log(bbox_targets))
ids_list.append(gt_ids)
return label_list, bbox_target_list, ids_list
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device, flatten=True)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list, featmap_sizes, points,
img_shape, scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_aug(self,
cls_scores,
bbox_preds,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device, flatten=True)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single_aug(cls_score_list, bbox_pred_list, featmap_sizes, points,
img_shape, scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single_aug(self,
cls_scores,
bbox_preds,
featmap_sizes,
point_list,
img_shape,
scale_factor,
cfg,
rescale=False, debug=False):
assert len(cls_scores) == len(bbox_preds) == len(point_list)
det_bboxes = []
det_scores = []
for cls_score, bbox_pred, featmap_size, stride, base_len, (y, x) in zip(
cls_scores, bbox_preds, featmap_sizes, self.strides, self.base_edge_list, point_list):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).exp()
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
y = y[topk_inds]
x = x[topk_inds]
x1 = (stride * x - base_len * bbox_pred[:, 0]).clamp(min=0, max=img_shape[1] - 1)
y1 = (stride * y - base_len * bbox_pred[:, 1]).clamp(min=0, max=img_shape[0] - 1)
x2 = (stride * x + base_len * bbox_pred[:, 2]).clamp(min=0, max=img_shape[1] - 1)
y2 = (stride * y + base_len * bbox_pred[:, 3]).clamp(min=0, max=img_shape[0] - 1)
bboxes = torch.stack([x1, y1, x2, y2], -1)
det_bboxes.append(bboxes)
det_scores.append(scores)
det_bboxes = torch.cat(det_bboxes)
if rescale:
det_bboxes /= det_bboxes.new_tensor(scale_factor)
det_scores = torch.cat(det_scores)
padding = det_scores.new_zeros(det_scores.shape[0], 1)
det_scores = torch.cat([padding, det_scores], dim=1)
return det_bboxes, det_scores
def get_bboxes_single(self,
cls_scores,
bbox_preds,
featmap_sizes,
point_list,
img_shape,
scale_factor,
cfg,
rescale=False, debug=False):
assert len(cls_scores) == len(bbox_preds) == len(point_list)
det_bboxes = []
det_scores = []
for cls_score, bbox_pred, featmap_size, stride, base_len, (y, x) in zip(
cls_scores, bbox_preds, featmap_sizes, self.strides, self.base_edge_list, point_list):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).exp()
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
y = y[topk_inds]
x = x[topk_inds]
x1 = (stride * x - base_len * bbox_pred[:, 0]).clamp(min=0, max=img_shape[1] - 1)
y1 = (stride * y - base_len * bbox_pred[:, 1]).clamp(min=0, max=img_shape[0] - 1)
x2 = (stride * x + base_len * bbox_pred[:, 2]).clamp(min=0, max=img_shape[1] - 1)
y2 = (stride * y + base_len * bbox_pred[:, 3]).clamp(min=0, max=img_shape[0] - 1)
bboxes = torch.stack([x1, y1, x2, y2], -1)
det_bboxes.append(bboxes)
det_scores.append(scores)
det_bboxes = torch.cat(det_bboxes)
if rescale:
det_bboxes /= det_bboxes.new_tensor(scale_factor)
det_scores = torch.cat(det_scores)
padding = det_scores.new_zeros(det_scores.shape[0], 1)
det_scores = torch.cat([padding, det_scores], dim=1)
if debug:
det_bboxes, det_labels = multiclass_nms(
det_bboxes,
det_scores,
cfg['k'],
cfg['agg_thr'],
cfg['score_thr'],
cfg['nms'],
cfg['max_per_img'])
else:
det_bboxes, det_labels = multiclass_nms(
det_bboxes,
det_scores,
cfg.score_thr,
cfg.k,
cfg.agg_thr,
cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
|
py | b418060bb0ed3821899f5ed228526276b761242b | from .hidden import *
from .convolution import *
from .flatten import *
from .dropout import * |
py | b418064f6ab6b0cbc75073352a0f449cb90c12ca | from .input_layer import Input
from .training import model_utils, optimizers
from .training.graph import context as ctx
from .training.model import Model
|
py | b41806573a547cedafad0db1c3f2b648fc532b45 | ##########################################################################################
# import necessary modules (list of all simulation running modules)
##########################################################################################
import matplotlib.pyplot as plt
from dolfin import *
import os
import sys
import numpy as np
from mshr import *
from scipy import interpolate
#from ufl import *
##########################################################################################
# input text files
##########################################################################################
num = int(sys.argv[1])
mref = 5 #int(sys.argv[2])
flag_quad = 2 # int(sys.argv[3]) # --> if flag_quad == 1,
fname = 'test_data_' + str(num)
input_folder = 'input_data' # location of the bitmaps
data_import = np.loadtxt(input_folder + '/' + fname + '.txt') # <--MNIST data
data = np.zeros(data_import.shape)
for jj in range(0,data.shape[0]):
for kk in range(0,data.shape[1]):
data[jj,kk] = data_import[int(27.0 - kk),jj] #jj is columns of input, kk is rows
folder_name = 'folder' + '_' + fname + '_' + 'mesh' + str(mref) + '_quad' + str(flag_quad)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
##########################################################################################
##########################################################################################
# compliler settings / optimization options
##########################################################################################
parameters["form_compiler"]["cpp_optimize"] = True
parameters["form_compiler"]["representation"] = "uflacs"
parameters["form_compiler"]["quadrature_degree"] = flag_quad
##########################################################################################
################ ~ * ~ * ~ * ~ |
################ ~ * ~ * ~ * ~ | --> before solver loop
################ ~ * ~ * ~ * ~ |
##########################################################################################
# mesh geometry
##########################################################################################
p_1_x = 0; p_1_y = 0;
p_2_x = 28.0; p_2_y = 28.0;
rect = Rectangle(Point(p_1_x,p_1_y),Point(p_2_x,p_2_y))
mesh = generate_mesh(rect,int(28*mref))
##########################################################################################
# mesh and material prop
##########################################################################################
P2 = VectorElement("Lagrange", mesh.ufl_cell(), flag_quad)
TH = P2
W = FunctionSpace(mesh, TH)
V = FunctionSpace(mesh, 'CG', 1)
back = 1.0
high = 100.0
nu = 0.3
material_parameters = {'back':back, 'high':high, 'nu':nu}
def bitmap(x,y): #there could be a much better way to do this, but this is working within the confines of ufl
total = 0
for jj in range(0,data.shape[0]):
for kk in range(0,data.shape[1]):
const1 = conditional(x>=jj,1,0) # x is rows
const2 = conditional(x<jj+1,1,0)
const3 = conditional(y>=kk,1,0) # y is columns
const4 = conditional(y<kk+1,1,0) #less than or equal to?
sum = const1 + const2 + const3 + const4
const = conditional(sum>3,1,0) #ufl equality is not working, would like to make it sum == 4
total += const*data[jj,kk]
return total
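# Hedged aside (not used by the solver): the ufl bitmap above encodes a nearest-pixel
# lookup into the 28x28 data array with conditional() so it can live inside a form.
# For ordinary float coordinates the same lookup is plain integer truncation, which
# the helper below spells out purely for documentation.
def bitmap_numpy(x, y):
    jj = int(np.floor(x))  # x is compared against the first index of data above
    kk = int(np.floor(y))
    if 0 <= jj < data.shape[0] and 0 <= kk < data.shape[1]:
        return data[jj, kk]
    return 0.0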
class GetMat:
def __init__(self,material_parameters,mesh):
mp = material_parameters
self.mesh = mesh
self.back = mp['back']
self.high = mp['high']
self.nu = mp['nu']
def getFunctionMaterials(self, V):
self.x = SpatialCoordinate(self.mesh)
val = bitmap(self.x[0],self.x[1])
E = val/255.0*(self.high-self.back) + self.back
effectiveMdata = {'E':E, 'nu':self.nu}
return effectiveMdata
mat = GetMat(material_parameters, mesh)
EmatData = mat.getFunctionMaterials(V)
E = EmatData['E']
nu = EmatData['nu']
lmbda, mu = (E*nu/((1.0 + nu )*(1.0-2.0*nu))) , (E/(2*(1+nu)))
matdomain = MeshFunction('size_t',mesh,mesh.topology().dim())
dx = Measure('dx',domain=mesh, subdomain_data=matdomain)
##########################################################################################
# define boundary domains
##########################################################################################
btm = CompiledSubDomain("near(x[1], btmCoord)", btmCoord = p_1_y)
btmBC = DirichletBC(W, Constant((0.0,0.0)), btm)
##########################################################################################
# apply traction, and body forces (boundary conditions are within the solver b/c they update)
##########################################################################################
T = Constant((0.0, 0.0)) # Traction force on the boundary
B = Constant((0.0, 0.0))
##########################################################################################
# define finite element problem
##########################################################################################
u = Function(W)
du = TrialFunction(W)
v = TestFunction(W)
##########################################################################################
##########################################################################################
################ ~ * ~ * ~ * ~ |
################ ~ * ~ * ~ * ~ | --> solver loop and post-processing functions
################ ~ * ~ * ~ * ~ |
##########################################################################################
##########################################################################################
def problem_solve(applied_disp,u,du,v):
# Updated boundary conditions
top = CompiledSubDomain("near(x[1], topCoord)", topCoord = p_2_y)
topBC = DirichletBC(W, Constant((1.0*applied_disp,0.0)), top)
bcs = [btmBC,topBC]
# Kinematics
d = len(u)
I = Identity(d) # Identity tensor
F = I + grad(u) # Deformation gradient
F = variable(F)
psi = 1/2*mu*( inner(F,F) - 3 - 2*ln(det(F)) ) + 1/2*lmbda*(1/2*(det(F)**2 - 1) - ln(det(F)))
f_int = derivative(psi*dx,u,v)
f_ext = derivative( dot(B, u)*dx('everywhere') + dot(T, u)*ds , u, v)
Fboth = f_int - f_ext
# Tangent
dF = derivative(Fboth, u, du)
solve(Fboth == 0, u, bcs, J=dF)
P = diff(psi,F)
S = inv(F)*P
sig = F*S*F.T*((1/det(F))*I)
#vm = sqrt(sig[0,0]*sig[0,0] - sig[0,0]*sig[1,1] + sig[1,1]*sig[1,1] + 3.0*sig[0,1]*sig[0,1])
return u, du, v, f_int, f_ext, psi
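# Hedged aside (not part of the original script): the stored energy in problem_solve is
# a compressible neo-Hookean density,
#   psi(F) = mu/2*(inner(F, F) - 3 - 2*ln(J)) + lambda/2*((J**2 - 1)/2 - ln(J)), J = det(F).
# With the 2x2 deformation gradient used here, psi is nonzero even at F = I
# (inner(I, I) = 2), which is presumably why the script reports the delta strain energy
# via strain_energy_subtract_first below. The NumPy helper evaluates the same expression
# for constant mu and lambda, independent of any FEniCS objects.
def _psi_numpy(F_np, mu_val=1.0, lmbda_val=1.0):
    J = np.linalg.det(F_np)
    return 0.5 * mu_val * (np.sum(F_np * F_np) - 3.0 - 2.0 * np.log(J)) \
        + 0.5 * lmbda_val * (0.5 * (J ** 2 - 1.0) - np.log(J))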
to_print = True
def rxn_forces(list_rxn,W,f_int,f_ext):
x_dofs = W.sub(0).dofmap().dofs()
y_dofs = W.sub(1).dofmap().dofs()
f_ext_known = assemble(f_ext)
f_ext_unknown = assemble(f_int) - f_ext_known
dof_coords = W.tabulate_dof_coordinates().reshape((-1, 2))
y_val_min = np.min(dof_coords[:,1]) + 10E-5; y_val_max = np.max(dof_coords[:,1]) - 10E-5
x_top = []; x_btm = []
for kk in x_dofs:
if dof_coords[kk,1] > y_val_max:
x_top.append(kk)
if dof_coords[kk,1] < y_val_min:
x_btm.append(kk)
f_sum_top_x = np.sum(f_ext_unknown[x_top])
f_sum_btm_x = np.sum(f_ext_unknown[x_btm])
y_top = []; y_btm = []
for kk in y_dofs:
if dof_coords[kk,1] > y_val_max:
y_top.append(kk)
if dof_coords[kk,1] < y_val_min:
y_btm.append(kk)
f_sum_top_y = np.sum(f_ext_unknown[y_top])
f_sum_btm_y = np.sum(f_ext_unknown[y_btm])
if to_print:
print("x_top, x_btm rxn force:", f_sum_top_x, f_sum_btm_x)
print("y_top, y_btm rxn force:", f_sum_top_y, f_sum_btm_y)
list_rxn.append([f_sum_top_x,f_sum_btm_x,f_sum_top_y,f_sum_btm_y])
return list_rxn
def pix_centers(u):
disps_all_x = np.zeros((28,28))
disps_all_y = np.zeros((28,28))
for kk in range(0,28):
for jj in range(0,28):
xx = jj + 0.5 # x is columns
yy = kk + 0.5 # y is rows
disps_all_x[kk,jj] = u(xx,yy)[0]
disps_all_y[kk,jj] = u(xx,yy)[1]
return disps_all_x, disps_all_y
def strain_energy(list_psi, psi):
val = assemble(psi*dx)
list_psi.append(val)
return list_psi
def strain_energy_subtract_first(list_psi):
first = list_psi[0]
for kk in range(0,len(list_psi)):
list_psi[kk] = list_psi[kk] - first
return list_psi
# --> set up the loop
fname_paraview = File(folder_name + "/paraview.pvd")
list_rxn = []
list_psi = []
# --> run the loop
disp_val = [0.0, 0.001, 0.01, 0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5]
fname = folder_name + '/pixel_disp'
for dd in range(0,len(disp_val)):
applied_disp = disp_val[dd]
u, du, v, f_int, f_ext, psi = problem_solve(applied_disp,u,du,v)
list_rxn = rxn_forces(list_rxn,W,f_int,f_ext)
#fname_paraview << (u,dd)
disps_all_x, disps_all_y = pix_centers(u)
fn_x = fname + '_step' + str(dd) + '_x.txt'
fn_y = fname + '_step' + str(dd) + '_y.txt'
np.savetxt(fn_x,disps_all_x)
np.savetxt(fn_y,disps_all_y)
list_psi = strain_energy(list_psi, psi)
# --> save paraview file (not necessary most of the time)
# fname_paraview << u
# --> save reaction forces
fname = folder_name + '/rxn_force.txt'
np.savetxt(fname,np.asarray(list_rxn))
# --> save total (delta) potential energy
fname = folder_name + '/strain_energy.txt'
list_psi = strain_energy_subtract_first(list_psi)
np.savetxt(fname, np.asarray(list_psi))
##########################################################################################
##########################################################################################
|
py | b418065ffbd543946ececcb58b46dea6e4630e06 | #!/usr/bin/python
from __future__ import print_function
from __future__ import with_statement
import sys
import re
import os
KNOWN_GROUPS = set([
"Minor bugfix",
"Minor bugfixes",
"Major bugfix",
"Major bugfixes",
"Minor feature",
"Minor features",
"Major feature",
"Major features",
"New system requirements",
"Testing",
"Documentation",
"Code simplification and refactoring",
"Removed features",
"Deprecated features"])
NEEDS_SUBCATEGORIES = set([
"Minor bugfix",
"Minor bugfixes",
"Major bugfix",
"Major bugfixes",
"Minor feature",
"Minor features",
"Major feature",
"Major features",
])
def lintfile(fname):
have_warned = []
def warn(s):
if not have_warned:
have_warned.append(1)
print("{}:".format(fname))
print("\t{}".format(s))
m = re.search(r'(\d{3,})', os.path.basename(fname))
if m:
bugnum = m.group(1)
else:
bugnum = None
with open(fname) as f:
contents = f.read()
if bugnum and bugnum not in contents:
warn("bug number {} does not appear".format(bugnum))
m = re.match(r'^[ ]{2}o ([^\(:]*)([^:]*):', contents)
if not m:
warn("Header not in format expected. (' o Foo:' or ' o Foo (Bar):')")
elif m.group(1).strip() not in KNOWN_GROUPS:
warn("Unrecognized header: %r" % m.group(1))
elif (m.group(1) in NEEDS_SUBCATEGORIES and '(' not in m.group(2)):
warn("Missing subcategory on %r" % m.group(1))
if m:
isBug = ("bug" in m.group(1).lower() or "fix" in m.group(1).lower())
else:
isBug = False
contents = " ".join(contents.split())
if re.search(r'\#\d{2,}', contents):
warn("Don't use a # before ticket numbers. ('bug 1234' not '#1234')")
if isBug and not re.search(r'(\d+)', contents):
warn("Ticket marked as bugfix, but does not mention a number.")
elif isBug and not re.search(r'Fixes ([a-z ]*)bug (\d+)', contents):
warn("Ticket marked as bugfix, but does not say 'Fixes bug XXX'")
if re.search(r'[bB]ug (\d+)', contents):
if not re.search(r'[Bb]ugfix on ', contents):
warn("Bugfix does not say 'bugfix on X.Y.Z'")
elif not re.search('[fF]ixes ([a-z ]*)bug (\d+); bugfix on ',
contents):
warn("Bugfix does not say 'Fixes bug X; bugfix on Y'")
elif re.search('tor-([0-9]+)', contents):
warn("Do not prefix versions with 'tor-'. ('0.1.2', not 'tor-0.1.2'.)")
return have_warned != []
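# Illustrative example (not part of the original script): a changes file named
# something like "changes/bug12345" passes the checks above when it reads
#
#   o Minor bugfixes (logging):
#     - Fix a typo in a log message. Fixes bug 12345; bugfix on
#       0.3.2.1-alpha.
#
# i.e. a two-space "o <group> (<subcategory>):" header drawn from KNOWN_GROUPS, the
# ticket number written without a leading '#', and a "Fixes bug N; bugfix on X.Y.Z"
# sentence for bugfix entries.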
def files(args):
"""Walk through the arguments: for directories, yield their contents;
for files, just yield the files. Only search one level deep, because
that's how the changes directory is laid out."""
for f in args:
if os.path.isdir(f):
for item in os.listdir(f):
if item.startswith("."): #ignore dotfiles
continue
yield os.path.join(f, item)
else:
yield f
if __name__ == '__main__':
problems = 0
for fname in files(sys.argv[1:]):
if fname.endswith("~"):
continue
if lintfile(fname):
problems += 1
if problems:
sys.exit(1)
else:
sys.exit(0)
|
py | b41806cb9ed597a2571bdabb02a2e03ce2f9a7df | """
Day 19 - Monster Messages
"""
from pprint import pprint
import re
def get_search_pattern(rules, rule_number):
return_pattern = ''
for sub_pattern in rules[rule_number]:
if isinstance(sub_pattern, int):
return_pattern += get_search_pattern(rules, sub_pattern)
elif sub_pattern == '|':
return_pattern += ')' + sub_pattern + '('
else:
return_pattern += sub_pattern
if '|' in rules[rule_number]:
return '((' + return_pattern + '))'
return '(' + return_pattern + ')'
return return_pattern
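# Hedged usage sketch (not part of the original solution): with a tiny rule set the
# recursion above just concatenates parenthesised sub-patterns, e.g.
#
#   demo_rules = {0: [1, 2], 1: ['a'], 2: ['b']}
#   get_search_pattern(demo_rules, 0)   # a regex that matches exactly "ab"
#
# part_one() then anchors the rule-0 pattern with ^...$ and counts matching message
# lines via re.findall in MULTILINE mode.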
def convert_to_string_or_int(rule_list):
ret_list = []
for rule in rule_list:
if rule[0] == '"':
ret_list.append(rule[1:-1])
else:
try:
ret_list.append(int(rule))
except Exception:
ret_list.append(rule)
return ret_list
def part_one(testfile):
with open(testfile, 'r') as f:
data = f.read()
rule_lines, messages = data.split('\n\n')
rules = {}
for rule_line in rule_lines.split('\n'):
rule_number, rule_line = rule_line.split(':')
rule_list = rule_line.strip().split(' ')
rules[int(rule_number)] = convert_to_string_or_int(rule_list)
# pprint(rules)
search_pattern = '^' + get_search_pattern(rules, 0) + '$'
# pprint(search_pattern)
# pprint(messages)
# Search for valid messages
x = re.findall(search_pattern, messages, re.MULTILINE)
return len(x)
if __name__ == '__main__':
# Part one:
valid_messages = part_one('data.txt')
print(f'valid messages {valid_messages}')
|
py | b418091de8d7f7ab53a9b4476573d116c3caf2a7 | # -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange
from pandas import DataFrame, MultiIndex, Series, date_range, notna
import pandas.core.panel as panelm
from pandas.core.panel import Panel
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal,
makeCustomDataframe as mkdf, makeMixedDataFrame)
from pandas.tseries.offsets import MonthEnd
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class PanelTests(object):
panel = None
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
pytest.raises(TypeError, hash, c_empty)
pytest.raises(TypeError, hash, c)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class SafeForSparse(object):
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).iloc[0]
ops = ['add', 'sub', 'mul', 'truediv',
'floordiv', 'div', 'mod', 'pow']
for op in ops:
with pytest.raises(NotImplementedError):
getattr(p, op)(d, axis=0)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class CheckIndexing(object):
def test_delitem_and_pop(self):
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
tm.assert_frame_equal(panelc[0], panel[0])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# bad shape
p = Panel(np.random.randn(4, 3, 2))
msg = (r"shape of value must be \(3, 2\), "
r"shape of given object was \(4, 2\)")
with pytest.raises(ValueError, match=msg):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notna(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notna(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_getitem_fancy_slice(self):
pass
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.loc[:, 22, [111, 333]] = b
assert_frame_equal(a.loc[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort_values()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.loc[0, :, 0] = b
assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.loc[:, 0, 0] = b
assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.loc[0, 0, :] = b
assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestPanel(PanelTests, CheckIndexing, SafeForSparse):
def test_constructor_cast(self):
# can't cast
data = [[['foo', 'bar', 'baz']]]
pytest.raises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
assert len(empty.items) == 0
assert len(empty.major_axis) == 0
assert len(empty.minor_axis) == 0
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
assert panel.values.dtype == np.object_
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
assert panel[i].values.dtype.name == dtype
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(
np.random.randn(2, 10, 5),
items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5),
dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
msg = "The number of dimensions required is 3"
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(10, 2))
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
assert list(p.items) == keys
p = Panel.from_dict(d)
assert list(p.items) == keys
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
assert panel['foo'].values.dtype == np.object_
assert panel['A'].values.dtype == np.float64
def test_constructor_error_msgs(self):
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(4, 5, 5\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(4), lrange(5), lrange(5))
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(5, 4, 5\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(4), lrange(5))
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(5, 5, 4\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(5), lrange(4))
def test_apply_slabs(self):
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(
lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_fillna(self):
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
pytest.raises(NotImplementedError,
lambda: p.fillna(999, limit=1))
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples(
[(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1],
[3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples(
[(1, 'two'), (1, 'one'), (2, 'one'), (np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1],
[3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12],
[3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'],
['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], [
'y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4],
[-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples(
[(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
def test_filter(self):
pass
def test_shift(self):
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame())
for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_numpy_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.round(p, out=p)
# removing Panel before NumPy enforces, so just ignore
@pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
def test_multiindex_get(self):
ind = MultiIndex.from_tuples(
[('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.loc['a']
assert (f1.items == [1, 2]).all()
assert (f2.items == [1, 2]).all()
MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
def test_repr_empty(self):
empty = Panel()
repr(empty)
@pytest.mark.parametrize('bad_kwarg, exception, msg', [
# errors must be 'ignore' or 'raise'
({'errors': 'something'}, ValueError, 'The parameter errors must.*'),
({'join': 'inner'}, NotImplementedError, 'Only left join is supported')
])
def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
pan = Panel([[[1.5, np.nan, 3.]]])
with pytest.raises(exception, match=msg):
pan.update(pan, **bad_kwarg)
def test_update_raise_on_overlap(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
with pytest.raises(ValueError, match='Data overlaps'):
pan.update(pan, errors='raise')
@pytest.mark.parametrize('raise_conflict', [True, False])
def test_update_deprecation(self, raise_conflict):
pan = Panel([[[1.5, np.nan, 3.]]])
other = Panel([[[]]])
with tm.assert_produces_warning(FutureWarning):
pan.update(other, raise_conflict=raise_conflict)
def test_panel_index():
index = panelm.panel_index([1, 2, 3, 4], [1, 2, 3])
expected = MultiIndex.from_arrays([np.tile([1, 2, 3, 4], 3),
np.repeat([1, 2, 3], 4)],
names=['time', 'panel'])
tm.assert_index_equal(index, expected)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_panel_np_all():
wp = Panel({"A": DataFrame({'b': [1, 2]})})
result = np.all(wp)
assert result == np.bool_(True)
|
py | b41809362e4fde9a5cee79009a87d78352a68d3b | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.8 Python SDK
Pure Storage FlashBlade REST 1.8 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentation can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.8
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CertificateGroup(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name'
}
def __init__(self, id=None, name=None):
"""
CertificateGroup - a model defined in Swagger
"""
self._id = None
self._name = None
if id is not None:
self.id = id
if name is not None:
self.name = name
@property
def id(self):
"""
Gets the id of this CertificateGroup.
A non-modifiable, globally unique ID chosen by the system.
:return: The id of this CertificateGroup.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this CertificateGroup.
A non-modifiable, globally unique ID chosen by the system.
:param id: The id of this CertificateGroup.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this CertificateGroup.
The name of the object (e.g., a file system or snapshot).
:return: The name of this CertificateGroup.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this CertificateGroup.
The name of the object (e.g., a file system or snapshot).
:param name: The name of this CertificateGroup.
:type: str
"""
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CertificateGroup):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
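# Hedged usage sketch (not part of the generated module): the model behaves like a
# plain value object, e.g.
#
#   group = CertificateGroup(id='example-id', name='example-group')
#   group.to_dict()   # {'id': 'example-id', 'name': 'example-group'}
#
# where both values are made-up placeholders.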
|
py | b4180942a06a4b1a036981bd211fb80ed3454ed8 | def ply(filepath="",
check_existing=True,
filter_glob="*.ply",
use_mesh_modifiers=True,
use_normals=True,
use_uv_coords=True,
use_colors=True,
global_scale=1.0,
axis_forward='Y',
axis_up='Z'):
'''Export a single object as a Stanford PLY with normals, colors and texture coordinates
:param filepath: File Path, Filepath used for exporting the file
:type filepath: string, (optional, never None)
:param check_existing: Check Existing, Check and warn on overwriting existing files
:type check_existing: boolean, (optional)
:param filter_glob: filter_glob
:type filter_glob: string, (optional, never None)
:param use_mesh_modifiers: Apply Modifiers, Apply Modifiers to the exported mesh
:type use_mesh_modifiers: boolean, (optional)
:param use_normals: Normals, Export Normals for smooth and hard shaded faces (hard shaded faces will be exported as individual faces)
:type use_normals: boolean, (optional)
:param use_uv_coords: UVs, Export the active UV layer
:type use_uv_coords: boolean, (optional)
:param use_colors: Vertex Colors, Export the active vertex color layer
:type use_colors: boolean, (optional)
:param global_scale: Scale
:type global_scale: float in [0.01, 1000], (optional)
:param axis_forward: Forward
:type axis_forward: enum in ['X', 'Y', 'Z', '-X', '-Y', '-Z'], (optional)
:param axis_up: Up
:type axis_up: enum in ['X', 'Y', 'Z', '-X', '-Y', '-Z'], (optional)
'''
pass
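# Hedged note (not part of the generated stub): this module mirrors Blender's mesh
# export operators, so inside a running Blender session the call is expected to look
# roughly like
#
#   bpy.ops.export_mesh.ply(filepath="/tmp/mesh.ply", use_normals=True)
#
# The bpy.ops.export_mesh path is an assumption based on the operator names here;
# outside Blender these functions are documentation-only stubs.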
def stl(filepath="",
check_existing=True,
filter_glob="*.stl",
use_selection=False,
global_scale=1.0,
use_scene_unit=False,
ascii=False,
use_mesh_modifiers=True,
batch_mode='OFF',
axis_forward='Y',
axis_up='Z'):
'''Save STL triangle mesh data from the active object
:param filepath: File Path, Filepath used for exporting the file
:type filepath: string, (optional, never None)
:param check_existing: Check Existing, Check and warn on overwriting existing files
:type check_existing: boolean, (optional)
:param filter_glob: filter_glob
:type filter_glob: string, (optional, never None)
:param use_selection: Selection Only, Export selected objects only
:type use_selection: boolean, (optional)
:param global_scale: Scale
:type global_scale: float in [0.01, 1000], (optional)
:param use_scene_unit: Scene Unit, Apply current scene’s unit (as defined by unit scale) to exported data
:type use_scene_unit: boolean, (optional)
:param ascii: Ascii, Save the file in ASCII file format
:type ascii: boolean, (optional)
:param use_mesh_modifiers: Apply Modifiers, Apply the modifiers before saving
:type use_mesh_modifiers: boolean, (optional)
:param batch_mode: Batch Mode. OFF Off, All data in one file. OBJECT Object, Each object as a file.
:type batch_mode: enum in ['OFF', 'OBJECT'], (optional)
:param axis_forward: Forward
:type axis_forward: enum in ['X', 'Y', 'Z', '-X', '-Y', '-Z'], (optional)
:param axis_up: Up
:type axis_up: enum in ['X', 'Y', 'Z', '-X', '-Y', '-Z'], (optional)
'''
pass
|
py | b41809ca7b231e4b2c3e66b85f4cbea25cdf7e9b | import torch
import sys
import types
class VFModule(types.ModuleType):
def __init__(self, name):
super(VFModule, self).__init__(name)
self.vf = torch._C._VariableFunctions
def __getattr__(self, attr):
return getattr(self.vf, attr)
sys.modules[__name__] = VFModule(__name__)
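# Hedged note (not part of the original shim): swapping the sys.modules entry for a
# types.ModuleType subclass is a standard way to forward attribute lookups, so
# importing this module and calling e.g. <module>.add(...) resolves the name through
# __getattr__ to torch._C._VariableFunctions. Stripped of torch, the pattern is:
#
#   class _Forwarder(types.ModuleType):
#       def __init__(self, name, target):
#           super(_Forwarder, self).__init__(name)
#           self._target = target
#       def __getattr__(self, attr):
#           return getattr(self._target, attr)
#
#   sys.modules[__name__] = _Forwarder(__name__, some_backend)
#
# where some_backend is a placeholder for any object providing the real functions.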
|
py | b41809d359163721ad81202d92a9363709eb9e5d | class Topdown:
def maximum_product_cut(self, N):
self.dp = [[0] * (N+1) for x in range(N)]
self.rope = [x for x in range(1, N)]
for i in range(0, N):
self.dp[i][0] = 1
return self.maximum_product_cut_util(self.rope, N-1, N)
def maximum_product_cut_util(self, rope, n, N):
for i in range(1, n+1):
for j in range(1, N + 1):
if j >= rope[i - 1]:
self.dp[i][j] = max(rope[i-1] * self.dp[i][j - rope[i - 1]], self.dp[i - 1][j])
else:
self.dp[i][j] = self.dp[i - 1][j]
return self.dp[n][N]
if __name__ == '__main__':
N = 10
t = Topdown()
print(t.maximum_product_cut(N))
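# Hedged cross-check (not part of the original file): the classic answer for N = 10 is
# 3 + 3 + 4 -> 3 * 3 * 4 = 36. The recursive brute force below enumerates cuts into at
# least two pieces and should agree with Topdown().maximum_product_cut(n) for small n.
def brute_force_max_product(n):
    best = 0
    for first in range(1, n):
        rest = n - first
        # either keep the remainder whole or keep cutting it
        best = max(best, first * max(rest, brute_force_max_product(rest)))
    return best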
|
py | b41809e51a71d33c69450a241ed64ae5480a42d6 | import numpy
import math
import bob.ip.base
from .Extractor import Extractor
class DCTBlocks (Extractor):
"""Extracts DCT blocks"""
def __init__(
self,
block_size = 12, # one or two parameters for block size
block_overlap = 11, # one or two parameters for block overlap
number_of_dct_coefficients = 45,
normalize_blocks = True,
normalize_dcts = True,
auto_reduce_coefficients = False
):
# call base class constructor
Extractor.__init__(
self,
block_size = block_size,
block_overlap = block_overlap,
number_of_dct_coefficients = number_of_dct_coefficients,
normalize_blocks = normalize_blocks,
normalize_dcts = normalize_dcts,
auto_reduce_coefficients = auto_reduce_coefficients
)
# block parameters
self.m_block_size = block_size if isinstance(block_size, (tuple, list)) else (block_size, block_size)
self.m_block_overlap = block_overlap if isinstance(block_overlap, (tuple, list)) else (block_overlap, block_overlap)
self.m_number_of_dct_coefficients = number_of_dct_coefficients
self.norm_block = normalize_blocks
self.norm_dct = normalize_dcts
if self.m_block_size[0] < self.m_block_overlap[0] or self.m_block_size[1] < self.m_block_overlap[1]:
raise ValueError("The overlap '%s' is bigger than the block size '%s'. This won't work. Please check your setup!"%(self.m_block_overlap, self.m_block_size))
if self.m_block_size[0] * self.m_block_size[1] <= self.m_number_of_dct_coefficients:
if auto_reduce_coefficients:
self.m_number_of_dct_coefficients = self.m_block_size[0] * self.m_block_size[1] - 1
else:
raise ValueError("You selected more coefficients %d than your blocks have %d. This won't work. Please check your setup!"%(self.m_number_of_dct_coefficients, self.m_block_size[0] * self.m_block_size[1]))
def __call__(self, image):
"""Computes and returns DCT blocks for the given input image"""
# Initializes cropper and destination array
extractor = bob.ip.base.DCTFeatures(self.m_number_of_dct_coefficients, self.m_block_size, self.m_block_overlap, self.norm_block, self.norm_dct)
# Computes DCT features
return extractor(image)
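# Hedged usage sketch (not part of the original extractor): assuming bob.ip.base is
# installed, the extractor is applied to a 2D grey-level image and returns one row of
# DCT coefficients per block, e.g.
#
#   extractor = DCTBlocks(block_size=12, block_overlap=11, number_of_dct_coefficients=45)
#   image = numpy.random.randint(0, 256, (64, 80)).astype(numpy.float64)
#   features = extractor(image)   # roughly (number_of_blocks, 45)
#
# With block_overlap = 11 the 12x12 blocks are shifted by a single pixel between
# neighbours, so the block count grows quickly with image size.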
|
py | b41809fd992fba6f8069a8f7b7b989d9b1855251 | import os.path
import random
import string
import pytest
import pyrebase
def test_wrap_mapping():
data = 1
wrapped = pyrebase.wrap_mapping(data)
assert wrapped['.value'] == 1
data = {'foo': 'bar'}
wrapped = pyrebase.wrap_mapping(data)
assert '.value' not in wrapped
assert data == wrapped
def test_ref():
f = pyrebase.Firebase('https://pyrebase.firebaseIO.com')
assert f.ref == 'https://pyrebase.firebaseIO.com/'
f = pyrebase.Firebase('https://pyrebase.firebaseIO.com/')
assert f.ref == 'https://pyrebase.firebaseIO.com/'
class MockTransport(object):
def __init__(self):
self.mapping = {}
def normalize(self, ref):
if not ref.endswith('/'):
return ref + '/'
return ref
def get(self, ref, params):
try:
data = self.mapping[self.normalize(ref)]
except KeyError:
return
return data
def set(self, ref, params, data):
if pyrebase.is_mapping(data):
if '.priority' in data:
priority_ref = os.path.join(os.path.dirname(self.normalize(ref)), '.priority')
priority = data.pop('.priority')
self.set(priority_ref, params, priority)
if '.value' in data:
data = data['.value']
self.mapping[self.normalize(ref)] = data
return data
def push(self, ref, params, data):
name = ''.join(random.choice(string.ascii_lowercase) for _ in range(16))
pushed_ref = os.path.join(os.path.dirname(ref), name)
self.set(pushed_ref, params, data)
return {'name': name}
def update(self, ref, params, data):
self.mapping[self.normalize(ref)].update(data)
return data
def remove(self, ref, params):
del self.mapping[self.normalize(ref)]
@pytest.fixture(params=[MockTransport()])
def firebase(request):
import pyrebase
return pyrebase.Firebase('https://pyrebase.firebaseIO.com/', transport=request.param)
def test_child(firebase):
c = firebase.child('-Izjg-FkP7eXLa1EXVAi')
assert c.ref == 'https://pyrebase.firebaseIO.com/-Izjg-FkP7eXLa1EXVAi/'
c = firebase.child('foo/bar')
assert c.ref == 'https://pyrebase.firebaseIO.com/foo/bar/'
c = firebase.child('.')
assert c.ref == 'https://pyrebase.firebaseIO.com/'
c = firebase.child('..')
assert c.ref == 'https://pyrebase.firebaseIO.com/'
c = firebase.child('foo/bar/..')
assert c.ref == 'https://pyrebase.firebaseIO.com/foo/'
c = firebase.child('foo/../bar')
assert c.ref == 'https://pyrebase.firebaseIO.com/bar/'
def test_child_priority(firebase):
c = firebase.child('-Izjg-FkP7eXLa1EXVAi').child('.priority')
assert c.ref == 'https://pyrebase.firebaseIO.com/-Izjg-FkP7eXLa1EXVAi/.priority/'
def test_nested_child(firebase):
c = firebase.child('-Izjg-FkP7eXLa1EXVAi').child('-Izjh72mPJj7xJm0e4kQ')
assert c.ref == 'https://pyrebase.firebaseIO.com/-Izjg-FkP7eXLa1EXVAi/-Izjh72mPJj7xJm0e4kQ/'
c = firebase.child('-Izjg-FkP7eXLa1EXVAi/-Izjh72mPJj7xJm0e4kQ')
assert c.ref == 'https://pyrebase.firebaseIO.com/-Izjg-FkP7eXLa1EXVAi/-Izjh72mPJj7xJm0e4kQ/'
def test_parent(firebase):
assert firebase.ref == firebase.parent.ref
child = firebase.child('-Izjg-FkP7eXLa1EXVAi/-Izjh72mPJj7xJm0e4kQ')
parent = child.parent
assert parent.ref == 'https://pyrebase.firebaseIO.com/-Izjg-FkP7eXLa1EXVAi/'
root = parent.parent
assert root.ref == 'https://pyrebase.firebaseIO.com/'
def test_root(firebase):
c = firebase.child('-Izjg-FkP7eXLa1EXVAi/-Izjh72mPJj7xJm0e4kQ')
assert c.root.ref == firebase.ref
def test_prepare_data(firebase):
simple_payload = 'foo'
prepared = firebase.prepare_data(simple_payload, None)
assert prepared == simple_payload
prepared = firebase.prepare_data(simple_payload, 1)
assert prepared['.value'] == 'foo'
assert prepared['.priority'] == 1.0
complex_payload = {'foo': 'bar'}
prepared = firebase.prepare_data(complex_payload, None)
assert prepared == complex_payload
assert '.value' not in prepared
prepared = firebase.prepare_data(complex_payload, 2)
assert '.value' not in prepared
assert prepared['foo'] == 'bar'
assert prepared['.priority'] == 2.0
def test_set(firebase):
assert firebase.set(True) == True
assert firebase.child('bar').set('foo') == 'foo'
assert firebase.set([1, 2, 3]) == [1, 2, 3]
assert firebase.set({'foo': 'bar'}) == {'foo': 'bar'}
assert firebase.set('foo', priority=5) == 'foo'
def test_get(firebase):
firebase.set('foo')
assert firebase.get() == 'foo'
def test_set_priority(firebase):
firebase.set('foo')
assert firebase.set_priority(5) == 5
def test_get_priority(firebase):
firebase.set('foo', priority=5)
assert firebase.get_priority() == 5
def test_push(firebase):
c = firebase.push('foo')
assert c.ref != firebase.ref
assert c.get() == 'foo'
c = firebase.push('bar', priority=3)
assert c.get() == 'bar'
assert c.get_priority() == 3
def test_update(firebase):
firebase.set({'foo': 'bar'})
assert firebase.get() == {'foo': 'bar'}
assert firebase.update({'baz': 'quux'}) == {'baz': 'quux'}
assert firebase.get() == {'foo': 'bar', 'baz': 'quux'}
def test_remove(firebase):
c = firebase.push('foo')
c.remove()
assert c.get() is None
|
py | b4180a5151904d4257f871819a5a8e87605c1b02 | from __future__ import unicode_literals
from django.db import models
class Acl(models.Model):
principaltype = models.CharField(
db_column='PrincipalType',
max_length=25
)
principalid = models.IntegerField(
db_column='PrincipalId'
)
rightname = models.CharField(
db_column='RightName',
max_length=25
)
objecttype = models.CharField(
db_column='ObjectType',
max_length=25
)
objectid = models.IntegerField(
db_column='ObjectId'
)
creator = models.IntegerField(
db_column='Creator'
)
created = models.DateTimeField(
db_column='Created',
blank=True, null=True
)
lastupdatedby = models.IntegerField(
db_column='LastUpdatedBy'
)
lastupdated = models.DateTimeField(
db_column='LastUpdated',
blank=True, null=True
)
class Meta:
managed = False
db_table = 'ACL'
app_label = 'rt_acl'
class Articles(models.Model):
name = models.CharField(db_column='Name', max_length=255)
summary = models.CharField(db_column='Summary', max_length=255)
sortorder = models.IntegerField(db_column='SortOrder')
# Field renamed because it was a Python reserved word.
class_field = models.IntegerField(db_column='Class')
parent = models.IntegerField(db_column='Parent')
uri = models.CharField(db_column='URI', max_length=255, blank=True, null=True)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
disabled = models.IntegerField(db_column='Disabled')
class Meta:
managed = False
db_table = 'Articles'
app_label = 'rt_articles'
class Assets(models.Model):
name = models.CharField(db_column='Name', max_length=255)
catalog = models.IntegerField(db_column='Catalog')
status = models.CharField(db_column='Status', max_length=64)
description = models.CharField(db_column='Description', max_length=255)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'Assets'
app_label = 'rt_assets'
class Attributes(models.Model):
name = models.CharField(db_column='Name', max_length=255, blank=True, null=True)
description = models.CharField(db_column='Description', max_length=255, blank=True, null=True)
content = models.TextField(db_column='Content', blank=True, null=True)
contenttype = models.CharField(db_column='ContentType', max_length=16, blank=True, null=True)
objecttype = models.CharField(db_column='ObjectType', max_length=64, blank=True, null=True)
objectid = models.IntegerField(db_column='ObjectId', blank=True, null=True)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'Attributes'
app_label = 'rt_attributes'
class Cachedgroupmembers(models.Model):
groupid = models.IntegerField(db_column='GroupId', blank=True, null=True)
memberid = models.IntegerField(db_column='MemberId', blank=True, null=True)
via = models.IntegerField(db_column='Via', blank=True, null=True)
immediateparentid = models.IntegerField(db_column='ImmediateParentId', blank=True, null=True)
disabled = models.SmallIntegerField(db_column='Disabled')
class Meta:
managed = False
db_table = 'CachedGroupMembers'
app_label = 'rt_cachedgroupmembers'
class Catalogs(models.Model):
name = models.CharField(db_column='Name', max_length=255)
lifecycle = models.CharField(db_column='Lifecycle', max_length=32)
description = models.CharField(db_column='Description', max_length=255)
disabled = models.SmallIntegerField(db_column='Disabled')
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'Catalogs'
app_label = 'rt_catalogs'
class Classes(models.Model):
name = models.CharField(db_column='Name', max_length=255)
description = models.CharField(db_column='Description', max_length=255)
sortorder = models.IntegerField(db_column='SortOrder')
disabled = models.IntegerField(db_column='Disabled')
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
hotlist = models.IntegerField(db_column='HotList')
class Meta:
managed = False
db_table = 'Classes'
app_label = 'rt_classes'
class Customfieldvalues(models.Model):
customfield = models.IntegerField(db_column='CustomField')
name = models.CharField(db_column='Name', max_length=200, blank=True, null=True)
description = models.CharField(db_column='Description', max_length=255, blank=True, null=True)
sortorder = models.IntegerField(db_column='SortOrder')
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
category = models.CharField(db_column='Category', max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'CustomFieldValues'
app_label = 'rt_customfieldvalues'
class Customfields(models.Model):
name = models.CharField(db_column='Name', max_length=200, blank=True, null=True)
type = models.CharField(db_column='Type', max_length=200, blank=True, null=True)
maxvalues = models.IntegerField(db_column='MaxValues', blank=True, null=True)
pattern = models.TextField(db_column='Pattern', blank=True, null=True)
description = models.CharField(db_column='Description', max_length=255, blank=True, null=True)
sortorder = models.IntegerField(db_column='SortOrder')
lookuptype = models.CharField(db_column='LookupType', max_length=255)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
disabled = models.SmallIntegerField(db_column='Disabled')
basedon = models.IntegerField(db_column='BasedOn', blank=True, null=True)
rendertype = models.CharField(db_column='RenderType', max_length=64, blank=True, null=True)
valuesclass = models.CharField(db_column='ValuesClass', max_length=64, blank=True, null=True)
entryhint = models.CharField(db_column='EntryHint', max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'CustomFields'
app_label = 'rt_customfields'
class Customroles(models.Model):
name = models.CharField(db_column='Name', max_length=200, blank=True, null=True)
description = models.CharField(db_column='Description', max_length=255, blank=True, null=True)
maxvalues = models.IntegerField(db_column='MaxValues', blank=True, null=True)
entryhint = models.CharField(db_column='EntryHint', max_length=255, blank=True, null=True)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
disabled = models.SmallIntegerField(db_column='Disabled')
class Meta:
managed = False
db_table = 'CustomRoles'
app_label = 'rt_customroles'
class FmArticles(models.Model):
name = models.CharField(db_column='Name', max_length=255)
summary = models.CharField(db_column='Summary', max_length=255)
sortorder = models.IntegerField(db_column='SortOrder')
# Field renamed because it was a Python reserved word.
class_field = models.IntegerField(db_column='Class')
parent = models.IntegerField(db_column='Parent')
uri = models.CharField(db_column='URI', max_length=255, blank=True, null=True)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'FM_Articles'
app_label = 'rt_fmarticles'
class FmClasses(models.Model):
name = models.CharField(db_column='Name', max_length=255)
description = models.CharField(db_column='Description', max_length=255)
sortorder = models.IntegerField(db_column='SortOrder')
disabled = models.IntegerField(db_column='Disabled')
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
hotlist = models.IntegerField(db_column='HotList')
class Meta:
managed = False
db_table = 'FM_Classes'
app_label = 'rt_fmclasses'
class FmObjecttopics(models.Model):
topic = models.IntegerField(db_column='Topic')
objecttype = models.CharField(db_column='ObjectType', max_length=64)
objectid = models.IntegerField(db_column='ObjectId')
class Meta:
managed = False
db_table = 'FM_ObjectTopics'
app_label = 'rt_fmobjects'
class FmTopics(models.Model):
parent = models.IntegerField(db_column='Parent')
name = models.CharField(db_column='Name', max_length=255)
description = models.CharField(db_column='Description', max_length=255)
objecttype = models.CharField(db_column='ObjectType', max_length=64)
objectid = models.IntegerField(db_column='ObjectId')
class Meta:
managed = False
db_table = 'FM_Topics'
app_label = 'rt_fmtopics'
class Groupmembers(models.Model):
groupid = models.IntegerField(db_column='GroupId')
memberid = models.IntegerField(db_column='MemberId')
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'GroupMembers'
app_label = 'rt_groupmembers'
unique_together = (('groupid', 'memberid'),)
class Groups(models.Model):
name = models.CharField(db_column='Name', max_length=200, blank=True, null=True)
description = models.CharField(db_column='Description', max_length=255, blank=True, null=True)
domain = models.CharField(db_column='Domain', max_length=64, blank=True, null=True)
instance = models.IntegerField(db_column='Instance', blank=True, null=True)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'Groups'
app_label = 'rt_groups'
class Links(models.Model):
base = models.CharField(db_column='Base', max_length=240, blank=True, null=True)
target = models.CharField(db_column='Target', max_length=240, blank=True, null=True)
type = models.CharField(db_column='Type', max_length=20)
localtarget = models.IntegerField(db_column='LocalTarget')
localbase = models.IntegerField(db_column='LocalBase')
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
class Meta:
managed = False
db_table = 'Links'
app_label = 'rt_links'
class Objectclasses(models.Model):
# Field renamed because it was a Python reserved word.
class_field = models.IntegerField(db_column='Class')
objecttype = models.CharField(db_column='ObjectType', max_length=255)
objectid = models.IntegerField(db_column='ObjectId')
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'ObjectClasses'
app_label = 'rt_objectclasses'
class Objectcustomfieldvalues(models.Model):
customfield = models.IntegerField(db_column='CustomField')
objecttype = models.CharField(db_column='ObjectType', max_length=255)
objectid = models.IntegerField(db_column='ObjectId')
sortorder = models.IntegerField(db_column='SortOrder')
content = models.CharField(db_column='Content', max_length=255, blank=True, null=True)
largecontent = models.TextField(db_column='LargeContent', blank=True, null=True)
contenttype = models.CharField(db_column='ContentType', max_length=80, blank=True, null=True)
contentencoding = models.CharField(db_column='ContentEncoding', max_length=80, blank=True, null=True)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
disabled = models.SmallIntegerField(db_column='Disabled')
class Meta:
managed = False
db_table = 'ObjectCustomFieldValues'
app_label = 'rt_objectcustomfieldvalues'
class Objectcustomfields(models.Model):
customfield = models.IntegerField(db_column='CustomField')
objectid = models.IntegerField(db_column='ObjectId')
sortorder = models.IntegerField(db_column='SortOrder')
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'ObjectCustomFields'
app_label = 'rt_objectcustomfields'
class Objectcustomroles(models.Model):
customrole = models.IntegerField(db_column='CustomRole')
objectid = models.IntegerField(db_column='ObjectId')
sortorder = models.IntegerField(db_column='SortOrder')
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'ObjectCustomRoles'
app_label = 'rt_objectcustomroles'
unique_together = (('objectid', 'customrole'),)
class Objectscrips(models.Model):
scrip = models.IntegerField(db_column='Scrip')
stage = models.CharField(db_column='Stage', max_length=32)
objectid = models.IntegerField(db_column='ObjectId')
sortorder = models.IntegerField(db_column='SortOrder')
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'ObjectScrips'
app_label = 'rt_objectcscripts'
unique_together = (('objectid', 'scrip'),)
class Objecttopics(models.Model):
topic = models.IntegerField(db_column='Topic')
objecttype = models.CharField(db_column='ObjectType', max_length=64)
objectid = models.IntegerField(db_column='ObjectId')
class Meta:
managed = False
db_table = 'ObjectTopics'
app_label = 'rt_objectctopics'
class Principals(models.Model):
principaltype = models.CharField(db_column='PrincipalType', max_length=16)
disabled = models.SmallIntegerField(db_column='Disabled')
class Meta:
managed = False
db_table = 'Principals'
app_label = 'rt_principals'
class Queues(models.Model):
name = models.CharField(db_column='Name', unique=True, max_length=200)
description = models.CharField(db_column='Description', max_length=255, blank=True, null=True)
correspondaddress = models.CharField(db_column='CorrespondAddress', max_length=120, blank=True, null=True)
commentaddress = models.CharField(db_column='CommentAddress', max_length=120, blank=True, null=True)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
disabled = models.SmallIntegerField(db_column='Disabled')
subjecttag = models.CharField(db_column='SubjectTag', max_length=120, blank=True, null=True)
lifecycle = models.CharField(db_column='Lifecycle', max_length=32, blank=True, null=True)
sortorder = models.IntegerField(db_column='SortOrder')
sladisabled = models.SmallIntegerField(db_column='SLADisabled')
class Meta:
managed = False
db_table = 'Queues'
app_label = 'rt_queues'
class Rtxassets(models.Model):
name = models.CharField(db_column='Name', max_length=255)
catalog = models.IntegerField(db_column='Catalog')
status = models.CharField(db_column='Status', max_length=64)
description = models.CharField(db_column='Description', max_length=255)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'RTxAssets'
app_label = 'rt_rtxassets'
class Rtxcatalogs(models.Model):
name = models.CharField(db_column='Name', max_length=255)
lifecycle = models.CharField(db_column='Lifecycle', max_length=32)
description = models.CharField(db_column='Description', max_length=255)
disabled = models.SmallIntegerField(db_column='Disabled')
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'RTxCatalogs'
app_label = 'rt_rtxcatalogs'
class Scripactions(models.Model):
name = models.CharField(db_column='Name', max_length=200, blank=True, null=True)
description = models.CharField(db_column='Description', max_length=255, blank=True, null=True)
execmodule = models.CharField(db_column='ExecModule', max_length=60, blank=True, null=True)
argument = models.CharField(db_column='Argument', max_length=255, blank=True, null=True)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'ScripActions'
app_label = 'rt_scriptactions'
class Scripconditions(models.Model):
name = models.CharField(db_column='Name', max_length=200, blank=True, null=True)
description = models.CharField(db_column='Description', max_length=255, blank=True, null=True)
execmodule = models.CharField(db_column='ExecModule', max_length=60, blank=True, null=True)
argument = models.CharField(db_column='Argument', max_length=255, blank=True, null=True)
applicabletranstypes = models.CharField(db_column='ApplicableTransTypes', max_length=60, blank=True, null=True)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
class Meta:
managed = False
db_table = 'ScripConditions'
app_label = 'rt_scriptconditions'
class Scrips(models.Model):
description = models.CharField(db_column='Description', max_length=255, blank=True, null=True)
scripcondition = models.IntegerField(db_column='ScripCondition')
scripaction = models.IntegerField(db_column='ScripAction')
customisapplicablecode = models.TextField(db_column='CustomIsApplicableCode', blank=True, null=True)
custompreparecode = models.TextField(db_column='CustomPrepareCode', blank=True, null=True)
customcommitcode = models.TextField(db_column='CustomCommitCode', blank=True, null=True)
template = models.CharField(db_column='Template', max_length=200)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
disabled = models.SmallIntegerField(db_column='Disabled')
class Meta:
managed = False
db_table = 'Scrips'
app_label = 'rt_scripts'
class Templates(models.Model):
queue = models.IntegerField(db_column='Queue')
name = models.CharField(db_column='Name', max_length=200)
description = models.CharField(db_column='Description', max_length=255, blank=True, null=True)
type = models.CharField(db_column='Type', max_length=16, blank=True, null=True)
content = models.TextField(db_column='Content', blank=True, null=True)
lastupdated = models.DateTimeField(db_column='LastUpdated', blank=True, null=True)
lastupdatedby = models.IntegerField(db_column='LastUpdatedBy')
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
class Meta:
managed = False
db_table = 'Templates'
app_label = 'rt_templates'
class Topics(models.Model):
parent = models.IntegerField(db_column='Parent')
name = models.CharField(db_column='Name', max_length=255)
description = models.CharField(db_column='Description', max_length=255)
objecttype = models.CharField(db_column='ObjectType', max_length=64)
objectid = models.IntegerField(db_column='ObjectId')
class Meta:
managed = False
db_table = 'Topics'
app_label = 'rt_topics'
class Transactions(models.Model):
objecttype = models.CharField(db_column='ObjectType', max_length=64)
objectid = models.IntegerField(db_column='ObjectId')
timetaken = models.IntegerField(db_column='TimeTaken')
type = models.CharField(db_column='Type', max_length=20, blank=True, null=True)
field = models.CharField(db_column='Field', max_length=40, blank=True, null=True)
oldvalue = models.CharField(db_column='OldValue', max_length=255, blank=True, null=True)
newvalue = models.CharField(db_column='NewValue', max_length=255, blank=True, null=True)
referencetype = models.CharField(db_column='ReferenceType', max_length=255, blank=True, null=True)
oldreference = models.IntegerField(db_column='OldReference', blank=True, null=True)
newreference = models.IntegerField(db_column='NewReference', blank=True, null=True)
data = models.CharField(db_column='Data', max_length=255, blank=True, null=True)
creator = models.IntegerField(db_column='Creator')
created = models.DateTimeField(db_column='Created', blank=True, null=True)
class Meta:
managed = False
db_table = 'Transactions'
app_label = 'rt_transactions'
class Users(models.Model):
name = models.CharField(
db_column='Name',
unique=True,
max_length=200
)
password = models.CharField(
db_column='Password',
max_length=256,
blank=True, null=True
)
comments = models.TextField(
db_column='Comments',
blank=True, null=True
)
signature = models.TextField(
db_column='Signature',
blank=True, null=True
)
emailaddress = models.CharField(
db_column='EmailAddress',
max_length=120,
blank=True, null=True
)
freeformcontactinfo = models.TextField(
db_column='FreeformContactInfo',
blank=True, null=True
)
organization = models.CharField(
db_column='Organization',
max_length=200,
blank=True, null=True
)
realname = models.CharField(
db_column='RealName',
max_length=120,
blank=True, null=True
)
nickname = models.CharField(
db_column='NickName',
max_length=16,
blank=True, null=True
)
lang = models.CharField(
db_column='Lang',
max_length=16,
blank=True, null=True
)
gecos = models.CharField(
db_column='Gecos',
max_length=16,
blank=True, null=True
)
homephone = models.CharField(
db_column='HomePhone',
max_length=30,
blank=True, null=True
)
workphone = models.CharField(
db_column='WorkPhone',
max_length=30,
blank=True, null=True
)
mobilephone = models.CharField(
db_column='MobilePhone',
max_length=30,
blank=True, null=True
)
pagerphone = models.CharField(
db_column='PagerPhone',
max_length=30,
blank=True, null=True
)
address1 = models.CharField(
db_column='Address1',
max_length=200,
blank=True, null=True
)
address2 = models.CharField(
db_column='Address2',
max_length=200,
blank=True, null=True
)
city = models.CharField(
db_column='City',
max_length=100,
blank=True, null=True
)
state = models.CharField(
db_column='State',
max_length=100,
blank=True, null=True
)
postal_code = models.CharField(
db_column='Zip',
max_length=16,
blank=True, null=True
)
country = models.CharField(
db_column='Country',
max_length=50,
blank=True, null=True
)
timezone = models.CharField(
db_column='Timezone',
max_length=50,
blank=True, null=True
)
creator = models.IntegerField(
db_column='Creator',
unique=True
)
created = models.DateTimeField(
db_column='Created',
blank=True, null=True
)
lastupdatedby = models.IntegerField(
db_column='LastUpdatedBy'
)
lastupdated = models.DateTimeField(
db_column='LastUpdated',
blank=True, null=True
)
authtoken = models.CharField(
db_column='AuthToken',
max_length=16,
blank=True, null=True
)
smimecertificate = models.TextField(
db_column='SMIMECertificate',
blank=True, null=True
)
class Meta:
managed = False
db_table = 'Users'
app_label = 'rt_users'
def __unicode__(self):
'''
Default data for display
'''
return "{}: {}".format(
self.name,
self.emailaddress
)
class Tickets(models.Model):
effectiveid = models.IntegerField(
db_column='EffectiveId'
)
queue = models.IntegerField(
db_column='Queue'
)
ticket_type = models.CharField(
db_column='Type',
max_length=16,
blank=True, null=True
)
owner = models.IntegerField(
db_column='Owner'
)
subject = models.CharField(
db_column='Subject',
max_length=200,
blank=True, null=True
)
initialpriority = models.IntegerField(
db_column='InitialPriority'
)
finalpriority = models.IntegerField(
db_column='FinalPriority'
)
priority = models.IntegerField(
db_column='Priority'
)
timeestimated = models.IntegerField(
db_column='TimeEstimated'
)
timeworked = models.IntegerField(
db_column='TimeWorked'
)
status = models.CharField(
db_column='Status',
max_length=64,
blank=True, null=True
)
timeleft = models.IntegerField(
db_column='TimeLeft'
)
told = models.DateTimeField(
db_column='Told',
blank=True, null=True
)
starts = models.DateTimeField(
db_column='Starts',
blank=True, null=True
)
started = models.DateTimeField(
db_column='Started',
blank=True, null=True
)
due = models.DateTimeField(
db_column='Due',
blank=True, null=True
)
resolved = models.DateTimeField(
db_column='Resolved',
blank=True, null=True
)
lastupdatedby = models.IntegerField(
db_column='LastUpdatedBy'
)
lastupdated = models.DateTimeField(
db_column='LastUpdated',
blank=True, null=True
)
creator = models.ForeignKey(
Users,
db_column='Creator'
)
created = models.DateTimeField(
db_column='Created',
blank=True, null=True
)
ismerged = models.SmallIntegerField(
db_column='IsMerged',
blank=True, null=True
)
sla = models.CharField(
db_column='SLA',
max_length=64,
blank=True, null=True
)
class Meta:
managed = False
db_table = 'Tickets'
app_label = 'rt_tickets'
def __unicode__(self):
'''
Default data for display
'''
return self.subject
|
py | b4180ab6e54e76cb79f6b5c81ee68c71db07f943 | from django.contrib import admin
from .models import Pickle
from .forms import ConfigAdminForm
@admin.register(Pickle)
class ConfigModelAdmin(admin.ModelAdmin):
form = ConfigAdminForm
list_display = ('name', 'data_type', 'data_value')
search_fields = ('name',)
list_filter = ('data_type',)
def get_form(self, request, obj=None, change=False, **kwargs):
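        # Pre-populate the form's raw_value field with the stored value so the
        # existing data can be edited in the admin change view.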
form = super(ConfigModelAdmin, self).get_form(request, obj, change, **kwargs)
if obj is not None:
form.base_fields['raw_value'].initial = obj.value
return form
def save_model(self, request, obj, form, change):
raw_value = form.cleaned_data.get('raw_value')
obj.value = raw_value
return super(ConfigModelAdmin, self).save_model(request, obj, form, change)
|
py | b4180d981088f960f213c8d8a1f38ad4149f8869 | import os
os.system('bash tools/dist_test.sh configs/imvoxelnet/imvoxelnet_kitti.py work_dirs/atlas_kitti/20210503_214214.pth 2 --eval mAP')
|
py | b4180dae6c1fbceca42a20fccd9644765d42a2cf | from project.medicine.medicine import Medicine
class Salve(Medicine):
HEALTH_INCREASE = 50
def __init__(self):
super().__init__(self.HEALTH_INCREASE)
def __str__(self):
return "salve" |
py | b4180ff90b6b37753a0a7a6268b911c6ce21190e | #!/usr/bin/env python
import gym
import safety_gym
import safe_rl
from safe_rl.utils.run_utils import setup_logger_kwargs
from safe_rl.utils.mpi_tools import mpi_fork, proc_id
import sys, pathlib
sys.path.append(pathlib.Path().parent.resolve().parent) # Adding zikang repo
import wandb
# wandb = None
def main(robot, task, algo, seed, exp_name=None, cpu=4):
# Verify experiment
robot_list = ['nav', 'point', 'car', 'doggo']
task_list = ['goal1', 'goal2', 'button1', 'button2', 'push1', 'push2']
algo_list = ['ppo', 'ppo_lagrangian', 'trpo', 'trpo_lagrangian', 'cpo','sac']
algo = algo.lower()
task = task.capitalize()
robot = robot.capitalize()
assert algo in algo_list, "Invalid algo"
assert task.lower() in task_list, "Invalid task"
assert robot.lower() in robot_list, "Invalid robot"
# Hyperparameters
if exp_name is None:
exp_name = algo + '_' + robot + task
if robot=='Doggo':
num_steps = 1e8
steps_per_epoch = 60000
else:
num_steps = 1e7
steps_per_epoch = 30000
if robot == 'Nav':
from shrl.envs.nav import Continuous2DNav as zikenv
elif robot == 'Point':
from shrl.envs.point import PointNav as zikenv
elif robot == 'Car':
from shrl.envs.car import CarNav as zikenv
elif robot == 'Doggo':
from shrl.envs.doggo import DoggoNav as zikenv
# Copied from run_polopt_agent
    config = {
        "ent_reg": 0.,
        "cost_lim": 0,
        "penalty_init": 1.,
        "penalty_lr": 5e-2,
        "target_kl": 0.01,
        "vf_lr": 1e-3,
        "vf_iters": 80,
    }
epochs = int(num_steps / steps_per_epoch)
save_freq = 50
config["epochs"] = epochs
# wandb.config["cost_lim"] = cost_lim
# wandb.config["target_kl"] = target_kl
# Fork for parallelizing
mpi_fork(cpu)
if proc_id() == 0 and wandb is not None:
# For using wandb with mpi
wandb.init(project="hisarl-baselines", entity="csbric", config=config)
config = wandb.config # For allowing hparam sweep?
# TODO: see if we can sweep when using mpi
# Prepare Logger
exp_name = exp_name or (algo + '_' + robot.lower() + task.lower())
logger_kwargs = setup_logger_kwargs(exp_name, seed)
# Algo and Env
algo = eval('safe_rl.'+algo)
env_name = 'Safexp-'+robot+task+'-v0'
algo(env_fn=lambda: zikenv(),
ac_kwargs=dict(
hidden_sizes=(256, 256),
),
# epochs=epochs,
steps_per_epoch=steps_per_epoch,
save_freq=save_freq,
# target_kl=target_kl,
# cost_lim=cost_lim,
seed=seed,
logger_kwargs=logger_kwargs,
wandb=wandb,
**config
)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--robot', type=str, default='Point')
parser.add_argument('--task', type=str, default='Goal1')
parser.add_argument('--algo', type=str, default='ppo')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--exp_name', type=str, default='')
parser.add_argument('--cpu', type=int, default=1)
args = parser.parse_args()
exp_name = args.exp_name if not(args.exp_name=='') else None
main(args.robot, args.task, args.algo, args.seed, exp_name, args.cpu) |
py | b418102e42a33201e6a094fe3102b74954f7946e | #Desenvolver um algoritmo capaz de calcular qual o máximo e qual o mínimo valor
#de uma série de números inteiros positivos lidos do teclado cuja marca de fim é -1.
def numMinMaX():
try:
num=int(input('Insira um número: '))
except ValueError:
print('Não foi inserido um número.')
max=min=num
while num!=-1:
try:
num=int(input('Insira um número: '))
except ValueError:
print('Não foi inserido um número.')
if num==-1:
break
elif num<min:
min=num
elif num>max:
max=num
print('O maior número inserido foi o %d e o menor foi %d.' %(max, min))
numMinMaX()
|
py | b41812bc353aa97051e3f05ef16821a8ee241a14 | from math import floor, ceil
from itertools import dropwhile
from frostsynth import get_srate
class PolySequence(object):
def __init__(self, xs, coefficientss, periodic=False, srate=None):
self.xs = xs
self.coefficientss = coefficientss
self.periodic = periodic
self.srate = srate
#self._prune_coefficientss()
def _prune_coefficientss(self):
self.coefficientss = [tuple(dropwhile(lambda x: x == 0, coefficients)) for coefficients in self.coefficientss]
def __call__(self, x):
if self.periodic:
raise NotImplementedError
else:
if x < self.xs[0]:
return 0
elif x >= self.xs[-1]:
return 0
else:
sx = self.xs[0]
for index, next_sx in enumerate(self.xs, -1):
if x < next_sx:
break
sx = next_sx
coefficients = self.coefficientss[index]
mu = x - sx
r = 0
for coefficient in coefficients:
r = mu * r + coefficient
return r
def __iter__(self):
srate = get_srate(self.srate)
dt = 1 / srate
x = self.xs[0]
prev_x = x
for i, target_x in enumerate(self.xs[1:]):
dx = x - prev_x
l = target_x - x
if l <= 0:
continue
if i < len(self.xs) - 2:
samples = int(ceil(l * srate))
else:
samples = int(floor(l * srate))
coefficients = self.coefficientss[i]
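            # Evaluate this segment sample by sample.  The degree-specific
            # branches below keep forward-difference accumulators so that each
            # new sample costs only a few additions; the generic fallback
            # re-evaluates the polynomial via self(t).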
if not coefficients:
for _ in range(samples):
yield 0
elif len(coefficients) == 1:
coef = coefficients[0]
for _ in range(samples):
yield coef
elif len(coefficients) == 2:
accumulator = coefficients[1] + coefficients[0] * dx
yield accumulator
da = coefficients[0] * dt
for _ in range(samples - 1):
accumulator += da
yield accumulator
elif len(coefficients) == 3:
accumulator0 = coefficients[2] + (coefficients[1] + coefficients[0] * dx) * dx
yield accumulator0
da = coefficients[0] * dt * dt
accumulator1 = (coefficients[1] + 2 * dx * coefficients[0]) * dt - da
da += da
for _ in range(samples - 1):
accumulator1 += da
accumulator0 += accumulator1
yield accumulator0
elif len(coefficients) == 4:
accumulator0 = coefficients[3] + (coefficients[2] + (coefficients[1] + coefficients[0] * dx) * dx) * dx
yield accumulator0
dt2 = dt * dt
dxdt = dx * dt
da = coefficients[0] * dt2 * dt
b = coefficients[1] * dt2
accumulator1 = coefficients[2] * dt - b + da + (2 * coefficients[1] * dt + 3 * coefficients[0] * (dxdt - dt2)) * dx
da *= 6
accumulator2 = b + b - da + 6 * coefficients[0] * dxdt * dt
for _ in range(samples - 1):
accumulator2 += da
accumulator1 += accumulator2
accumulator0 += accumulator1
yield accumulator0
else:
t = x
for _ in range(samples):
yield self(t)
t += dt
x += samples * dt
prev_x = target_x
@property
def length(self):
return self.xs[-1] - self.xs[0]
@property
def samples(self):
srate = get_srate(self.srate)
return int(self.length * srate)
def extend_to(self, value=0):
if value < self.xs[0]:
self.xs.insert(0, value)
self.coefficientss.insert(0, ())
elif value > self.xs[-1]:
self.xs.append(value)
self.coefficientss.append(())
def extend(self, other):
delta_x = self.xs[-1] - other.xs[0]
self.xs[-1:] = [x + delta_x for x in other.xs]
self.coefficientss.extend(other.coefficientss)
def scale_y(self, multiplier):
self.coefficientss = [tuple(multiplier * coefficient for coefficient in coefficients) for coefficients in self.coefficientss]
def scale_x(self, multiplier):
self.xs = [multiplier * x for x in self.xs]
self.coefficientss = [tuple(multiplier ** (i - len(coefficients) + 1) * coefficient for i, coefficient in enumerate(coefficients)) for coefficients in self.coefficientss]
def differentiate(self):
self.coefficientss = [tuple(coefficient * (len(coefficients) - 1 - i) for i, coefficient in enumerate(coefficients[:-1])) for coefficients in self.coefficientss]
def integrate(self):
dc = 0
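        # dc carries the value of the antiderivative at the start of each
        # segment (its constant term), so the integrated piecewise polynomial
        # stays continuous across segment boundaries.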
for index, coefficients in enumerate(self.coefficientss[:]):
delta_x = self.xs[index + 1] - self.xs[index]
new_coefficients = []
for i, coefficient in enumerate(reversed(coefficients)):
new_coefficients.insert(0, coefficient / (i + 1))
new_coefficients.append(dc)
dc = sum(coefficient * delta_x ** i for i, coefficient in enumerate(reversed(new_coefficients)))
self.coefficientss[index] = tuple(new_coefficients)
@classmethod
def constant(_, duration, value, start=0):
return PolySequence([start, start + duration], [(value,)])
def _add_coefficientss(self, other):
coefficientss = []
for coefficients in self.coefficientss:
if not coefficients:
coefficientss.append((other, ))
else:
coefficientss.append(coefficients[:-1] + (coefficients[-1] + other,))
return coefficientss
def copy(self):
return PolySequence(self.xs, self.coefficientss)
def to_list(self):
raise NotImplementedError
def __neg__(self):
result = self.copy()
result.scale_y(-1)
return result
def __add__(self, other):
if isinstance(other, PolySequence):
return NotImplemented
else:
return PolySequence(self.xs, self._add_coefficientss(other))
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
self.coefficientss = self._add_coefficientss(other)
return self
def __sub__(self, other):
return self.__add__(-other)
def __rsub__(self, other):
return -(self.__add__(-other))
def __isub__(self, other):
self += -other
return self
def __mul__(self, other):
if isinstance(other, PolySequence):
return NotImplemented
else:
coefficientss = [tuple(coefficient * other for coefficient in coefficients) for coefficients in self.coefficientss]
return PolySequence(self.xs, coefficientss)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
self.scale_y(other)
return self
def __truediv__(self, other):
return self.__mul__(1 / other)
def __itruediv__(self, other):
self.scale_y(1 / other)
return self
class ConstantSequence(PolySequence):
def __init__(self, data):
xs, ys = zip(*data)
super().__init__(xs, [(y, ) for y in ys])
class LinearSequence(PolySequence):
def __init__(self, data):
coefficientss = []
for d0, d1 in zip(data, data[1:]):
l = (d1[0] - d0[0])
if l > 0:
coefficientss.append(((d1[1] - d0[1]) / l, d0[1]))
else:
coefficientss.append((d0[1], 0))
super().__init__([d[0] for d in data], coefficientss)
#self._prune_coefficientss()
@classmethod
def from_flat_list(cls, l):
def g():
i = iter(l)
while True:
yield (next(i), next(i))
return cls(list(g()))
class NaturalParabolic(PolySequence):
"""Pretty useless. Suffers from oscillations."""
def __init__(self, data):
xs, ys = zip(*data)
as_ = [0]
for i in range(1, len(data) - 1):
as_.append(((xs[i] - xs[i + 1]) * (as_[i - 1] + ys[i - 1]) + (xs[i + 1] - xs[i - 1]) * ys[i]) / (xs[i] - xs[i - 1]) - ys[i + 1])
coefficientss = []
for a, x0, y0, x1, y1 in zip(as_, xs, ys, xs[1:], ys[1:]):
i_delta_x = 1 / (x1 - x0)
coefficientss.append((
-a * i_delta_x * i_delta_x,
(a + y1 - y0) * i_delta_x,
y0,
))
super().__init__(xs, coefficientss)
class CubicSequence(PolySequence):
def __init__(self, data):
xs = []
coefficientss = []
for d0, d1 in zip(data, data[1:]):
x0 = d0[0]
x1 = d1[0]
# TODO: Fix the sign
delta_x = (x0 - x1)
if delta_x < 0:
y0 = d0[1]
if len(d0) < 4:
s0 = d0[2]
else:
s0 = d0[3]
y1 = d1[1]
s1 = d1[2]
a = ((s0 + s1) * (x0 - x1) - 2 * y0 + 2 * y1) / delta_x ** 3
b = ((2 * s0 + s1) * delta_x - 3 * y0 + 3 * y1) / delta_x ** 2
coefficientss.append((a, b, s0, y0))
else:
coefficientss.append((d0[1],))
xs.append(x0)
xs.append(data[-1][0])
super().__init__(xs, coefficientss)
@classmethod
def from_flat_bezier(cls, l):
d0 = l[2] - l[0]
s0 = (l[3] - l[1]) / d0 if d0 != 0 else 0
data = [(l[0], l[1], None, s0)]
i = 6
while i + 3 < len(l):
d1 = l[i] - l[i - 2]
s1 = (l[i + 1] - l[i - 1]) / d1 if d1 != 0 else 0
d0 = l[i + 2] - l[i]
s0 = (l[i + 3] - l[i + 1]) / d0 if d0 != 0 else 0
data.append((l[i], l[i + 1], s1, s0))
i += 6
d1 = l[-2] - l[-4]
s1 = (l[-1] - l[-3]) / d1 if d1 != 0 else 0
data.append((l[-2], l[-1], s1))
return cls(data)
class NaturalSpline(CubicSequence):
def __init__(self, data):
# Separate data
xs, ys = zip(*data)
# Build the tridiagonal matrix for solving the derivatives
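        # Interior rows enforce continuity of the spline's second derivative at
        # each knot; the first and last rows encode the natural boundary
        # condition (zero second derivative at the end points).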
i_delta_x = 1 / (xs[1] - xs[0])
a = [0]
b = [i_delta_x + i_delta_x]
c = [i_delta_x]
d = [3 * (ys[1] - ys[0]) * i_delta_x * i_delta_x]
for i in range(1, len(data) - 1):
i_delta_x0 = 1 / (xs[i] - xs[i - 1])
i_delta_x1 = 1 / (xs[i + 1] - xs[i])
a.append(i_delta_x0)
b.append(2 * (i_delta_x0 + i_delta_x1))
c.append(i_delta_x1)
d.append(3 * ((ys[i] - ys[i - 1]) * i_delta_x0 * i_delta_x0 + (ys[i + 1] - ys[i]) * i_delta_x1 * i_delta_x1))
i_delta_x = 1 / (xs[-1] - xs[-2])
a.append(i_delta_x)
b.append(i_delta_x + i_delta_x)
d.append(3 * (ys[-1] - ys[-2]) * i_delta_x * i_delta_x)
# Solve the equations...
c[0] /= b[0]
for i in range(1, len(c)):
c[i] /= (b[i] - a[i] * c[i - 1])
d[0] /= b[0]
for i in range(1, len(d)):
d[i] = (d[i] - a[i] * d[i - 1]) / (b[i] - a[i] * c[i -1])
# ...by back substitution.
ss = [d[-1]]
for i in range(len(d) - 2, -1, -1):
ss.insert(0, d[i] - c[i] * ss[0])
data = [(x, y, s) for x, y, s in zip(xs, ys, ss)]
super().__init__(data)
|
py | b41813ca2ba5c60d78ff4a685790c22b2d8e8798 | from collections import defaultdict
from report import ReportNode
from multi_project_metric_report import MultiProjectMetricReport
from wikimetrics.models.storage.wikiuser import WikiUserKey
from wikimetrics.api import CohortService
from wikimetrics.configurables import db
from wikimetrics.enums import Aggregation
__all__ = ['SumAggregateByUserReport']
class SumAggregateByUserReport(ReportNode):
"""
A node responsible for aggregating the results of MultiProjectMetricReport
by user, and formatting them as expected by RunProgramMetricsReport. It specifically
knows how to aggregate rolling active editor and newly registered metrics.
"""
show_in_ui = False
def __init__(self, cohort, metric, *args, **kwargs):
"""
Parameters:
metric : an instance of a Metric class
cohort : a logical cohort object
args : should include any parameters needed by ReportNode
kwargs : should include any parameters needed by ReportNode
"""
super(SumAggregateByUserReport, self).__init__(*args, **kwargs)
# Get mediawiki's username map to be able to aggregate.
service = CohortService()
session = db.get_session()
self.usernames = service.get_wikiusernames_for_cohort(cohort.id, session)
self.children = [
MultiProjectMetricReport(cohort, metric, *args, **kwargs)
]
def finish(self, child_results):
results = child_results[0] # One child only.
# The way of aggregating results accross different projects
# is applying the OR operator. Read more in the Wikitech docs:
# https://wikitech.wikimedia.org/wiki/Analytics/Wikimetrics/Global_metrics
# (TODO: Rename page to Program_metrics)
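        # For example (hypothetical values): if the same user has metric value
        # 1 on one project and 0 on another, the per-user OR-aggregation below
        # yields 1, so that user is counted once in the final sum.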
aggregated_results = defaultdict(lambda: defaultdict(lambda: 0))
for key_str, result in results.iteritems():
key = WikiUserKey.fromstr(key_str)
username = self.usernames[key]
for metric_name in result:
aggregated_results[username][metric_name] |= result[metric_name]
# Finally, count all users that have a positive result.
summed_results = defaultdict(lambda: 0)
for mw_username, results in aggregated_results.iteritems():
for metric_name, value in results.iteritems():
summed_results[metric_name] += value
# Encapsulate the results to be consistent with other metrics.
return {Aggregation.SUM: dict(summed_results)}
|
py | b41813eb0fa0aab3705fb66b03dd265406940643 | import os
import gitcommands as git
import diffcalc
from ignore import getIgnoreFiles
import logger
from utils import getNestedFiles,read_file,commitAndUpdate
from colors import logcolors
mypath = os.getcwd()
ignoredirs = getIgnoreFiles()
print(ignoredirs)
# gets the list of all nested files
onlyfiles = getNestedFiles(mypath,ignoredirs)
def ischanged(url, branch, *args, **kwargs):
    changedfile = []
    diffarr = []
    # if uncommitted data is found, perform git commands on it
    initbuffer = kwargs.get('initbuffer', -1)
    if initbuffer != -1:
for obj in initbuffer:
file = obj['path']
diff = obj['changes']
diffarr.append(diff)
changedfile.append(file)
# Performing Git Commands for changed files
commitAndUpdate(changedfile,diffarr,url,branch)
print('Listening for changes....')
initial = list(read_file(onlyfiles))
while True:
current = list(read_file(onlyfiles))
changeditem = []
previtem = []
if(current != initial):
# Calculating Previous Version of File
for ele in initial:
if ele not in current:
for item in ele:
previtem.append(item)
# Calculating New Version of File
for ele in current:
if ele not in initial:
changeditem.append(ele)
# calculating changed file's name
for i in range(0, len(changeditem)):
print('loop :-', i)
changedfile.append(onlyfiles[current.index(changeditem[i])])
print(f"Changed file is {logcolors.BOLD}{changedfile}{logcolors.ENDC}\n")
# Calculating Diff for previous and changed version of file
diff = diffcalc.calcDiff(previtem, changeditem[0])
diffarr.append(diff)
for file in changedfile:
logger.writedata(path=file, diff=diff)
# Performing Git Commands for changed files
commitAndUpdate(changedfile,diffarr,url,branch)
initial = current
# time.sleep(5)
|
py | b41813f12620d363e853d3d8aff0f2f060e1924f | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data=pd.read_csv(path,sep=",")
loan_status=data['Loan_Status'].value_counts()
plt.bar(loan_status.index,loan_status.values)
plt.show()
#Code starts here
# --------------
#Code starts here
property_and_loan=data.groupby(['Property_Area','Loan_Status']).size().unstack()
property_and_loan.plot(kind='bar',rot=45).set(xlabel='Property Area',ylabel='Loan Status')
# --------------
#Code starts here
education_and_loan=data.groupby(['Education','Loan_Status']).size().unstack()
education_and_loan.plot(kind='bar',rot=45).set(xlabel="Education Status",ylabel="Loan Status")
# --------------
#Code starts here
graduate=data[data["Education"]=="Graduate"]
not_graduate=data[data["Education"]=="Not Graduate"]
graduate['LoanAmount'].plot(kind="density", label="Graduate")
not_graduate['LoanAmount'].plot(kind="density", label="Not Graduate")
#Code ends here
#For automatic legend display
plt.legend()
# --------------
#Code starts here
fig,(ax_1,ax_2,ax_3)=plt.subplots(1,3, figsize=(20,8))
ax_1.scatter(data['ApplicantIncome'], data['LoanAmount'])
ax_1.set(title="Applicant Income")
ax_2.scatter(data['CoapplicantIncome'], data['LoanAmount'])
ax_2.set(title="Coapplicant Income")
data['TotalIncome']=data['ApplicantIncome']+data['CoapplicantIncome']
ax_3.scatter(data['TotalIncome'],data["LoanAmount"])
ax_3.set(title="Total Income")
|
py | b418151d1ddeed33773c225416b09a862ad27e25 | from flask import request, g, current_app, jsonify, url_for
from . import api
from ..models import Post, Permission
from .decorators import permission_required
from .errors import forbidden
from app import db
@api.route('/posts/')
def get_posts():
page = request.args.get('page', 1, type=int)
pagination = Post.query.order_by(Post.timestamp.desc()).paginate(page=page,
per_page=current_app.config['POST_PER_PAGE'], error_out=False)
    prev = None
    next = None
    if pagination.has_prev:
        prev = url_for('api.get_posts', page=page - 1, _external=True)
    if pagination.has_next:
        next = url_for('api.get_posts', page=page + 1, _external=True)
posts = {
'posts': [post.to_json() for post in pagination.items],
'prev': prev,
'next': next,
'posts_count': pagination.total
}
return jsonify(posts)
@api.route('/posts/', methods=['POST'])
@permission_required(Permission.WRITE_ARTICLES)
def compose_post():
body = request.get_json().get('body')
if not body:
return forbidden('post can not be null')
post = Post(body=body, author=g.current_user)
db.session.add(post)
db.session.commit()
return jsonify({
'message': 'post success',
'post': post.to_json()
})
@api.route('/post/<int:id>')
def get_post(id):
post = Post.query.get_or_404(id)
return jsonify(post.to_json())
@api.route('/post/<int:id>', methods=['PUT'])
@permission_required(Permission.WRITE_ARTICLES)
def edit_post(id):
post = Post.query.get_or_404(id)
if g.current_user != post.author and not g.current_user.is_adminstrator():
return forbidden('Insufficient permissions')
if not request.get_json().get('body'):
return forbidden('post can not be null')
post.body = request.get_json().get('body')
    db.session.add(post)
    db.session.commit()
    return jsonify(post.to_json())
@api.route('/post/<int:id>/comments/')
def get_post_comments(id):
post = Post.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = post.comments.paginate(page=page,
per_page=current_app.config['FLASK_COMMENTS_PER_PAGE'], error_out=False)
prev = None
if pagination.has_prev:
prev = url_for('api.get_post_comments', id=post.id, page=page - 1,
_external=True)
next = None
if pagination.has_next:
next = url_for('api.get_post_comments', id=post.id, page=page + 1,
_external=True)
return jsonify({
'comments': [comment.to_json() for comment in pagination.items],
'prev': prev,
'next': next,
'comments_count': post.comments.count()
}) |
py | b418154561ccb91dcb55a0bfe9e36cc63331c014 | # pylint: disable=no-self-use,redefined-outer-name,misplaced-comparison-constant
from copy import copy
from unittest.mock import Mock, patch
import pytest
from gitman.models import Source
@pytest.fixture
def source():
return Source(type='git', repo='repo', name='name', rev='rev', link='link')
class TestSource:
def test_init_defaults(self):
"""Verify a source has a default revision."""
source = Source(type='git', repo='http://example.com/foo/bar.git', name=None)
assert 'http://example.com/foo/bar.git' == source.repo
assert 'bar' == source.name
assert 'main' == source.rev
assert None is source.link
def test_init_name_as_path(self, tmp_path):
"""Verify the name can be a path."""
source = Source(type='git', repo='http://example.com', name=tmp_path)
assert isinstance(source.name, str)
def test_init_rev(self):
"""Verify the revision can be customized."""
source = Source(
type='git', repo='http://mock.git', name='mock_name', rev='v1.0'
)
assert 'v1.0' == source.rev
def test_init_link(self):
"""Verify the link can be set."""
source = Source(
type='git', repo='http://mock.git', name='mock_name', link='mock/link'
)
assert 'mock/link' == source.link
def test_repr(self, source):
"""Verify sources can be represented."""
assert "<source ['git'] 'repo' @ 'rev' in 'name' <- 'link'>" == repr(source)
def test_repr_no_link(self, source):
"""Verify sources can be represented."""
source.link = None
assert "<source ['git'] 'repo' @ 'rev' in 'name'>" == repr(source)
def test_eq(self, source):
source2 = copy(source)
assert source == source2
source2.name = "dir2"
assert source != source2
def test_lt(self):
sources = [
Source(type='git', repo='http://github.com/owner/123.git', name=None),
Source(type='git', repo='bbb', name='456'),
Source(type='git', repo='ccc', name='456'),
Source(type='git', repo='BBB', name='AAA'),
Source(type='git', repo='AAA', name='AAA'),
]
assert sources == sorted(sources)
@patch('os.path.exists', Mock(return_value=False))
@patch('gitman.shell.cd', Mock(return_value=True))
@patch('gitman.git.valid', Mock(return_value=True))
@patch('gitman.git.changes', Mock(return_value=False))
@patch('gitman.git.update')
@patch('gitman.git.fetch')
@patch('gitman.git.is_fetch_required')
@patch('gitman.git.clone')
def test_update_files(
self, mock_clone, mock_is_fetch_required, mock_fetch, mock_update
):
"""Verify update_files when path does not exist"""
source = Source(type='git', repo='repo', name='name', rev='rev', link='link')
source.update_files()
mock_clone.assert_called_once_with(
'git', 'repo', 'name', rev='rev', sparse_paths=[]
)
mock_is_fetch_required.assert_called_once_with('git', 'rev')
mock_fetch.assert_called_once_with('git', 'repo', 'name', rev='rev')
mock_update.assert_called_once_with(
'git', 'repo', 'name', clean=True, fetch=False, rev='rev'
)
@patch('os.path.isdir', Mock(return_value=True))
@patch('os.listdir', Mock(return_value=['test_file']))
@patch('gitman.shell.cd', Mock(return_value=True))
@patch('gitman.git.valid', Mock(return_value=False))
@patch('gitman.git.changes', Mock(return_value=False))
@patch('gitman.git.update')
@patch('gitman.git.fetch')
@patch('gitman.git.is_fetch_required')
@patch('gitman.git.clone')
def test_update_files_invalid_repo(
self, mock_clone, mock_is_fetch_required, mock_fetch, mock_update
):
"""Verify update_files throws exception on invalid repo when not forced"""
source = Source(type='git', repo='repo', name='name', rev='rev', link='link')
with pytest.raises(Exception):
source.update_files()
mock_clone.assert_not_called()
mock_is_fetch_required.assert_not_called()
mock_fetch.assert_not_called()
mock_update.assert_not_called()
@patch('os.path.isdir', Mock(return_value=True))
@patch('os.listdir', Mock(return_value=['test_file']))
@patch('gitman.shell.cd', Mock(return_value=True))
@patch('gitman.git.valid', Mock(return_value=False))
@patch('gitman.git.changes', Mock(return_value=False))
@patch('gitman.git.update')
@patch('gitman.git.fetch')
@patch('gitman.git.is_fetch_required')
@patch('gitman.git.rebuild')
@patch('gitman.git.clone')
def test_update_files_rebuild_git(
self, mock_clone, mock_rebuild, mock_is_fetch_required, mock_fetch, mock_update
):
"""Verify update_files rebuilds when invalid repo and force is passed"""
source = Source(type='git', repo='repo', name='name', rev='rev', link='link')
source.update_files(force=True)
mock_clone.assert_not_called()
mock_rebuild.assert_called_once_with('git', 'repo')
mock_is_fetch_required.assert_not_called()
mock_fetch.assert_called_once_with('git', 'repo', 'name', rev='rev')
mock_update.assert_called_once_with(
'git', 'repo', 'name', clean=True, fetch=True, rev='rev'
)
def test_identify_missing(self, source, tmpdir):
"""Verify a missing source identifies as unknown."""
tmpdir.chdir()
with patch('os.path.isdir', Mock(return_value=False)):
assert (str(tmpdir), '<missing>', '<unknown>') == source.identify()
def test_lock_uses_the_identity_rev(self, source):
source.identify = Mock(return_value=('path2', 'dir2', 'abc123'))
source2 = source.lock()
assert 'abc123' == source2.rev
assert 'name' == source2.name
|
py | b418156461cc695933d37e170023e7b3b69d5845 | from commercetools import types
def test_get_by_id(client):
product = client.products.create(
types.ProductDraft(
master_variant=types.ProductVariantDraft(sku="123"),
publish=True,
name=types.LocalizedString(nl="Test product"),
)
)
shopping_list = client.shopping_lists.create(
draft=types.ShoppingListDraft(
name=types.LocalizedString({"nl": "Verlanglijstje"}),
description=types.LocalizedString({"nl": "Verlanglijstje van LabD"}),
line_items=[
types.ShoppingListLineItemDraft(product_id=product.id, quantity=1)
],
)
)
assert shopping_list.id
shopping_list = client.shopping_lists.get_by_id(shopping_list.id)
assert shopping_list.name["nl"] == "Verlanglijstje"
assert shopping_list.description["nl"] == "Verlanglijstje van LabD"
assert shopping_list.line_items[0].product_id == product.id
assert shopping_list.line_items[0].quantity == 1
def test_get_by_key(client):
product = client.products.create(
types.ProductDraft(
master_variant=types.ProductVariantDraft(sku="123"),
publish=True,
name=types.LocalizedString(nl="Test product"),
)
)
variant = product.master_data.current.master_variant
shopping_list = client.shopping_lists.create(
draft=types.ShoppingListDraft(
key="test-shopping-list",
name=types.LocalizedString({"nl": "Verlanglijstje"}),
description=types.LocalizedString({"nl": "Verlanglijstje van LabD"}),
line_items=[types.ShoppingListLineItemDraft(sku=variant.sku, quantity=1)],
)
)
assert shopping_list.key
shopping_list = client.shopping_lists.get_by_key("test-shopping-list")
assert shopping_list.name["nl"] == "Verlanglijstje"
assert shopping_list.description["nl"] == "Verlanglijstje van LabD"
assert shopping_list.line_items[0].variant.sku == "123"
assert shopping_list.line_items[0].quantity == 1
def test_query(client):
shopping_list_draft = types.ShoppingListDraft(
key="test-shopping-list",
name=types.LocalizedString({"nl": "Verlanglijstje"}),
description=types.LocalizedString({"nl": "Verlanglijstje van LabD"}),
)
client.shopping_lists.create(draft=shopping_list_draft)
# Update the key and create another one.
shopping_list_draft.key = "test-shopping-list2"
client.shopping_lists.create(draft=shopping_list_draft)
result = client.shopping_lists.query(sort="id asc", limit=10)
assert len(result.results) == 2
assert result.total == 2
result = client.shopping_lists.query(sort=["id asc", "name asc"], limit=1)
assert len(result.results) == 1
assert result.total == 2
def test_delete_by_id(client):
shopping_list = client.shopping_lists.create(
draft=types.ShoppingListDraft(
key="test-shopping-list",
name=types.LocalizedString({"nl": "Verlanglijstje"}),
description=types.LocalizedString({"nl": "Verlanglijstje van LabD"}),
)
)
assert shopping_list.id
shopping_list = client.shopping_lists.delete_by_id(
shopping_list.id, version=shopping_list.version
)
def test_delete_by_key(client):
shopping_list = client.shopping_lists.create(
draft=types.ShoppingListDraft(
key="test-shopping-list",
name=types.LocalizedString({"nl": "Verlanglijstje"}),
description=types.LocalizedString({"nl": "Verlanglijstje van LabD"}),
)
)
assert shopping_list.id
shopping_list = client.shopping_lists.delete_by_key(
shopping_list.key, version=shopping_list.version
)
|
py | b41815b67e01b0827b5dff4274165d5207b93fa3 | """
EBGAN
-----
Implements the Energy based GAN[1].
Uses an auto-encoder as the adversary structure.
Losses:
- Generator: L2 (Mean Squared Error)
- Autoencoder: L2 (Mean Squared Error)
Default optimizer:
- torch.optim.Adam
Custom parameter:
- m: Cut off for the hinge loss. Look at reference for more information.
References
----------
.. [1] https://arxiv.org/pdf/1609.03126.pdf
"""
import numpy as np
import torch
from torch.nn import MSELoss
from vegans.models.unconditional.AbstractGAN1v1 import AbstractGAN1v1
class EBGAN(AbstractGAN1v1):
"""
Parameters
----------
    generator : nn.Module
        Generator architecture. Produces output in the real space.
    adversary : nn.Module
Adversary architecture. Produces predictions for real and fake samples to differentiate them.
x_dim : list, tuple
Number of the output dimensions of the generator and input dimension of the discriminator / critic.
In the case of images this will be [nr_channels, nr_height_pixels, nr_width_pixels].
z_dim : int, list, tuple
Number of the latent dimensions for the generator input. Might have dimensions of an image.
optim : dict or torch.optim
Optimizer used for each network. Could be either an optimizer from torch.optim or a dictionary with network
name keys and torch.optim as value, i.e. {"Generator": torch.optim.Adam}.
optim_kwargs : dict
Optimizer keyword arguments used for each network. Must be a dictionary with network
name keys and dictionary with keyword arguments as value, i.e. {"Generator": {"lr": 0.0001}}.
m: float
Cut off for the hinge loss. Look at reference for more information.
feature_layer : torch.nn.*
Output layer used to compute the feature loss. Should be from either the discriminator or critic.
If `feature_layer` is not None, the original generator loss is replaced by a feature loss, introduced
[here](https://arxiv.org/abs/1606.03498v1).
fixed_noise_size : int
Number of images shown when logging. The fixed noise is used to produce the images in the folder/images
subdirectory, the tensorboard images tab and the samples in get_training_results().
device : string
Device used while training the model. Either "cpu" or "cuda".
ngpu : int
Number of gpus used during training if device == "cuda".
folder : string
Creates a folder in the current working directory with this name. All relevant files like summary, images, models and
tensorboard output are written there. Existing folders are never overwritten or deleted. If a folder with the same name
already exists a time stamp is appended to make it unique.
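    secure : bool
        If True, perform additional sanity checks on the inputs, e.g. that the
        adversary (auto-encoder) output dimensions equal `x_dim`.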
"""
#########################################################################
# Actions before training
#########################################################################
def __init__(
self,
generator,
adversary,
x_dim,
z_dim,
optim=None,
optim_kwargs=None,
m=None,
feature_layer=None,
fixed_noise_size=32,
device=None,
ngpu=None,
folder="./veganModels/EBGAN",
secure=True):
super().__init__(
generator=generator, adversary=adversary,
z_dim=z_dim, x_dim=x_dim, adv_type="Autoencoder",
optim=optim, optim_kwargs=optim_kwargs,
feature_layer=feature_layer,
fixed_noise_size=fixed_noise_size,
device=device, folder=folder, ngpu=ngpu, secure=secure
)
if self.secure:
assert self.adversary.output_size == x_dim, (
"AutoEncoder structure used for adversary. Output dimensions must equal x_dim. " +
"Output: {}. x_dim: {}.".format(self.adversary.output_size, x_dim)
)
self.m = m
self.hyperparameters["m"] = m
def _define_loss(self):
loss_functions = {"Generator": torch.nn.MSELoss(), "Adversary": torch.nn.MSELoss()}
return loss_functions
def _set_up_training(self, X_train, y_train, X_test, y_test, epochs, batch_size, steps,
print_every, save_model_every, save_images_every, save_losses_every, enable_tensorboard):
train_dataloader, test_dataloader, writer_train, writer_test, save_periods = super()._set_up_training(
X_train, y_train, X_test, y_test, epochs, batch_size, steps,
print_every, save_model_every, save_images_every, save_losses_every, enable_tensorboard
)
if self.m is None:
self.m = np.mean(X_train)
return train_dataloader, test_dataloader, writer_train, writer_test, save_periods
def _calculate_generator_loss(self, X_batch, Z_batch):
fake_images = self.generate(z=Z_batch)
if self.feature_layer is None:
fake_predictions = self.predict(x=fake_images)
gen_loss = self.loss_functions["Generator"](
fake_predictions, fake_images
)
else:
gen_loss = self._calculate_feature_loss(X_real=X_batch, X_fake=fake_images)
return {"Generator": gen_loss}
def _calculate_adversary_loss(self, X_batch, Z_batch):
fake_images = self.generate(z=Z_batch).detach()
fake_predictions = self.predict(x=fake_images)
real_predictions = self.predict(x=X_batch)
adv_loss_fake = self.loss_functions["Adversary"](
fake_predictions, fake_images
)
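        # Hinge on the fake reconstruction error: the adversary is only pushed
        # while it reconstructs fake samples better than the margin m, i.e. the
        # fake term becomes max(0, m - MSE(D(G(z)), G(z))).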
if adv_loss_fake < self.m:
adv_loss_fake = self.m - adv_loss_fake
else:
adv_loss_fake = torch.Tensor([0]).to(self.device)
adv_loss_real = self.loss_functions["Adversary"](
real_predictions, X_batch
)
adv_loss = 0.5*(adv_loss_fake + adv_loss_real).float()
return {
"Adversary": adv_loss,
"Adversary_fake": adv_loss_fake,
"Adversary_real": adv_loss_real,
"RealFakeRatio": adv_loss_real / adv_loss_fake
}
|
py | b41815b8369f87360ecab4f2640daffa0a14037a | """
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..representation import LanguageModelingAnnotation
from ..config import PathField, NumberField
from ..utils import UnsupportedPackage
from .format_converter import BaseFormatConverter, ConverterReturn
try:
from tokenizers import Tokenizer, pre_tokenizers, decoders
from tokenizers.models import BPE
except ImportError as import_error:
Tokenizer = UnsupportedPackage("tokenizers", import_error.msg)
pre_tokenizers = UnsupportedPackage("tokenizers", import_error.msg)
decoders = UnsupportedPackage("tokenizers", import_error.msg)
BPE = UnsupportedPackage("tokenizers.models", import_error.msg)
class Wikitext2RawConverter(BaseFormatConverter):
__provider__ = "wikitext2raw"
annotation_types = (LanguageModelingAnnotation, )
@classmethod
def parameters(cls):
configuration_parameters = super().parameters()
configuration_parameters.update({
'testing_file': PathField(description="Path to testing file."),
'merges_file': PathField(description="Path to merges file."),
'vocab_file': PathField(description='Path to vocabulary file.'),
'max_seq_length': NumberField(
description='The maximum total input sequence length after tokenization.',
optional=True, default=128, value_type=int
),
})
return configuration_parameters
def configure(self):
if isinstance(Tokenizer, UnsupportedPackage):
Tokenizer.raise_error(self.__provider__)
self.testing_file = self.get_value_from_config('testing_file')
self.vocab_file = self.get_value_from_config('vocab_file')
self.merges_file = self.get_value_from_config('merges_file')
self.max_seq_length = int(self.get_value_from_config('max_seq_length'))
self.tokenizer = Tokenizer(BPE.from_file(str(self.vocab_file), str(self.merges_file)))
self.tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
self.tokenizer.decoder = decoders.ByteLevel()
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
with open(str(self.testing_file), encoding="utf-8") as f:
text = f.read()
tokens = self.tokenizer.encode_batch([text])
encoding = tokens[0]
annotations = []
unique_id = 1000000000
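        # Split the token stream into non-overlapping windows of
        # max_seq_length tokens; each window becomes one annotation whose
        # labels are the same ids as its inputs (language-modelling targets).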
for idx in range(0, len(encoding.ids) - self.max_seq_length + 1, self.max_seq_length):
ids = encoding.ids[idx: idx + self.max_seq_length]
tokens = encoding.tokens[idx:idx + self.max_seq_length]
identifier = ['input_ids_{}'.format(idx), 'labels_{}'.format(idx)]
annotation = LanguageModelingAnnotation(
identifier,
np.array(unique_id),
np.array([ids]),
tokens,
labels=np.array(ids),
)
annotations.append(annotation)
unique_id += 1
return ConverterReturn(annotations, None, None)
|
py | b4181612913fbd4b1328fa0c609c3768a9c6196d | # Copyright 2017 BBVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from celery import task
from deeptracy.tasks.base_task import DeeptracyTask
from deeptracy_core.dal.database import db
from deeptracy_core.dal.scan_dep.manager import get_scan_by_raw_dep
from deeptracy.notifications.manager import notify_deltas
logger = logging.getLogger('deeptracy')
@task(name="notify_patton_deltas", base=DeeptracyTask)
def notify_patton_deltas(dependencies):
scan_dep_by_project_id = {}
with db.session_scope() as session:
for raw_dep in dependencies:
scan_deps = get_scan_by_raw_dep(raw_dep, session)
for scan_dep in scan_deps:
project = scan_dep.scan.project
if project.id in scan_dep_by_project_id:
scan_dep_by_project_id[project.id]['dependencies'].append(raw_dep)
else:
scan_dep_by_project_id[project.id] = {'project': project, 'dependencies': [raw_dep]}
for project_id in scan_dep_by_project_id:
elem = scan_dep_by_project_id[project_id]
notify_deltas(elem['project'], elem['dependencies'])
logger.debug('notify vulnerabilities')
def format_notify_text(dependencies):
return " , ".join(dependencies)
|
py | b4181806fafcc2cba81699c956246cf36c21a67b | import cairocffi
import pytest
from libqtile import images
from libqtile.widget import Battery, BatteryIcon, battery
from libqtile.widget.battery import BatteryState, BatteryStatus
from test.widgets.conftest import TEST_DIR
class DummyBattery:
def __init__(self, status):
self._status = status
def update_status(self):
return self._status
class DummyErrorBattery:
def __init__(self, **config):
pass
def update_status(self):
raise RuntimeError("err")
def dummy_load_battery(bat):
def load_battery(**config):
return DummyBattery(bat)
return load_battery
def test_text_battery_charging(monkeypatch):
loaded_bat = BatteryStatus(
state=BatteryState.CHARGING,
percent=0.5,
power=15.,
time=1729,
)
with monkeypatch.context() as m:
m.setattr(battery, "load_battery", dummy_load_battery(loaded_bat))
batt = Battery()
text = batt.poll()
assert text == "^ 50% 0:28 15.00 W"
def test_text_battery_discharging(monkeypatch):
loaded_bat = BatteryStatus(
state=BatteryState.DISCHARGING,
percent=0.5,
power=15.,
time=1729,
)
with monkeypatch.context() as m:
m.setattr(battery, "load_battery", dummy_load_battery(loaded_bat))
batt = Battery()
text = batt.poll()
assert text == "V 50% 0:28 15.00 W"
def test_text_battery_full(monkeypatch):
loaded_bat = BatteryStatus(
state=BatteryState.FULL,
percent=0.5,
power=15.,
time=1729,
)
with monkeypatch.context() as m:
m.setattr(battery, "load_battery", dummy_load_battery(loaded_bat))
batt = Battery()
text = batt.poll()
assert text == "Full"
with monkeypatch.context() as m:
m.setattr(battery, "load_battery", dummy_load_battery(loaded_bat))
batt = Battery(show_short_text=False)
text = batt.poll()
assert text == "= 50% 0:28 15.00 W"
def test_text_battery_empty(monkeypatch):
loaded_bat = BatteryStatus(
state=BatteryState.EMPTY,
percent=0.5,
power=15.,
time=1729,
)
with monkeypatch.context() as m:
m.setattr(battery, "load_battery", dummy_load_battery(loaded_bat))
batt = Battery()
text = batt.poll()
assert text == "Empty"
with monkeypatch.context() as m:
m.setattr(battery, "load_battery", dummy_load_battery(loaded_bat))
batt = Battery(show_short_text=False)
text = batt.poll()
assert text == "x 50% 0:28 15.00 W"
loaded_bat = BatteryStatus(
state=BatteryState.UNKNOWN,
percent=0.,
power=15.,
time=1729,
)
with monkeypatch.context() as m:
m.setattr(battery, "load_battery", dummy_load_battery(loaded_bat))
batt = Battery()
text = batt.poll()
assert text == "Empty"
def test_text_battery_unknown(monkeypatch):
loaded_bat = BatteryStatus(
state=BatteryState.UNKNOWN,
percent=0.5,
power=15.,
time=1729,
)
with monkeypatch.context() as m:
m.setattr(battery, "load_battery", dummy_load_battery(loaded_bat))
batt = Battery()
text = batt.poll()
assert text == "? 50% 0:28 15.00 W"
def test_text_battery_hidden(monkeypatch):
loaded_bat = BatteryStatus(
state=BatteryState.DISCHARGING,
percent=0.5,
power=15.,
time=1729,
)
with monkeypatch.context() as m:
m.setattr(battery, "load_battery", dummy_load_battery(loaded_bat))
batt = Battery(hide_threshold=0.6)
text = batt.poll()
assert text != ""
with monkeypatch.context() as m:
m.setattr(battery, "load_battery", dummy_load_battery(loaded_bat))
batt = Battery(hide_threshold=0.4)
text = batt.poll()
assert text == ""
def test_text_battery_error(monkeypatch):
with monkeypatch.context() as m:
m.setattr(battery, "load_battery", DummyErrorBattery)
batt = Battery()
text = batt.poll()
assert text == "Error: err"
def test_images_fail():
"""Test BatteryIcon() with a bad theme_path
This theme path doesn't contain all of the required images.
"""
batt = BatteryIcon(theme_path=TEST_DIR)
with pytest.raises(images.LoadingError):
batt.setup_images()
def test_images_good(tmpdir, fake_bar, svg_img_as_pypath):
"""Test BatteryIcon() with a good theme_path
This theme path does contain all of the required images.
"""
for name in BatteryIcon.icon_names:
target = tmpdir.join(name + '.svg')
svg_img_as_pypath.copy(target)
batt = BatteryIcon(theme_path=str(tmpdir))
batt.fontsize = 12
batt.bar = fake_bar
batt.setup_images()
assert len(batt.surfaces) == len(BatteryIcon.icon_names)
for name, surfpat in batt.surfaces.items():
assert isinstance(surfpat, cairocffi.SurfacePattern)
def test_images_default(fake_bar):
"""Test BatteryIcon() with the default theme_path
Ensure that the default images are successfully loaded.
"""
batt = BatteryIcon()
batt.fontsize = 12
batt.bar = fake_bar
batt.setup_images()
assert len(batt.surfaces) == len(BatteryIcon.icon_names)
for name, surfpat in batt.surfaces.items():
assert isinstance(surfpat, cairocffi.SurfacePattern)
|
py | b418182519a9c87bd294848bbcbd67d6e19084fa | """
Utility routines
"""
import collections
from copy import deepcopy
import json
import itertools
import re
import sys
import traceback
import warnings
import jsonschema
import six
import pandas as pd
import numpy as np
from .schemapi import SchemaBase, Undefined
try:
from pandas.api.types import infer_dtype as _infer_dtype
except ImportError:
# Import for pandas < 0.20.0
from pandas.lib import infer_dtype as _infer_dtype
def infer_dtype(value):
"""Infer the dtype of the value.
This is a compatibility function for pandas infer_dtype,
with skipna=False regardless of the pandas version.
"""
if not hasattr(infer_dtype, '_supports_skipna'):
try:
_ = _infer_dtype([1], skipna=False)
except TypeError:
# pandas < 0.21.0 don't support skipna keyword
infer_dtype._supports_skipna = False
else:
infer_dtype._supports_skipna = True
if infer_dtype._supports_skipna:
return _infer_dtype(value, skipna=False)
else:
return _infer_dtype(value)
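# Note added for clarity: the skipna probe above runs only once; its outcome is
# cached as an attribute on the function object (infer_dtype._supports_skipna),
# so subsequent calls skip the try/except path entirely.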
TYPECODE_MAP = {'ordinal': 'O',
'nominal': 'N',
'quantitative': 'Q',
'temporal': 'T',
'geojson': 'G'}
INV_TYPECODE_MAP = {v: k for k, v in TYPECODE_MAP.items()}
# aggregates from vega-lite version 2.4.3
AGGREGATES = ['argmax', 'argmin', 'average', 'count', 'distinct', 'max',
'mean', 'median', 'min', 'missing', 'q1', 'q3', 'ci0', 'ci1',
'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values',
'variance', 'variancep']
# window aggregates from vega-lite version 2.5.2
WINDOW_AGGREGATES = ["row_number", "rank", "dense_rank", "percent_rank",
"cume_dist", "ntile", "lag", "lead", "first_value",
"last_value", "nth_value"]
# timeUnits from vega-lite version 3.0.0
TIMEUNITS = ["utcyear", "utcquarter", "utcmonth", "utcday", "utcdate",
"utchours", "utcminutes", "utcseconds", "utcmilliseconds",
"utcyearquarter", "utcyearquartermonth", "utcyearmonth",
"utcyearmonthdate", "utcyearmonthdatehours",
"utcyearmonthdatehoursminutes",
"utcyearmonthdatehoursminutesseconds",
"utcquartermonth", "utcmonthdate", "utcmonthdatehours",
"utchoursminutes", "utchoursminutesseconds", "utcminutesseconds",
"utcsecondsmilliseconds",
"year", "quarter", "month", "day", "date", "hours", "minutes",
"seconds", "milliseconds", "yearquarter", "yearquartermonth",
"yearmonth", "yearmonthdate", "yearmonthdatehours",
"yearmonthdatehoursminutes",
"yearmonthdatehoursminutesseconds", "quartermonth", "monthdate",
"monthdatehours", "hoursminutes", "hoursminutesseconds", "minutesseconds",
"secondsmilliseconds"]
def infer_vegalite_type(data):
"""
From an array-like input, infer the correct vega typecode
('ordinal', 'nominal', 'quantitative', or 'temporal')
Parameters
----------
data: Numpy array or Pandas Series
"""
    # Infer based on the dtype of the input
typ = infer_dtype(data)
# TODO: Once this returns 'O', please update test_select_x and test_select_y in test_api.py
if typ in ['floating', 'mixed-integer-float', 'integer',
'mixed-integer', 'complex']:
return 'quantitative'
elif typ in ['string', 'bytes', 'categorical', 'boolean', 'mixed', 'unicode']:
return 'nominal'
elif typ in ['datetime', 'datetime64', 'timedelta',
'timedelta64', 'date', 'time', 'period']:
return 'temporal'
else:
warnings.warn("I don't know how to infer vegalite type from '{}'. "
"Defaulting to nominal.".format(typ))
return 'nominal'
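# Illustrative sketch added below (the helper function and sample data are made
# up for illustration, not part of the upstream module): shows the mapping this
# inference produces for a few common pandas dtypes.
def _example_infer_vegalite_type():  # pragma: no cover - illustration only
    assert infer_vegalite_type(pd.Series([1.0, 2.5, 3.0])) == 'quantitative'
    assert infer_vegalite_type(pd.Series(['a', 'b', 'a'])) == 'nominal'
    dates = pd.to_datetime(pd.Series(['2020-01-01', '2020-02-01']))
    assert infer_vegalite_type(dates) == 'temporal'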
def merge_props_geom(feat):
"""
Merge properties with geometry
* Overwrites 'type' and 'geometry' entries if existing
"""
geom = {k: feat[k] for k in ('type', 'geometry')}
try:
feat['properties'].update(geom)
props_geom = feat['properties']
except (AttributeError, KeyError):
# AttributeError when 'properties' equals None
# KeyError when 'properties' is non-existing
props_geom = geom
return props_geom
def sanitize_geo_interface(geo):
"""Santize a geo_interface to prepare it for serialization.
* Make a copy
* Convert type array or _Array to list
* Convert tuples to lists (using json.loads/dumps)
* Merge properties with geometry
"""
geo = deepcopy(geo)
# convert type _Array or array to list
for key in geo.keys():
if str(type(geo[key]).__name__).startswith(('_Array','array')):
geo[key] = geo[key].tolist()
# convert (nested) tuples to lists
geo = json.loads(json.dumps(geo))
# sanitize features
if geo['type'] == 'FeatureCollection':
geo = geo['features']
if len(geo) > 0:
for idx, feat in enumerate(geo):
geo[idx] = merge_props_geom(feat)
elif geo['type'] == 'Feature':
geo = merge_props_geom(geo)
else:
geo = {'type': 'Feature', 'geometry': geo}
return geo
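# Illustrative sketch added below (the helper function and sample Feature are
# made up for illustration): a minimal Feature passed through
# sanitize_geo_interface has its tuples converted to lists and its properties
# merged alongside the 'type' and 'geometry' entries.
def _example_sanitize_geo_interface():  # pragma: no cover - illustration only
    feature = {
        'type': 'Feature',
        'geometry': {'type': 'Point', 'coordinates': (1.0, 2.0)},
        'properties': {'name': 'spot'},
    }
    clean = sanitize_geo_interface(feature)
    assert clean['geometry']['coordinates'] == [1.0, 2.0]
    assert clean['name'] == 'spot'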
def sanitize_dataframe(df):
"""Sanitize a DataFrame to prepare it for serialization.
* Make a copy
* Convert RangeIndex columns to strings
* Raise ValueError if column names are not strings
* Raise ValueError if it has a hierarchical index.
* Convert categoricals to strings.
* Convert np.bool_ dtypes to Python bool objects
* Convert np.int dtypes to Python int objects
* Convert floats to objects and replace NaNs/infs with None.
* Convert DateTime dtypes into appropriate string representations
* Raise a ValueError for TimeDelta dtypes
"""
df = df.copy()
if isinstance(df.columns, pd.RangeIndex):
df.columns = df.columns.astype(str)
for col in df.columns:
if not isinstance(col, six.string_types):
raise ValueError('Dataframe contains invalid column name: {0!r}. '
'Column names must be strings'.format(col))
if isinstance(df.index, pd.MultiIndex):
raise ValueError('Hierarchical indices not supported')
if isinstance(df.columns, pd.MultiIndex):
raise ValueError('Hierarchical indices not supported')
def to_list_if_array(val):
if isinstance(val, np.ndarray):
return val.tolist()
else:
return val
for col_name, dtype in df.dtypes.iteritems():
if str(dtype) == 'category':
# XXXX: work around bug in to_json for categorical types
# https://github.com/pydata/pandas/issues/10778
col = df[col_name].astype(object)
df[col_name] = col.where(col.notnull(), None)
elif str(dtype) == 'bool':
# convert numpy bools to objects; np.bool is not JSON serializable
df[col_name] = df[col_name].astype(object)
elif str(dtype).startswith('datetime'):
# Convert datetimes to strings. This needs to be a full ISO string
# with time, which is why we cannot use ``col.astype(str)``.
# This is because Javascript parses date-only times in UTC, but
# parses full ISO-8601 dates as local time, and dates in Vega and
# Vega-Lite are displayed in local time by default.
# (see https://github.com/altair-viz/altair/issues/1027)
df[col_name] = df[col_name].apply(lambda x: x.isoformat()).replace('NaT', '')
elif str(dtype).startswith('timedelta'):
raise ValueError('Field "{col_name}" has type "{dtype}" which is '
'not supported by Altair. Please convert to '
'either a timestamp or a numerical value.'
''.format(col_name=col_name, dtype=dtype))
elif str(dtype).startswith('geometry'):
# geopandas >=0.6.1 uses the dtype geometry. Continue here
# otherwise it will give an error on np.issubdtype(dtype, np.integer)
continue
elif np.issubdtype(dtype, np.integer):
# convert integers to objects; np.int is not JSON serializable
df[col_name] = df[col_name].astype(object)
elif np.issubdtype(dtype, np.floating):
# For floats, convert to Python float: np.float is not JSON serializable
# Also convert NaN/inf values to null, as they are not JSON serializable
col = df[col_name]
bad_values = col.isnull() | np.isinf(col)
df[col_name] = col.astype(object).where(~bad_values, None)
elif dtype == object:
# Convert numpy arrays saved as objects to lists
# Arrays are not JSON serializable
col = df[col_name].apply(to_list_if_array, convert_dtype=False)
df[col_name] = col.where(col.notnull(), None)
return df
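# Illustrative sketch added below (the helper function and sample frame are
# made up for illustration): demonstrates the main conversions
# sanitize_dataframe applies before JSON serialization.
def _example_sanitize_dataframe():  # pragma: no cover - illustration only
    df = pd.DataFrame({
        'ints': np.array([1, 2, 3], dtype='int64'),
        'floats': [1.5, np.nan, np.inf],
        'flags': [True, False, True],
    })
    clean = sanitize_dataframe(df)
    # integer/bool columns become object dtype; NaN and inf become None
    assert clean['floats'].tolist() == [1.5, None, None]
    assert clean['ints'].dtype == object and clean['flags'].dtype == object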
def parse_shorthand(shorthand, data=None, parse_aggregates=True,
parse_window_ops=False,
parse_timeunits=True, parse_types=True):
"""General tool to parse shorthand values
These are of the form:
- "col_name"
- "col_name:O"
- "average(col_name)"
- "average(col_name):O"
Optionally, a dataframe may be supplied, from which the type
will be inferred if not specified in the shorthand.
Parameters
----------
shorthand : dict or string
The shorthand representation to be parsed
data : DataFrame, optional
If specified and of type DataFrame, then use these values to infer the
column type if not provided by the shorthand.
parse_aggregates : boolean
If True (default), then parse aggregate functions within the shorthand.
parse_window_ops : boolean
        If True, then parse window operations within the shorthand (default: False).
parse_timeunits : boolean
If True (default), then parse timeUnits from within the shorthand
parse_types : boolean
If True (default), then parse typecodes within the shorthand
Returns
-------
attrs : dict
a dictionary of attributes extracted from the shorthand
Examples
--------
>>> data = pd.DataFrame({'foo': ['A', 'B', 'A', 'B'],
... 'bar': [1, 2, 3, 4]})
>>> parse_shorthand('name') == {'field': 'name'}
True
    >>> parse_shorthand('name:Q') == {'field': 'name', 'type': 'quantitative'}
True
>>> parse_shorthand('average(col)') == {'aggregate': 'average', 'field': 'col'}
True
>>> parse_shorthand('foo:O') == {'field': 'foo', 'type': 'ordinal'}
True
>>> parse_shorthand('min(foo):Q') == {'aggregate': 'min', 'field': 'foo', 'type': 'quantitative'}
True
>>> parse_shorthand('month(col)') == {'field': 'col', 'timeUnit': 'month', 'type': 'temporal'}
True
>>> parse_shorthand('year(col):O') == {'field': 'col', 'timeUnit': 'year', 'type': 'ordinal'}
True
>>> parse_shorthand('foo', data) == {'field': 'foo', 'type': 'nominal'}
True
>>> parse_shorthand('bar', data) == {'field': 'bar', 'type': 'quantitative'}
True
>>> parse_shorthand('bar:O', data) == {'field': 'bar', 'type': 'ordinal'}
True
>>> parse_shorthand('sum(bar)', data) == {'aggregate': 'sum', 'field': 'bar', 'type': 'quantitative'}
True
>>> parse_shorthand('count()', data) == {'aggregate': 'count', 'type': 'quantitative'}
True
"""
if not shorthand:
return {}
valid_typecodes = list(TYPECODE_MAP) + list(INV_TYPECODE_MAP)
units = dict(field='(?P<field>.*)',
type='(?P<type>{})'.format('|'.join(valid_typecodes)),
agg_count='(?P<aggregate>count)',
op_count='(?P<op>count)',
aggregate='(?P<aggregate>{})'.format('|'.join(AGGREGATES)),
window_op='(?P<op>{})'.format('|'.join(AGGREGATES + WINDOW_AGGREGATES)),
timeUnit='(?P<timeUnit>{})'.format('|'.join(TIMEUNITS)))
patterns = []
if parse_aggregates:
patterns.extend([r'{agg_count}\(\)'])
patterns.extend([r'{aggregate}\({field}\)'])
if parse_window_ops:
patterns.extend([r'{op_count}\(\)'])
patterns.extend([r'{window_op}\({field}\)'])
if parse_timeunits:
patterns.extend([r'{timeUnit}\({field}\)'])
patterns.extend([r'{field}'])
if parse_types:
patterns = list(itertools.chain(*((p + ':{type}', p) for p in patterns)))
regexps = (re.compile(r'\A' + p.format(**units) + r'\Z', re.DOTALL)
for p in patterns)
# find matches depending on valid fields passed
if isinstance(shorthand, dict):
attrs = shorthand
else:
attrs = next(exp.match(shorthand).groupdict() for exp in regexps
if exp.match(shorthand))
# Handle short form of the type expression
if 'type' in attrs:
attrs['type'] = INV_TYPECODE_MAP.get(attrs['type'], attrs['type'])
# counts are quantitative by default
if attrs == {'aggregate': 'count'}:
attrs['type'] = 'quantitative'
# times are temporal by default
if 'timeUnit' in attrs and 'type' not in attrs:
attrs['type'] = 'temporal'
# if data is specified and type is not, infer type from data
if isinstance(data, pd.DataFrame) and 'type' not in attrs:
if 'field' in attrs and attrs['field'] in data.columns:
attrs['type'] = infer_vegalite_type(data[attrs['field']])
return attrs
def use_signature(Obj):
"""Apply call signature and documentation of Obj to the decorated method"""
def decorate(f):
# call-signature of f is exposed via __wrapped__.
# we want it to mimic Obj.__init__
f.__wrapped__ = Obj.__init__
f._uses_signature = Obj
# Supplement the docstring of f with information from Obj
if Obj.__doc__:
doclines = Obj.__doc__.splitlines()
if f.__doc__:
doc = f.__doc__ + '\n'.join(doclines[1:])
else:
doc = '\n'.join(doclines)
try:
f.__doc__ = doc
except AttributeError:
# __doc__ is not modifiable for classes in Python < 3.3
pass
return f
return decorate
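# Illustrative sketch added below (the example class and wrapper are made up
# for illustration): the decorated wrapper advertises _ExampleConfig.__init__'s
# call signature via __wrapped__ and inherits its docstring.
class _ExampleConfig(object):
    """Example configuration object whose docstring the wrapper inherits."""
    def __init__(self, width=400, height=300):
        self.width = width
        self.height = height
@use_signature(_ExampleConfig)
def _make_example_config(*args, **kwargs):
    return _ExampleConfig(*args, **kwargs)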
def update_subtraits(obj, attrs, **kwargs):
"""Recursively update sub-traits without overwriting other traits"""
# TODO: infer keywords from args
if not kwargs:
return obj
# obj can be a SchemaBase object or a dict
if obj is Undefined:
obj = dct = {}
elif isinstance(obj, SchemaBase):
dct = obj._kwds
else:
dct = obj
if isinstance(attrs, six.string_types):
attrs = (attrs,)
if len(attrs) == 0:
dct.update(kwargs)
else:
attr = attrs[0]
trait = dct.get(attr, Undefined)
if trait is Undefined:
trait = dct[attr] = {}
dct[attr] = update_subtraits(trait, attrs[1:], **kwargs)
return obj
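# Illustrative sketch added below (the helper function and sample spec are made
# up for illustration): update_subtraits drills into nested keys and merges new
# values without clobbering sibling entries.
def _example_update_subtraits():  # pragma: no cover - illustration only
    spec = {'encoding': {'x': {'field': 'a'}}}
    update_subtraits(spec, ('encoding', 'x'), type='quantitative')
    assert spec['encoding']['x'] == {'field': 'a', 'type': 'quantitative'}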
def update_nested(original, update, copy=False):
"""Update nested dictionaries
Parameters
----------
original : dict
the original (nested) dictionary, which will be updated in-place
update : dict
the nested dictionary of updates
copy : bool, default False
if True, then copy the original dictionary rather than modifying it
Returns
-------
original : dict
a reference to the (modified) original dict
Examples
--------
>>> original = {'x': {'b': 2, 'c': 4}}
>>> update = {'x': {'b': 5, 'd': 6}, 'y': 40}
>>> update_nested(original, update) # doctest: +SKIP
{'x': {'b': 5, 'c': 4, 'd': 6}, 'y': 40}
>>> original # doctest: +SKIP
{'x': {'b': 5, 'c': 4, 'd': 6}, 'y': 40}
"""
if copy:
original = deepcopy(original)
for key, val in update.items():
if isinstance(val, collections.Mapping):
orig_val = original.get(key, {})
if isinstance(orig_val, collections.Mapping):
original[key] = update_nested(orig_val, val)
else:
original[key] = val
else:
original[key] = val
return original
def display_traceback(in_ipython=True):
exc_info = sys.exc_info()
if in_ipython:
from IPython.core.getipython import get_ipython
ip = get_ipython()
else:
ip = None
if ip is not None:
ip.showtraceback(exc_info)
else:
traceback.print_exception(*exc_info)
def infer_encoding_types(args, kwargs, channels):
"""Infer typed keyword arguments for args and kwargs
Parameters
----------
args : tuple
List of function args
kwargs : dict
Dict of function kwargs
channels : module
The module containing all altair encoding channel classes.
Returns
-------
kwargs : dict
All args and kwargs in a single dict, with keys and types
based on the channels mapping.
"""
# Construct a dictionary of channel type to encoding name
# TODO: cache this somehow?
channel_objs = (getattr(channels, name) for name in dir(channels))
channel_objs = (c for c in channel_objs
if isinstance(c, type) and issubclass(c, SchemaBase))
channel_to_name = {c: c._encoding_name for c in channel_objs}
name_to_channel = {}
for chan, name in channel_to_name.items():
chans = name_to_channel.setdefault(name, {})
key = 'value' if chan.__name__.endswith('Value') else 'field'
chans[key] = chan
# First use the mapping to convert args to kwargs based on their types.
for arg in args:
if isinstance(arg, (list, tuple)) and len(arg) > 0:
type_ = type(arg[0])
else:
type_ = type(arg)
encoding = channel_to_name.get(type_, None)
if encoding is None:
raise NotImplementedError("positional of type {}"
"".format(type_))
if encoding in kwargs:
raise ValueError("encoding {} specified twice.".format(encoding))
kwargs[encoding] = arg
def _wrap_in_channel_class(obj, encoding):
try:
condition = obj['condition']
except (KeyError, TypeError):
pass
else:
if condition is not Undefined:
obj = obj.copy()
obj['condition'] = _wrap_in_channel_class(condition, encoding)
if isinstance(obj, SchemaBase):
return obj
if isinstance(obj, six.string_types):
obj = {'shorthand': obj}
if isinstance(obj, (list, tuple)):
return [_wrap_in_channel_class(subobj, encoding) for subobj in obj]
if encoding not in name_to_channel:
warnings.warn("Unrecognized encoding channel '{}'".format(encoding))
return obj
classes = name_to_channel[encoding]
cls = classes['value'] if 'value' in obj else classes['field']
try:
# Don't force validation here; some objects won't be valid until
# they're created in the context of a chart.
return cls.from_dict(obj, validate=False)
except jsonschema.ValidationError:
# our attempts at finding the correct class have failed
return obj
return {encoding: _wrap_in_channel_class(obj, encoding)
for encoding, obj in kwargs.items()}
|
py | b418189f50d439a5ac69cff7ba46e06e8ba98271 | # coding: utf8
import clinica.engine as ce
class DwiPreprocessingUsingT1Cli(ce.CmdParser):
def define_name(self):
"""Define the sub-command name to run this pipeline."""
self._name = 'dwi-preprocessing-using-t1'
def define_description(self):
"""Define a description of this pipeline."""
self._description = ('Preprocessing of raw DWI datasets using a T1w image:\n'
'http://clinica.run/doc/Pipelines/DWI_Preprocessing/')
def define_options(self):
"""Define the sub-command arguments."""
from clinica.engine.cmdparser import PIPELINE_CATEGORIES
# Clinica compulsory arguments (e.g. BIDS, CAPS, group_id)
clinica_comp = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_COMPULSORY'])
clinica_comp.add_argument("bids_directory",
help='Path to the BIDS directory.')
clinica_comp.add_argument("caps_directory",
help='Path to the CAPS directory.')
# Optional arguments (e.g. FWHM)
optional = self._args.add_argument_group(PIPELINE_CATEGORIES['OPTIONAL'])
optional.add_argument("--low_bval",
metavar='N', type=int, default=5,
                              help='Define the b0 volumes as all volumes with bval <= low_bval '
'(default: --low_bval %(default)s).')
# Clinica standard arguments (e.g. --n_procs)
self.add_clinica_standard_arguments()
def run_command(self, args):
"""Run the pipeline with defined args."""
from networkx import Graph
from .dwi_preprocessing_using_t1_pipeline import DwiPreprocessingUsingT1
from clinica.utils.ux import print_end_pipeline, print_crash_files_and_exit
parameters = {
'low_bval': args.low_bval
}
pipeline = DwiPreprocessingUsingT1(
bids_directory=self.absolute_path(args.bids_directory),
caps_directory=self.absolute_path(args.caps_directory),
tsv_file=self.absolute_path(args.subjects_sessions_tsv),
base_dir=self.absolute_path(args.working_directory),
parameters=parameters,
name=self.name
)
if args.n_procs:
exec_pipeline = pipeline.run(plugin='MultiProc',
plugin_args={'n_procs': args.n_procs})
else:
exec_pipeline = pipeline.run()
if isinstance(exec_pipeline, Graph):
print_end_pipeline(self.name, pipeline.base_dir, pipeline.base_dir_was_specified)
else:
print_crash_files_and_exit(args.logname, pipeline.base_dir)
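# Added usage note (hedged; the paths below are placeholders and the `clinica
# run` entry point is assumed from Clinica's standard CLI layout): once this
# CmdParser is registered, the pipeline is typically launched as
#   clinica run dwi-preprocessing-using-t1 /path/to/bids /path/to/caps --low_bval 10
# with parallelism controlled through the standard arguments added by
# add_clinica_standard_arguments() (e.g. the n_procs value read in run_command).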
|
py | b418198abf29dcab19eb320926a676e3b54acdf6 | # -*- coding: utf-8 -*-
import numpy as np
import sys,os
import cv2
import argparse
import caffe
import time
import dlib
# class rect_t:
# def __init__(self, left, top, right, bottom):
# self.left = left
# self.top = top
# self.right = right
# self.bottom = bottom
def postprocess(img, out):
h = img.shape[0]
w = img.shape[1]
box = out['detection_out'][0,0,:,3:7] * np.array([w, h, w, h])
cls = out['detection_out'][0,0,:,1]
conf = out['detection_out'][0,0,:,2]
return (box.astype(np.int32), conf, cls)
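# Added note: for the Caffe SSD DetectionOutput layer, each row of
# 'detection_out' is [image_id, label, confidence, xmin, ymin, xmax, ymax] with
# box coordinates normalized to [0, 1], which is why columns 3:7 are rescaled
# by the image width/height above.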
# def detect(img_dir, imgfile):
#
# full_path = os.path.join(img_dir, imgfile + ".jpg")
# frame = cv2.imread(full_path)
# transformed_image = transformer.preprocess('data', frame)
#
# # transformed_image = frame.astype(np.float32) - [127.5, 127.5, 127.5]
# # transformed_image = transformed_image / 128.0
# # transformed_image = np.transpose(transformed_image, (2,0,1))
# # transformed_image = transformed_image[np.newaxis, :,:,:]
#
# # print img
# # print(transformed_image)
# # print(net.blobs['data'].data.shape)
# # print(transformed_image.shape)
#
#
# # net.blobs['data'].reshape(*(transformed_image.shape))
# net.blobs['data'].data[...] = transformed_image
#
# # print(net.blobs['data'].data.shape)
# # exit()
#
#
# time_start=time.time()
# out = net.forward()
# time_end=time.time()
# print (time_end-time_start)
# #print(out['detection_out'])
# box, conf, cls = postprocess(frame, out)
#
# count = 0
# _str = ""
# str_name = imgfile + "\n"
# str_box = ""
#
# _str += str_name
# for i in range(len(box)):
# p1 = (box[i][0], box[i][1])
# p2 = (box[i][2], box[i][3])
# if conf[i] >= 0.9 :
# cv2.rectangle(frame, p1, p2, (0,255,0))
# p3 = (max(p1[0], 15), max(p1[1], 15))
# title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
# cv2.putText(frame, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
#
# str_box += str(box[i][0]) + " " \
# + str(box[i][1]) + " " \
# + str(box[i][2] - box[i][0]) + " " \
# + str(box[i][3] - box[i][1]) + " " \
# + str(conf[i]) + "\n"
# count += 1
# _str += str(count) + "\n"
# _str += str_box
# print(_str)
# return _str, frame
def detect(img, net):
transformed_image = img.astype(np.float32) - [104, 117, 123]
transformed_image = np.transpose(transformed_image, (2,0,1))
transformed_image = transformed_image[np.newaxis, :,:,:]
# print img
# print(transformed_image)
# print(net.blobs['data'].data.shape)
# print(transformed_image.shape)
net.blobs['data'].reshape(*(transformed_image.shape))
net.blobs['data'].data[...] = transformed_image
# print(net.blobs['data'].data.shape)
# exit()
time_start=time.time()
out = net.forward()
time_end=time.time()
print (time_end-time_start)
#print(out['detection_out'])
box, conf, cls = postprocess(img, out)
rects = []
for i in range(len(box)):
if conf[i] >= 0.9 :
rect = dlib.rectangle(box[i][0], box[i][1], box[i][2], box[i][3])
rects.append(rect)
return rects
def face_detector_init(proto, model):
caffe.set_mode_cpu()
net = caffe.Net(proto, model, caffe.TEST)
return net
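# Illustrative sketch added below (the function name and file paths are
# placeholders, not from the original script): ties the helpers together by
# loading the SSD net once and running detection on a BGR image read by OpenCV.
def example_detect_faces(image_path, proto_path, model_path):
    net = face_detector_init(proto_path, model_path)
    img = cv2.imread(image_path)
    if img is None:
        raise IOError("could not read image: %s" % image_path)
    rects = detect(img, net)
    # draw the detected face boxes returned as dlib rectangles
    for r in rects:
        cv2.rectangle(img, (r.left(), r.top()), (r.right(), r.bottom()), (0, 255, 0), 2)
    return img, rects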
# if __name__ == '__main__':
# args = parse_arguments(sys.argv[1:])
#
# image_dir = args.image_dir
# file_list = args.file_list
# file_result = args.file_result
# proto = args.prototxt
# model = args.caffemodel
#
# if not os.path.exists(image_dir):
# print("image_dir: {} does not exist".format(image_dir))
# exit()
# if not os.path.exists(file_list):
# print("file_list: {} does not exist".format(file_list))
# exit()
# if not os.path.exists(proto):
# print("prototxt: {} does not exist".format(proto))
# exit()
# if not os.path.exists(model):
# print("caffemodel: {} does not exist".format(model))
# exit()
#
# output_dir = os.path.basename(image_dir) + "square_real_out_035_topk200_cuda"
# if not os.path.exists(output_dir):
# os.mkdir(output_dir)
#
#
# caffe.set_mode_gpu()
# net = caffe.Net(proto, model,caffe.TEST)
#
# CLASSES = ('background','face')
#
# transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
# transformer.set_transpose('data', (2, 0, 1))
# transformer.set_mean('data', np.array([104, 117, 123])) # mean pixel
#
# imgs_path_fd = open(file_list, "r")
# imgs_path = imgs_path_fd.readlines()
# imgs_path_fd.close()
#
# print(imgs_path)
#
# str_ret =""
# for img_path in imgs_path:
# _str, frame = detect(image_dir, img_path.strip("\n"))
# str_ret += _str
# cv2.imwrite(os.path.join(output_dir, img_path.replace("/","_").strip("\n") + ".jpg"), frame)
#
# d_ret_fd = open(file_result, "w")
# d_ret_fd.writelines(str_ret)
# d_ret_fd.close()
|